// SPDX-License-Identifier: GPL-2.0+
/*
 *	watchdog_core.c
 *
 *	(c) Copyright 2008-2011 Alan Cox <alan@lxorguk.ukuu.org.uk>,
 *						All Rights Reserved.
 *
 *	(c) Copyright 2008-2011 Wim Van Sebroeck <wim@iguana.be>.
 *
 *	This source code is part of the generic code that can be used
 *	by all the watchdog timer drivers.
 *
 *	Based on source code of the following authors:
 *	  Matt Domsch <Matt_Domsch@dell.com>,
 *	  Rob Radez <rob@osinvestor.com>,
 *	  Rusty Lynch <rusty@linux.co.intel.com>
 *	  Satyam Sharma <satyam@infradead.org>
 *	  Randy Dunlap <randy.dunlap@oracle.com>
 *
 *	Neither Alan Cox, CymruNet Ltd., Wim Van Sebroeck nor Iguana vzw.
 *	admit liability nor provide warranty for any of this software.
 *	This material is provided "AS-IS" and at no charge.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>	/* For EXPORT_SYMBOL/module stuff/... */
#include <linux/types.h>	/* For standard types */
#include <linux/errno.h>	/* For the -ENODEV/... values */
#include <linux/kernel.h>	/* For printk/panic/... */
#include <linux/reboot.h>	/* For restart handler */
#include <linux/watchdog.h>	/* For watchdog specific items */
#include <linux/init.h>		/* For __init/__exit/... */
#include <linux/idr.h>		/* For ida_* macros */
#include <linux/err.h>		/* For IS_ERR macros */
#include <linux/of.h>		/* For of_alias_get_id/of_property_read_u32 */

#include "watchdog_core.h"	/* For watchdog_dev_register/... */

static DEFINE_IDA(watchdog_ida);

static int stop_on_reboot = -1;
module_param(stop_on_reboot, int, 0444);
MODULE_PARM_DESC(stop_on_reboot, "Stop watchdogs on reboot (0=keep watching, 1=stop)");
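
/*
 * Note (illustration, not taken from the original comments): with the core
 * built as a module this parameter is typically passed as
 * "modprobe watchdog stop_on_reboot=1", or as "watchdog.stop_on_reboot=1" on
 * the kernel command line when built in. The default of -1 leaves each
 * driver's own WDOG_STOP_ON_REBOOT setting untouched.
 */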

/*
 * Deferred Registration infrastructure.
 *
 * Sometimes watchdog drivers need to be loaded as soon as possible,
 * for example when it is impossible to disable the watchdog. To do so,
 * raising the initcall level of the watchdog driver is a solution.
 * But in that case miscdev may not be ready yet (it comes up at
 * subsys_initcall level), and watchdog_core needs miscdev to register
 * the watchdog as a char device.
 *
 * The deferred registration infrastructure offers a way for the watchdog
 * subsystem to register a watchdog properly, even before miscdev is ready.
 */
58*4882a593Smuzhiyun
59*4882a593Smuzhiyun static DEFINE_MUTEX(wtd_deferred_reg_mutex);
60*4882a593Smuzhiyun static LIST_HEAD(wtd_deferred_reg_list);
61*4882a593Smuzhiyun static bool wtd_deferred_reg_done;
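
/*
 * Illustration only (hypothetical foo_wdt driver, not part of this file):
 * a driver that must be up as early as possible can raise its initcall
 * level; watchdog_register_device() then simply queues the device on
 * wtd_deferred_reg_list until watchdog_deferred_registration() runs at
 * subsys_initcall_sync time.
 *
 *	static int __init foo_wdt_driver_init(void)
 *	{
 *		return platform_driver_register(&foo_wdt_driver);
 *	}
 *	arch_initcall(foo_wdt_driver_init);	// earlier than subsys_initcall
 */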

static void watchdog_deferred_registration_add(struct watchdog_device *wdd)
{
	list_add_tail(&wdd->deferred,
		      &wtd_deferred_reg_list);
}

static void watchdog_deferred_registration_del(struct watchdog_device *wdd)
{
	struct list_head *p, *n;
	struct watchdog_device *wdd_tmp;

	list_for_each_safe(p, n, &wtd_deferred_reg_list) {
		wdd_tmp = list_entry(p, struct watchdog_device,
				     deferred);
		if (wdd_tmp == wdd) {
			list_del(&wdd_tmp->deferred);
			break;
		}
	}
}

static void watchdog_check_min_max_timeout(struct watchdog_device *wdd)
{
	/*
	 * Check that we have valid min and max timeout values, if
	 * not reset them both to 0 (=not used or unknown)
	 */
	if (!wdd->max_hw_heartbeat_ms && wdd->min_timeout > wdd->max_timeout) {
		pr_info("Invalid min and max timeout values, resetting to 0!\n");
		wdd->min_timeout = 0;
		wdd->max_timeout = 0;
	}
}

/**
 * watchdog_init_timeout() - initialize the timeout field
 * @wdd: watchdog device
 * @timeout_parm: timeout module parameter
 * @dev: Device that stores the timeout-sec property
 *
 * Initialize the timeout field of the watchdog_device struct with either the
 * timeout module parameter (if it is a valid value) or the timeout-sec
 * property (only if it is a valid value and the timeout_parm is out of
 * bounds). If neither is valid then we keep the old value (which should
 * normally be the default timeout value). Note that for the module parameter
 * '0' means 'use the default', while it is an invalid value for the
 * timeout-sec property; simply omit the property if you want the default.
 *
 * A zero is returned on success or -EINVAL if all provided values are out of
 * bounds.
 */
int watchdog_init_timeout(struct watchdog_device *wdd,
			  unsigned int timeout_parm, struct device *dev)
{
	const char *dev_str = wdd->parent ? dev_name(wdd->parent) :
			      (const char *)wdd->info->identity;
	unsigned int t = 0;
	int ret = 0;

	watchdog_check_min_max_timeout(wdd);

	/* check the driver supplied value (likely a module parameter) first */
	if (timeout_parm) {
		if (!watchdog_timeout_invalid(wdd, timeout_parm)) {
			wdd->timeout = timeout_parm;
			return 0;
		}
		pr_err("%s: driver supplied timeout (%u) out of range\n",
			dev_str, timeout_parm);
		ret = -EINVAL;
	}

	/* try to get the timeout_sec property */
	if (dev && dev->of_node &&
	    of_property_read_u32(dev->of_node, "timeout-sec", &t) == 0) {
		if (t && !watchdog_timeout_invalid(wdd, t)) {
			wdd->timeout = t;
			return 0;
		}
		pr_err("%s: DT supplied timeout (%u) out of range\n", dev_str, t);
		ret = -EINVAL;
	}

	if (ret < 0 && wdd->timeout)
		pr_warn("%s: falling back to default timeout (%u)\n", dev_str,
			wdd->timeout);

	return ret;
}
EXPORT_SYMBOL_GPL(watchdog_init_timeout);
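
/*
 * Usage sketch (hypothetical foo_wdt driver, not part of this file): a
 * typical caller combines a module parameter with the optional "timeout-sec"
 * device tree property and keeps a driver default as fallback.
 *
 *	static unsigned int heartbeat;		// 0 means "use the default"
 *	module_param(heartbeat, uint, 0);
 *
 *	// in probe(), once min_timeout/max_timeout have been set up:
 *	foo->wdd.timeout = FOO_WDT_DEFAULT_TIMEOUT;
 *	watchdog_init_timeout(&foo->wdd, heartbeat, &pdev->dev);
 */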

static int watchdog_reboot_notifier(struct notifier_block *nb,
				    unsigned long code, void *data)
{
	struct watchdog_device *wdd;

	wdd = container_of(nb, struct watchdog_device, reboot_nb);
	if (code == SYS_DOWN || code == SYS_HALT) {
		if (watchdog_active(wdd)) {
			int ret;

			ret = wdd->ops->stop(wdd);
			if (ret)
				return NOTIFY_BAD;
		}
	}

	return NOTIFY_DONE;
}

static int watchdog_restart_notifier(struct notifier_block *nb,
				     unsigned long action, void *data)
{
	struct watchdog_device *wdd = container_of(nb, struct watchdog_device,
						   restart_nb);
	int ret;

	ret = wdd->ops->restart(wdd, action, data);
	if (ret)
		return NOTIFY_BAD;

	return NOTIFY_DONE;
}

/**
 * watchdog_set_restart_priority - Change priority of restart handler
 * @wdd: watchdog device
 * @priority: priority of the restart handler, should follow these guidelines:
 *   0: use watchdog's restart function as last resort, has limited restart
 *      capabilities
 *   128: default restart handler, use if no other handler is expected to be
 *        available and/or if restart is sufficient to restart the entire
 *        system
 *   255: preempt all other handlers
 *
 * If a wdd->ops->restart function is provided when watchdog_register_device
 * is called, it will be registered as a restart handler with the priority
 * given here.
 */
void watchdog_set_restart_priority(struct watchdog_device *wdd, int priority)
{
	wdd->restart_nb.priority = priority;
}
EXPORT_SYMBOL_GPL(watchdog_set_restart_priority);
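
/*
 * Usage sketch (hypothetical foo_wdt driver, not part of this file): a
 * driver whose restart method can reliably reset the whole system would
 * typically raise the priority before registering:
 *
 *	watchdog_set_restart_priority(&foo->wdd, 128);
 *	devm_watchdog_register_device(&pdev->dev, &foo->wdd);
 */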

static int __watchdog_register_device(struct watchdog_device *wdd)
{
	int ret, id = -1;

	if (wdd == NULL || wdd->info == NULL || wdd->ops == NULL)
		return -EINVAL;

	/* Mandatory operations need to be supported */
	if (!wdd->ops->start || (!wdd->ops->stop && !wdd->max_hw_heartbeat_ms))
		return -EINVAL;

	watchdog_check_min_max_timeout(wdd);

	/*
	 * Note: now that all watchdog_device data has been verified, we
	 * will not check this anymore in other functions. If data gets
	 * corrupted in a later stage then we expect a kernel panic!
	 */

	/* Use alias for watchdog id if possible */
	if (wdd->parent) {
		ret = of_alias_get_id(wdd->parent->of_node, "watchdog");
		if (ret >= 0)
			id = ida_simple_get(&watchdog_ida, ret,
					    ret + 1, GFP_KERNEL);
	}

	if (id < 0)
		id = ida_simple_get(&watchdog_ida, 0, MAX_DOGS, GFP_KERNEL);

	if (id < 0)
		return id;
	wdd->id = id;

	ret = watchdog_dev_register(wdd);
	if (ret) {
		ida_simple_remove(&watchdog_ida, id);
		if (!(id == 0 && ret == -EBUSY))
			return ret;

		/* Retry in case a legacy watchdog module exists */
		id = ida_simple_get(&watchdog_ida, 1, MAX_DOGS, GFP_KERNEL);
		if (id < 0)
			return id;
		wdd->id = id;

		ret = watchdog_dev_register(wdd);
		if (ret) {
			ida_simple_remove(&watchdog_ida, id);
			return ret;
		}
	}

	/* Module parameter to force watchdog policy on reboot. */
	if (stop_on_reboot != -1) {
		if (stop_on_reboot)
			set_bit(WDOG_STOP_ON_REBOOT, &wdd->status);
		else
			clear_bit(WDOG_STOP_ON_REBOOT, &wdd->status);
	}

	if (test_bit(WDOG_STOP_ON_REBOOT, &wdd->status)) {
		if (!wdd->ops->stop)
			pr_warn("watchdog%d: stop_on_reboot not supported\n", wdd->id);
		else {
			wdd->reboot_nb.notifier_call = watchdog_reboot_notifier;

			ret = register_reboot_notifier(&wdd->reboot_nb);
			if (ret) {
				pr_err("watchdog%d: Cannot register reboot notifier (%d)\n",
				       wdd->id, ret);
				watchdog_dev_unregister(wdd);
				ida_simple_remove(&watchdog_ida, id);
				return ret;
			}
		}
	}

	if (wdd->ops->restart) {
		wdd->restart_nb.notifier_call = watchdog_restart_notifier;

		ret = register_restart_handler(&wdd->restart_nb);
		if (ret)
			pr_warn("watchdog%d: Cannot register restart handler (%d)\n",
				wdd->id, ret);
	}

	return 0;
}

/**
 * watchdog_register_device() - register a watchdog device
 * @wdd: watchdog device
 *
 * Register a watchdog device with the kernel so that the
 * watchdog timer can be accessed from userspace.
 *
 * A zero is returned on success and a negative errno code for
 * failure.
 */

int watchdog_register_device(struct watchdog_device *wdd)
{
	const char *dev_str;
	int ret = 0;

	mutex_lock(&wtd_deferred_reg_mutex);
	if (wtd_deferred_reg_done)
		ret = __watchdog_register_device(wdd);
	else
		watchdog_deferred_registration_add(wdd);
	mutex_unlock(&wtd_deferred_reg_mutex);

	if (ret) {
		dev_str = wdd->parent ? dev_name(wdd->parent) :
			  (const char *)wdd->info->identity;
		pr_err("%s: failed to register watchdog device (err = %d)\n",
			dev_str, ret);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(watchdog_register_device);
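
/*
 * Usage sketch (hypothetical foo_wdt driver, not part of this file): the
 * minimum a driver has to provide before calling watchdog_register_device()
 * is an info structure, an ops structure with at least a start method (and
 * a stop method unless max_hw_heartbeat_ms is set), and sane timeout limits.
 *
 *	static const struct watchdog_info foo_wdt_info = {
 *		.options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING |
 *			   WDIOF_MAGICCLOSE,
 *		.identity = "foo_wdt watchdog",
 *	};
 *
 *	static const struct watchdog_ops foo_wdt_ops = {
 *		.owner = THIS_MODULE,
 *		.start = foo_wdt_start,
 *		.stop = foo_wdt_stop,
 *		.set_timeout = foo_wdt_set_timeout,
 *	};
 *
 *	// in probe():
 *	foo->wdd.info = &foo_wdt_info;
 *	foo->wdd.ops = &foo_wdt_ops;
 *	foo->wdd.min_timeout = 1;
 *	foo->wdd.max_timeout = 60;
 *	foo->wdd.parent = &pdev->dev;
 *	ret = watchdog_register_device(&foo->wdd);
 */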

static void __watchdog_unregister_device(struct watchdog_device *wdd)
{
	if (wdd == NULL)
		return;

	if (wdd->ops->restart)
		unregister_restart_handler(&wdd->restart_nb);

	if (test_bit(WDOG_STOP_ON_REBOOT, &wdd->status))
		unregister_reboot_notifier(&wdd->reboot_nb);

	watchdog_dev_unregister(wdd);
	ida_simple_remove(&watchdog_ida, wdd->id);
}

/**
 * watchdog_unregister_device() - unregister a watchdog device
 * @wdd: watchdog device to unregister
 *
 * Unregister a watchdog device that was previously successfully
 * registered with watchdog_register_device().
 */

void watchdog_unregister_device(struct watchdog_device *wdd)
{
	mutex_lock(&wtd_deferred_reg_mutex);
	if (wtd_deferred_reg_done)
		__watchdog_unregister_device(wdd);
	else
		watchdog_deferred_registration_del(wdd);
	mutex_unlock(&wtd_deferred_reg_mutex);
}
EXPORT_SYMBOL_GPL(watchdog_unregister_device);
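
/*
 * Usage sketch (hypothetical foo_wdt driver, not part of this file): a
 * driver that used watchdog_register_device() directly undoes it in its
 * remove path; drivers using the devm_ variant below can skip this.
 *
 *	static int foo_wdt_remove(struct platform_device *pdev)
 *	{
 *		struct foo_wdt *foo = platform_get_drvdata(pdev);
 *
 *		watchdog_unregister_device(&foo->wdd);
 *		return 0;
 *	}
 */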

static void devm_watchdog_unregister_device(struct device *dev, void *res)
{
	watchdog_unregister_device(*(struct watchdog_device **)res);
}

/**
 * devm_watchdog_register_device() - resource managed watchdog_register_device()
 * @dev: device that is registering this watchdog device
 * @wdd: watchdog device
 *
 * Managed watchdog_register_device(). For a watchdog device registered by
 * this function, watchdog_unregister_device() is automatically called on
 * driver detach. See watchdog_register_device() for more information.
 */
int devm_watchdog_register_device(struct device *dev,
				  struct watchdog_device *wdd)
{
	struct watchdog_device **rcwdd;
	int ret;

	rcwdd = devres_alloc(devm_watchdog_unregister_device, sizeof(*rcwdd),
			     GFP_KERNEL);
	if (!rcwdd)
		return -ENOMEM;

	ret = watchdog_register_device(wdd);
	if (!ret) {
		*rcwdd = wdd;
		devres_add(dev, rcwdd);
	} else {
		devres_free(rcwdd);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(devm_watchdog_register_device);
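
/*
 * Usage sketch (hypothetical foo_wdt driver, not part of this file): with
 * the device-managed variant the unregister call in remove() goes away
 * entirely.
 *
 *	static int foo_wdt_probe(struct platform_device *pdev)
 *	{
 *		struct foo_wdt *foo;
 *
 *		foo = devm_kzalloc(&pdev->dev, sizeof(*foo), GFP_KERNEL);
 *		if (!foo)
 *			return -ENOMEM;
 *
 *		foo->wdd.info = &foo_wdt_info;
 *		foo->wdd.ops = &foo_wdt_ops;
 *		foo->wdd.parent = &pdev->dev;
 *
 *		return devm_watchdog_register_device(&pdev->dev, &foo->wdd);
 *	}
 */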

static int __init watchdog_deferred_registration(void)
{
	mutex_lock(&wtd_deferred_reg_mutex);
	wtd_deferred_reg_done = true;
	while (!list_empty(&wtd_deferred_reg_list)) {
		struct watchdog_device *wdd;

		wdd = list_first_entry(&wtd_deferred_reg_list,
				       struct watchdog_device, deferred);
		list_del(&wdd->deferred);
		__watchdog_register_device(wdd);
	}
	mutex_unlock(&wtd_deferred_reg_mutex);
	return 0;
}

static int __init watchdog_init(void)
{
	int err;

	err = watchdog_dev_init();
	if (err < 0)
		return err;

	watchdog_deferred_registration();
	return 0;
}

static void __exit watchdog_exit(void)
{
	watchdog_dev_exit();
	ida_destroy(&watchdog_ida);
}

subsys_initcall_sync(watchdog_init);
module_exit(watchdog_exit);

MODULE_AUTHOR("Alan Cox <alan@lxorguk.ukuu.org.uk>");
MODULE_AUTHOR("Wim Van Sebroeck <wim@iguana.be>");
MODULE_DESCRIPTION("WatchDog Timer Driver Core");
MODULE_LICENSE("GPL");