1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
30
31#include <linux/module.h>
32#include <linux/types.h>
33#include <linux/errno.h>
34#include <linux/kernel.h>
35#include <linux/reboot.h>
36#include <linux/watchdog.h>
37#include <linux/init.h>
38#include <linux/idr.h>
39#include <linux/err.h>
40#include <linux/of.h>
41
42#include "watchdog_core.h"
43
/* Allocator for watchdog device ids; backs the wdd->id values handed out in
 * __watchdog_register_device() and released on unregistration. */
static DEFINE_IDA(watchdog_ida);
45
46
47
48
49
50
51
52
53
54
55
56
57
58
/*
 * Deferred-registration machinery: watchdog drivers that register before the
 * core has finished initializing (wtd_deferred_reg_done still false) are
 * queued on wtd_deferred_reg_list and registered for real from
 * watchdog_deferred_registration(). The mutex serializes list access and the
 * done flag transition.
 */
static DEFINE_MUTEX(wtd_deferred_reg_mutex);
static LIST_HEAD(wtd_deferred_reg_list);
static bool wtd_deferred_reg_done;
62
63static int watchdog_deferred_registration_add(struct watchdog_device *wdd)
64{
65 list_add_tail(&wdd->deferred,
66 &wtd_deferred_reg_list);
67 return 0;
68}
69
70static void watchdog_deferred_registration_del(struct watchdog_device *wdd)
71{
72 struct list_head *p, *n;
73 struct watchdog_device *wdd_tmp;
74
75 list_for_each_safe(p, n, &wtd_deferred_reg_list) {
76 wdd_tmp = list_entry(p, struct watchdog_device,
77 deferred);
78 if (wdd_tmp == wdd) {
79 list_del(&wdd_tmp->deferred);
80 break;
81 }
82 }
83}
84
85static void watchdog_check_min_max_timeout(struct watchdog_device *wdd)
86{
87
88
89
90
91 if (wdd->min_timeout > wdd->max_timeout) {
92 pr_info("Invalid min and max timeout values, resetting to 0!\n");
93 wdd->min_timeout = 0;
94 wdd->max_timeout = 0;
95 }
96}
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111int watchdog_init_timeout(struct watchdog_device *wdd,
112 unsigned int timeout_parm, struct device *dev)
113{
114 unsigned int t = 0;
115 int ret = 0;
116
117 watchdog_check_min_max_timeout(wdd);
118
119
120 if (!watchdog_timeout_invalid(wdd, timeout_parm) && timeout_parm) {
121 wdd->timeout = timeout_parm;
122 return ret;
123 }
124 if (timeout_parm)
125 ret = -EINVAL;
126
127
128 if (dev == NULL || dev->of_node == NULL)
129 return ret;
130 of_property_read_u32(dev->of_node, "timeout-sec", &t);
131 if (!watchdog_timeout_invalid(wdd, t) && t)
132 wdd->timeout = t;
133 else
134 ret = -EINVAL;
135
136 return ret;
137}
138EXPORT_SYMBOL_GPL(watchdog_init_timeout);
139
140static int watchdog_reboot_notifier(struct notifier_block *nb,
141 unsigned long code, void *data)
142{
143 struct watchdog_device *wdd = container_of(nb, struct watchdog_device,
144 reboot_nb);
145
146 if (code == SYS_DOWN || code == SYS_HALT) {
147 if (watchdog_active(wdd)) {
148 int ret;
149
150 ret = wdd->ops->stop(wdd);
151 if (ret)
152 return NOTIFY_BAD;
153 }
154 }
155
156 return NOTIFY_DONE;
157}
158
159static int watchdog_restart_notifier(struct notifier_block *nb,
160 unsigned long action, void *data)
161{
162 struct watchdog_device *wdd = container_of(nb, struct watchdog_device,
163 restart_nb);
164
165 int ret;
166
167 ret = wdd->ops->restart(wdd, action, data);
168 if (ret)
169 return NOTIFY_BAD;
170
171 return NOTIFY_DONE;
172}
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
/**
 * watchdog_set_restart_priority - Change priority of restart handler
 * @wdd: watchdog device
 * @priority: priority of the restart handler, as passed to
 *	register_restart_handler() when the device is registered
 *
 * Must be called before watchdog_register_device(); the value is copied into
 * wdd->restart_nb.priority, which is read at registration time.
 */
void watchdog_set_restart_priority(struct watchdog_device *wdd, int priority)
{
	wdd->restart_nb.priority = priority;
}
EXPORT_SYMBOL_GPL(watchdog_set_restart_priority);
193
/*
 * Register a watchdog device with the core: validate the driver-provided
 * struct, allocate an id, create the character device, and hook up the
 * optional reboot/restart notifiers. Returns 0 on success or a negative
 * errno; on failure all partial state is rolled back.
 */
static int __watchdog_register_device(struct watchdog_device *wdd)
{
	int ret, id = -1;

	if (wdd == NULL || wdd->info == NULL || wdd->ops == NULL)
		return -EINVAL;

	/* Mandatory operations: start always; stop may be absent only if the
	 * hardware heartbeat limit is known (max_hw_heartbeat_ms set). */
	if (!wdd->ops->start || (!wdd->ops->stop && !wdd->max_hw_heartbeat_ms))
		return -EINVAL;

	watchdog_check_min_max_timeout(wdd);

	/*
	 * Prefer the id from a devicetree "watchdog" alias, if the parent
	 * device has one; the [ret, ret+1) range asks the ida for exactly
	 * that id. On any failure fall through to a first-free allocation.
	 */
	if (wdd->parent) {
		ret = of_alias_get_id(wdd->parent->of_node, "watchdog");
		if (ret >= 0)
			id = ida_simple_get(&watchdog_ida, ret,
					    ret + 1, GFP_KERNEL);
	}

	if (id < 0)
		id = ida_simple_get(&watchdog_ida, 0, MAX_DOGS, GFP_KERNEL);

	if (id < 0)
		return id;
	wdd->id = id;

	ret = watchdog_dev_register(wdd);
	if (ret) {
		ida_simple_remove(&watchdog_ida, id);
		if (!(id == 0 && ret == -EBUSY))
			return ret;

		/* -EBUSY for id 0 presumably means the legacy /dev/watchdog
		 * node is already taken — retry with ids >= 1. TODO confirm
		 * against watchdog_dev_register(). */
		id = ida_simple_get(&watchdog_ida, 1, MAX_DOGS, GFP_KERNEL);
		if (id < 0)
			return id;
		wdd->id = id;

		ret = watchdog_dev_register(wdd);
		if (ret) {
			ida_simple_remove(&watchdog_ida, id);
			return ret;
		}
	}

	/* Reboot notifier failure is fatal: unwind dev + id. */
	if (test_bit(WDOG_STOP_ON_REBOOT, &wdd->status)) {
		wdd->reboot_nb.notifier_call = watchdog_reboot_notifier;

		ret = register_reboot_notifier(&wdd->reboot_nb);
		if (ret) {
			pr_err("watchdog%d: Cannot register reboot notifier (%d)\n",
			       wdd->id, ret);
			watchdog_dev_unregister(wdd);
			ida_simple_remove(&watchdog_ida, wdd->id);
			return ret;
		}
	}

	/* Restart handler failure is non-fatal: warn and continue. */
	if (wdd->ops->restart) {
		wdd->restart_nb.notifier_call = watchdog_restart_notifier;

		ret = register_restart_handler(&wdd->restart_nb);
		if (ret)
			pr_warn("watchdog%d: Cannot register restart handler (%d)\n",
				wdd->id, ret);
	}

	return 0;
}
271
272
273
274
275
276
277
278
279
280
281
282
283int watchdog_register_device(struct watchdog_device *wdd)
284{
285 int ret;
286
287 mutex_lock(&wtd_deferred_reg_mutex);
288 if (wtd_deferred_reg_done)
289 ret = __watchdog_register_device(wdd);
290 else
291 ret = watchdog_deferred_registration_add(wdd);
292 mutex_unlock(&wtd_deferred_reg_mutex);
293 return ret;
294}
295EXPORT_SYMBOL_GPL(watchdog_register_device);
296
297static void __watchdog_unregister_device(struct watchdog_device *wdd)
298{
299 if (wdd == NULL)
300 return;
301
302 if (wdd->ops->restart)
303 unregister_restart_handler(&wdd->restart_nb);
304
305 if (test_bit(WDOG_STOP_ON_REBOOT, &wdd->status))
306 unregister_reboot_notifier(&wdd->reboot_nb);
307
308 watchdog_dev_unregister(wdd);
309 ida_simple_remove(&watchdog_ida, wdd->id);
310}
311
312
313
314
315
316
317
318
319
320void watchdog_unregister_device(struct watchdog_device *wdd)
321{
322 mutex_lock(&wtd_deferred_reg_mutex);
323 if (wtd_deferred_reg_done)
324 __watchdog_unregister_device(wdd);
325 else
326 watchdog_deferred_registration_del(wdd);
327 mutex_unlock(&wtd_deferred_reg_mutex);
328}
329
330EXPORT_SYMBOL_GPL(watchdog_unregister_device);
331
332static int __init watchdog_deferred_registration(void)
333{
334 mutex_lock(&wtd_deferred_reg_mutex);
335 wtd_deferred_reg_done = true;
336 while (!list_empty(&wtd_deferred_reg_list)) {
337 struct watchdog_device *wdd;
338
339 wdd = list_first_entry(&wtd_deferred_reg_list,
340 struct watchdog_device, deferred);
341 list_del(&wdd->deferred);
342 __watchdog_register_device(wdd);
343 }
344 mutex_unlock(&wtd_deferred_reg_mutex);
345 return 0;
346}
347
348static int __init watchdog_init(void)
349{
350 int err;
351
352 err = watchdog_dev_init();
353 if (err < 0)
354 return err;
355
356 watchdog_deferred_registration();
357 return 0;
358}
359
/* Core teardown: remove the device framework and free all remaining ids. */
static void __exit watchdog_exit(void)
{
	watchdog_dev_exit();
	ida_destroy(&watchdog_ida);
}
365
366subsys_initcall_sync(watchdog_init);
367module_exit(watchdog_exit);
368
369MODULE_AUTHOR("Alan Cox <alan@lxorguk.ukuu.org.uk>");
370MODULE_AUTHOR("Wim Van Sebroeck <wim@iguana.be>");
371MODULE_DESCRIPTION("WatchDog Timer Driver Core");
372MODULE_LICENSE("GPL");
373