// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kontron PLD watchdog driver
 *
 * Copyright (c) 2010-2013 Kontron Europe GmbH
 * Author: Michael Brunner <michael.brunner@kontron.com>
 *
 * Note: From the PLD watchdog point of view, timeout and pretimeout are
 * defined differently than in the kernel: the pretimeout stage runs out
 * first, before the timeout stage becomes active.
 *
 * Kernel/API:                  P-----| pretimeout
 *            |-----------------------T timeout
 * Watchdog:  |-----------------P       pretimeout_stage
 *                              |-----T timeout_stage
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/uaccess.h>
#include <linux/watchdog.h>
#include <linux/platform_device.h>
#include <linux/mfd/kempld.h>

#define KEMPLD_WDT_STAGE_TIMEOUT(x)	(0x1b + (x) * 4)
#define KEMPLD_WDT_STAGE_CFG(x)		(0x18 + (x))
#define STAGE_CFG_GET_PRESCALER(x)	(((x) & 0x30) >> 4)
#define STAGE_CFG_SET_PRESCALER(x)	(((x) & 0x3) << 4)
#define STAGE_CFG_PRESCALER_MASK	0x30
#define STAGE_CFG_ACTION_MASK		0x7
#define STAGE_CFG_ASSERT		(1 << 3)

#define KEMPLD_WDT_MAX_STAGES		2
#define KEMPLD_WDT_KICK			0x16
#define KEMPLD_WDT_CFG			0x17
#define KEMPLD_WDT_CFG_ENABLE		0x10
#define KEMPLD_WDT_CFG_ENABLE_LOCK	0x8
#define KEMPLD_WDT_CFG_GLOBAL_LOCK	0x80

enum {
	ACTION_NONE = 0,
	ACTION_RESET,
	ACTION_NMI,
	ACTION_SMI,
	ACTION_SCI,
	ACTION_DELAY,
};

enum {
	STAGE_TIMEOUT = 0,
	STAGE_PRETIMEOUT,
};

enum {
	PRESCALER_21 = 0,
	PRESCALER_17,
	PRESCALER_12,
};

static const u32 kempld_prescaler[] = {
	[PRESCALER_21] = (1 << 21) - 1,
	[PRESCALER_17] = (1 << 17) - 1,
	[PRESCALER_12] = (1 << 12) - 1,
	0,
};

struct kempld_wdt_stage {
	unsigned int	id;
	u32		mask;
};

struct kempld_wdt_data {
	struct kempld_device_data	*pld;
	struct watchdog_device		wdd;
	unsigned int			pretimeout;
	struct kempld_wdt_stage		stage[KEMPLD_WDT_MAX_STAGES];
#ifdef CONFIG_PM
	u8				pm_status_store;
#endif
};

#define DEFAULT_TIMEOUT		30 /* seconds */
#define DEFAULT_PRETIMEOUT	0

static unsigned int timeout = DEFAULT_TIMEOUT;
module_param(timeout, uint, 0);
MODULE_PARM_DESC(timeout,
	"Watchdog timeout in seconds. (>=0, default="
	__MODULE_STRING(DEFAULT_TIMEOUT) ")");

static unsigned int pretimeout = DEFAULT_PRETIMEOUT;
module_param(pretimeout, uint, 0);
MODULE_PARM_DESC(pretimeout,
	"Watchdog pretimeout in seconds. (>=0, default="
	__MODULE_STRING(DEFAULT_PRETIMEOUT) ")");

static bool nowayout = WATCHDOG_NOWAYOUT;
module_param(nowayout, bool, 0);
MODULE_PARM_DESC(nowayout,
	"Watchdog cannot be stopped once started (default="
	__MODULE_STRING(WATCHDOG_NOWAYOUT) ")");

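/*
 * Select the action a watchdog stage triggers when it expires. ACTION_RESET
 * additionally sets the stage's assert bit; all other actions clear it.
 */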
static int kempld_wdt_set_stage_action(struct kempld_wdt_data *wdt_data,
				       struct kempld_wdt_stage *stage,
				       u8 action)
{
	struct kempld_device_data *pld = wdt_data->pld;
	u8 stage_cfg;

	if (!stage || !stage->mask)
		return -EINVAL;

	kempld_get_mutex(pld);
	stage_cfg = kempld_read8(pld, KEMPLD_WDT_STAGE_CFG(stage->id));
	stage_cfg &= ~STAGE_CFG_ACTION_MASK;
	stage_cfg |= (action & STAGE_CFG_ACTION_MASK);

	if (action == ACTION_RESET)
		stage_cfg |= STAGE_CFG_ASSERT;
	else
		stage_cfg &= ~STAGE_CFG_ASSERT;

	kempld_write8(pld, KEMPLD_WDT_STAGE_CFG(stage->id), stage_cfg);
	kempld_release_mutex(pld);

	return 0;
}

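/*
 * Convert a stage timeout from seconds to prescaled PLD clock ticks (rounded
 * up) and program it. The result must fit into the stage's register mask.
 */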
static int kempld_wdt_set_stage_timeout(struct kempld_wdt_data *wdt_data,
					struct kempld_wdt_stage *stage,
					unsigned int timeout)
{
	struct kempld_device_data *pld = wdt_data->pld;
	u32 prescaler;
	u64 stage_timeout64;
	u32 stage_timeout;
	u32 remainder;
	u8 stage_cfg;

	prescaler = kempld_prescaler[PRESCALER_21];

	if (!stage)
		return -EINVAL;

	stage_timeout64 = (u64)timeout * pld->pld_clock;
	remainder = do_div(stage_timeout64, prescaler);
	if (remainder)
		stage_timeout64++;

	if (stage_timeout64 > stage->mask)
		return -EINVAL;

	stage_timeout = stage_timeout64 & stage->mask;

	kempld_get_mutex(pld);
	stage_cfg = kempld_read8(pld, KEMPLD_WDT_STAGE_CFG(stage->id));
	stage_cfg &= ~STAGE_CFG_PRESCALER_MASK;
	stage_cfg |= STAGE_CFG_SET_PRESCALER(PRESCALER_21);
	kempld_write8(pld, KEMPLD_WDT_STAGE_CFG(stage->id), stage_cfg);
	kempld_write32(pld, KEMPLD_WDT_STAGE_TIMEOUT(stage->id),
		       stage_timeout);
	kempld_release_mutex(pld);

	return 0;
}

/*
 * kempld_get_mutex must be called prior to calling this function.
 */
static unsigned int kempld_wdt_get_timeout(struct kempld_wdt_data *wdt_data,
					   struct kempld_wdt_stage *stage)
{
	struct kempld_device_data *pld = wdt_data->pld;
	unsigned int timeout;
	u64 stage_timeout;
	u32 prescaler;
	u32 remainder;
	u8 stage_cfg;

	if (!stage->mask)
		return 0;

	stage_cfg = kempld_read8(pld, KEMPLD_WDT_STAGE_CFG(stage->id));
	stage_timeout = kempld_read32(pld, KEMPLD_WDT_STAGE_TIMEOUT(stage->id));
	prescaler = kempld_prescaler[STAGE_CFG_GET_PRESCALER(stage_cfg)];

	stage_timeout = (stage_timeout & stage->mask) * prescaler;
	remainder = do_div(stage_timeout, pld->pld_clock);
	if (remainder)
		stage_timeout++;

	timeout = stage_timeout;
	WARN_ON_ONCE(timeout != stage_timeout);

	return timeout;
}

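/*
 * Program the timeout stage, which resets the system on expiry. If a
 * pretimeout is configured, the timeout stage only covers the time that
 * remains after the pretimeout stage has run out (see diagram above).
 */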
static int kempld_wdt_set_timeout(struct watchdog_device *wdd,
				  unsigned int timeout)
{
	struct kempld_wdt_data *wdt_data = watchdog_get_drvdata(wdd);
	struct kempld_wdt_stage *pretimeout_stage;
	struct kempld_wdt_stage *timeout_stage;
	int ret;

	timeout_stage = &wdt_data->stage[STAGE_TIMEOUT];
	pretimeout_stage = &wdt_data->stage[STAGE_PRETIMEOUT];

	if (pretimeout_stage->mask && wdt_data->pretimeout > 0)
		timeout = wdt_data->pretimeout;

	ret = kempld_wdt_set_stage_action(wdt_data, timeout_stage,
					  ACTION_RESET);
	if (ret)
		return ret;
	ret = kempld_wdt_set_stage_timeout(wdt_data, timeout_stage,
					   timeout);
	if (ret)
		return ret;

	wdd->timeout = timeout;
	return 0;
}

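/*
 * Program the pretimeout stage. A non-zero pretimeout arms the stage with an
 * NMI action and a length of wdd->timeout - pretimeout, so the NMI fires
 * 'pretimeout' seconds before the reset.
 */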
static int kempld_wdt_set_pretimeout(struct watchdog_device *wdd,
				     unsigned int pretimeout)
{
	struct kempld_wdt_data *wdt_data = watchdog_get_drvdata(wdd);
	struct kempld_wdt_stage *pretimeout_stage;
	u8 action = ACTION_NONE;
	int ret;

	pretimeout_stage = &wdt_data->stage[STAGE_PRETIMEOUT];

	if (!pretimeout_stage->mask)
		return -ENXIO;

	if (pretimeout > wdd->timeout)
		return -EINVAL;

	if (pretimeout > 0)
		action = ACTION_NMI;

	ret = kempld_wdt_set_stage_action(wdt_data, pretimeout_stage,
					  action);
	if (ret)
		return ret;
	ret = kempld_wdt_set_stage_timeout(wdt_data, pretimeout_stage,
					   wdd->timeout - pretimeout);
	if (ret)
		return ret;

	wdt_data->pretimeout = pretimeout;
	return 0;
}

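/*
 * Read the stage configuration back from the PLD and translate it into the
 * kernel's notion of timeout and pretimeout.
 */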
static void kempld_wdt_update_timeouts(struct kempld_wdt_data *wdt_data)
{
	struct kempld_device_data *pld = wdt_data->pld;
	struct kempld_wdt_stage *pretimeout_stage;
	struct kempld_wdt_stage *timeout_stage;
	unsigned int pretimeout, timeout;

	pretimeout_stage = &wdt_data->stage[STAGE_PRETIMEOUT];
	timeout_stage = &wdt_data->stage[STAGE_TIMEOUT];

	kempld_get_mutex(pld);
	pretimeout = kempld_wdt_get_timeout(wdt_data, pretimeout_stage);
	timeout = kempld_wdt_get_timeout(wdt_data, timeout_stage);
	kempld_release_mutex(pld);

	if (pretimeout)
		wdt_data->pretimeout = timeout;
	else
		wdt_data->pretimeout = 0;

	wdt_data->wdd.timeout = pretimeout + timeout;
}

static int kempld_wdt_start(struct watchdog_device *wdd)
{
	struct kempld_wdt_data *wdt_data = watchdog_get_drvdata(wdd);
	struct kempld_device_data *pld = wdt_data->pld;
	u8 status;
	int ret;

	ret = kempld_wdt_set_timeout(wdd, wdd->timeout);
	if (ret)
		return ret;

	kempld_get_mutex(pld);
	status = kempld_read8(pld, KEMPLD_WDT_CFG);
	status |= KEMPLD_WDT_CFG_ENABLE;
	kempld_write8(pld, KEMPLD_WDT_CFG, status);
	status = kempld_read8(pld, KEMPLD_WDT_CFG);
	kempld_release_mutex(pld);

	/* Check if the watchdog was enabled */
	if (!(status & KEMPLD_WDT_CFG_ENABLE))
		return -EACCES;

	return 0;
}

static int kempld_wdt_stop(struct watchdog_device *wdd)
{
	struct kempld_wdt_data *wdt_data = watchdog_get_drvdata(wdd);
	struct kempld_device_data *pld = wdt_data->pld;
	u8 status;

	kempld_get_mutex(pld);
	status = kempld_read8(pld, KEMPLD_WDT_CFG);
	status &= ~KEMPLD_WDT_CFG_ENABLE;
	kempld_write8(pld, KEMPLD_WDT_CFG, status);
	status = kempld_read8(pld, KEMPLD_WDT_CFG);
	kempld_release_mutex(pld);

	/* Check if the watchdog was disabled */
	if (status & KEMPLD_WDT_CFG_ENABLE)
		return -EACCES;

	return 0;
}

static int kempld_wdt_keepalive(struct watchdog_device *wdd)
{
	struct kempld_wdt_data *wdt_data = watchdog_get_drvdata(wdd);
	struct kempld_device_data *pld = wdt_data->pld;

	kempld_get_mutex(pld);
	kempld_write8(pld, KEMPLD_WDT_KICK, 'K');
	kempld_release_mutex(pld);

	return 0;
}

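/*
 * Handle WDIOC_SETPRETIMEOUT and WDIOC_GETPRETIMEOUT here; the watchdog is
 * kicked after a successful pretimeout update. Unknown commands return
 * -ENOIOCTLCMD so the watchdog core can handle them.
 */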
static long kempld_wdt_ioctl(struct watchdog_device *wdd, unsigned int cmd,
			     unsigned long arg)
{
	struct kempld_wdt_data *wdt_data = watchdog_get_drvdata(wdd);
	void __user *argp = (void __user *)arg;
	int ret = -ENOIOCTLCMD;
	int __user *p = argp;
	int new_value;

	switch (cmd) {
	case WDIOC_SETPRETIMEOUT:
		if (get_user(new_value, p))
			return -EFAULT;
		ret = kempld_wdt_set_pretimeout(wdd, new_value);
		if (ret)
			return ret;
		ret = kempld_wdt_keepalive(wdd);
		break;
	case WDIOC_GETPRETIMEOUT:
		ret = put_user(wdt_data->pretimeout, (int __user *)arg);
		break;
	}

	return ret;
}

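/*
 * Detect the available watchdog stages. Each byte of a stage's timeout
 * register is probed with a test write; writable bytes form the stage mask.
 * If a second usable stage is found and the PLD supports NMI, the first
 * stage becomes the pretimeout stage and the second the timeout stage.
 */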
static int kempld_wdt_probe_stages(struct watchdog_device *wdd)
{
	struct kempld_wdt_data *wdt_data = watchdog_get_drvdata(wdd);
	struct kempld_device_data *pld = wdt_data->pld;
	struct kempld_wdt_stage *pretimeout_stage;
	struct kempld_wdt_stage *timeout_stage;
	u8 index, data, data_orig;
	u32 mask;
	int i, j;

	pretimeout_stage = &wdt_data->stage[STAGE_PRETIMEOUT];
	timeout_stage = &wdt_data->stage[STAGE_TIMEOUT];

	pretimeout_stage->mask = 0;
	timeout_stage->mask = 0;

	for (i = 0; i < 3; i++) {
		index = KEMPLD_WDT_STAGE_TIMEOUT(i);
		mask = 0;

		kempld_get_mutex(pld);
		/* Probe each byte individually. */
		for (j = 0; j < 4; j++) {
			data_orig = kempld_read8(pld, index + j);
			kempld_write8(pld, index + j, 0x00);
			data = kempld_read8(pld, index + j);
			/* A failed write means this byte is reserved */
			if (data != 0x00)
				break;
			kempld_write8(pld, index + j, data_orig);
			mask |= 0xff << (j * 8);
		}
		kempld_release_mutex(pld);

		/* Assign available stages to timeout and pretimeout */
		if (!timeout_stage->mask) {
			timeout_stage->mask = mask;
			timeout_stage->id = i;
		} else {
			if (pld->feature_mask & KEMPLD_FEATURE_BIT_NMI) {
				pretimeout_stage->mask = timeout_stage->mask;
				timeout_stage->mask = mask;
				pretimeout_stage->id = timeout_stage->id;
				timeout_stage->id = i;
			}
			break;
		}
	}

	if (!timeout_stage->mask)
		return -ENODEV;

	return 0;
}

static const struct watchdog_info kempld_wdt_info = {
	.identity	= "KEMPLD Watchdog",
	.options	= WDIOF_SETTIMEOUT |
			  WDIOF_KEEPALIVEPING |
			  WDIOF_MAGICCLOSE |
			  WDIOF_PRETIMEOUT
};

static const struct watchdog_ops kempld_wdt_ops = {
	.owner		= THIS_MODULE,
	.start		= kempld_wdt_start,
	.stop		= kempld_wdt_stop,
	.ping		= kempld_wdt_keepalive,
	.set_timeout	= kempld_wdt_set_timeout,
	.ioctl		= kempld_wdt_ioctl,
};

static int kempld_wdt_probe(struct platform_device *pdev)
{
	struct kempld_device_data *pld = dev_get_drvdata(pdev->dev.parent);
	struct kempld_wdt_data *wdt_data;
	struct device *dev = &pdev->dev;
	struct watchdog_device *wdd;
	u8 status;
	int ret = 0;

	wdt_data = devm_kzalloc(dev, sizeof(*wdt_data), GFP_KERNEL);
	if (!wdt_data)
		return -ENOMEM;

	wdt_data->pld = pld;
	wdd = &wdt_data->wdd;
	wdd->parent = dev;

	kempld_get_mutex(pld);
	status = kempld_read8(pld, KEMPLD_WDT_CFG);
	kempld_release_mutex(pld);

	/* Enable nowayout if watchdog is already locked */
	if (status & (KEMPLD_WDT_CFG_ENABLE_LOCK |
		      KEMPLD_WDT_CFG_GLOBAL_LOCK)) {
		if (!nowayout)
			dev_warn(dev,
				 "Forcing nowayout - watchdog lock enabled!\n");
		nowayout = true;
	}

	wdd->info = &kempld_wdt_info;
	wdd->ops = &kempld_wdt_ops;

	watchdog_set_drvdata(wdd, wdt_data);
	watchdog_set_nowayout(wdd, nowayout);

	ret = kempld_wdt_probe_stages(wdd);
	if (ret)
		return ret;

	kempld_wdt_set_timeout(wdd, timeout);
	kempld_wdt_set_pretimeout(wdd, pretimeout);

	/* Check if watchdog is already enabled */
	if (status & KEMPLD_WDT_CFG_ENABLE) {
		/* Get current watchdog settings */
		kempld_wdt_update_timeouts(wdt_data);
		dev_info(dev, "Watchdog was already enabled\n");
	}

	platform_set_drvdata(pdev, wdt_data);
	watchdog_stop_on_reboot(wdd);
	watchdog_stop_on_unregister(wdd);
	ret = devm_watchdog_register_device(dev, wdd);
	if (ret)
		return ret;

	dev_info(dev, "Watchdog registered with %ds timeout\n", wdd->timeout);

	return 0;
}

#ifdef CONFIG_PM
/* Disable watchdog if it is active during suspend */
static int kempld_wdt_suspend(struct platform_device *pdev,
			      pm_message_t message)
{
	struct kempld_wdt_data *wdt_data = platform_get_drvdata(pdev);
	struct kempld_device_data *pld = wdt_data->pld;
	struct watchdog_device *wdd = &wdt_data->wdd;

	kempld_get_mutex(pld);
	wdt_data->pm_status_store = kempld_read8(pld, KEMPLD_WDT_CFG);
	kempld_release_mutex(pld);

	kempld_wdt_update_timeouts(wdt_data);

	if (wdt_data->pm_status_store & KEMPLD_WDT_CFG_ENABLE)
		return kempld_wdt_stop(wdd);

	return 0;
}

/* Enable watchdog and configure it if necessary */
static int kempld_wdt_resume(struct platform_device *pdev)
{
	struct kempld_wdt_data *wdt_data = platform_get_drvdata(pdev);
	struct watchdog_device *wdd = &wdt_data->wdd;

	/*
	 * If the watchdog was stopped before suspend, make sure it gets
	 * disabled again, in case the BIOS has enabled it during resume.
	 */
	if (wdt_data->pm_status_store & KEMPLD_WDT_CFG_ENABLE)
		return kempld_wdt_start(wdd);
	else
		return kempld_wdt_stop(wdd);
}
#else
#define kempld_wdt_suspend	NULL
#define kempld_wdt_resume	NULL
#endif

static struct platform_driver kempld_wdt_driver = {
	.driver		= {
		.name	= "kempld-wdt",
	},
	.probe		= kempld_wdt_probe,
	.suspend	= kempld_wdt_suspend,
	.resume		= kempld_wdt_resume,
};

module_platform_driver(kempld_wdt_driver);

MODULE_DESCRIPTION("KEM PLD Watchdog Driver");
MODULE_AUTHOR("Michael Brunner <michael.brunner@kontron.com>");
MODULE_LICENSE("GPL");