// SPDX-License-Identifier: GPL-2.0-only
/*
 *	HPE WatchDog Driver
 *	based on
 *
 *	SoftDog	0.05:	A Software Watchdog Device
 *
 *	(c) Copyright 2018 Hewlett Packard Enterprise Development LP
 *	Thomas Mingarelli <thomas.mingarelli@hpe.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/device.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/types.h>
#include <linux/watchdog.h>
#include <asm/nmi.h>

#define HPWDT_VERSION			"2.0.3"
#define SECS_TO_TICKS(secs)		((secs) * 1000 / 128)
#define TICKS_TO_SECS(ticks)		((ticks) * 128 / 1000)
#define HPWDT_MAX_TICKS			65535
#define HPWDT_MAX_TIMER			TICKS_TO_SECS(HPWDT_MAX_TICKS)
#define DEFAULT_MARGIN			30
#define PRETIMEOUT_SEC			9
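/*
 * The hardware counts in ticks of 128 ms, as implied by the conversion
 * macros above.  For example, the 30 second DEFAULT_MARGIN becomes
 * 30 * 1000 / 128 = 234 ticks, and the 16-bit maximum of 65535 ticks
 * corresponds to 65535 * 128 / 1000 = 8388 seconds (roughly 2.3 hours).
 */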

static bool ilo5;
static unsigned int soft_margin = DEFAULT_MARGIN;	/* in seconds */
static bool nowayout = WATCHDOG_NOWAYOUT;
static bool pretimeout = IS_ENABLED(CONFIG_HPWDT_NMI_DECODING);
static int kdumptimeout = -1;

static void __iomem *pci_mem_addr;		/* the PCI-memory address */
static unsigned long __iomem *hpwdt_nmistat;
static unsigned long __iomem *hpwdt_timer_reg;
static unsigned long __iomem *hpwdt_timer_con;

static const struct pci_device_id hpwdt_devices[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_COMPAQ, 0xB203) },	/* iLO2 */
	{ PCI_DEVICE(PCI_VENDOR_ID_HP, 0x3306) },	/* iLO3 */
	{0},			/* terminate list */
};
MODULE_DEVICE_TABLE(pci, hpwdt_devices);

static const struct pci_device_id hpwdt_blacklist[] = {
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_HP, 0x3306, PCI_VENDOR_ID_HP, 0x1979) }, /* auxiliary iLO */
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_HP, 0x3306, PCI_VENDOR_ID_HP_3PAR, 0x0289) },  /* CL */
	{0},			/* terminate list */
};

static struct watchdog_device hpwdt_dev;
/*
 *	Watchdog operations
 */
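/*
 * The timer control register layout is not documented in this file.
 * From the accesses below, bit 0 appears to be the run/enable bit
 * (tested in hpwdt_hw_is_running() and cleared in hpwdt_stop()), and
 * bit 2 appears to arm the pre-timeout NMI, since it is set only when
 * pretimeout is enabled.  hpwdt_start() also sets 0x80; what that bit
 * controls is not spelled out here.
 */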
static int hpwdt_hw_is_running(void)
{
	return ioread8(hpwdt_timer_con) & 0x01;
}

static int hpwdt_start(struct watchdog_device *wdd)
{
	int control = 0x81 | (pretimeout ? 0x4 : 0);
	int reload = SECS_TO_TICKS(min(wdd->timeout, wdd->max_hw_heartbeat_ms/1000));

	dev_dbg(wdd->parent, "start watchdog 0x%08x:0x%08x:0x%02x\n", wdd->timeout, reload, control);
	iowrite16(reload, hpwdt_timer_reg);
	iowrite8(control, hpwdt_timer_con);

	return 0;
}

static void hpwdt_stop(void)
{
	unsigned long data;

	pr_debug("stop  watchdog\n");

	data = ioread8(hpwdt_timer_con);
	data &= 0xFE;
	iowrite8(data, hpwdt_timer_con);
}

static int hpwdt_stop_core(struct watchdog_device *wdd)
{
	hpwdt_stop();

	return 0;
}

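/*
 * The reload value is written with iowrite16() and clamped at
 * HPWDT_MAX_TICKS, so the counter is presumably a 16-bit register.
 * Requests beyond the hardware maximum are capped here; timeouts longer
 * than that are expected to be covered by the watchdog core in software
 * (see max_hw_heartbeat_ms below).
 */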
static void hpwdt_ping_ticks(int val)
{
	val = min(val, HPWDT_MAX_TICKS);
	iowrite16(val, hpwdt_timer_reg);
}

static int hpwdt_ping(struct watchdog_device *wdd)
{
	int reload = SECS_TO_TICKS(min(wdd->timeout, wdd->max_hw_heartbeat_ms/1000));

	dev_dbg(wdd->parent, "ping  watchdog 0x%08x:0x%08x\n", wdd->timeout, reload);
	hpwdt_ping_ticks(reload);

	return 0;
}

static unsigned int hpwdt_gettimeleft(struct watchdog_device *wdd)
{
	return TICKS_TO_SECS(ioread16(hpwdt_timer_reg));
}

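/*
 * Changing the timeout can invalidate the pre-timeout: if the new
 * timeout is not strictly greater than the current pre-timeout, the
 * pre-timeout is dropped and, if the watchdog is active, the hardware
 * is reprogrammed without the pre-timeout NMI.
 */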
static int hpwdt_settimeout(struct watchdog_device *wdd, unsigned int val)
{
	dev_dbg(wdd->parent, "set_timeout = %d\n", val);

	wdd->timeout = val;
	if (val <= wdd->pretimeout) {
		dev_dbg(wdd->parent, "pretimeout >= timeout. Setting pretimeout to zero\n");
		wdd->pretimeout = 0;
		pretimeout = 0;
		if (watchdog_active(wdd))
			hpwdt_start(wdd);
	}
	hpwdt_ping(wdd);

	return 0;
}

#ifdef CONFIG_HPWDT_NMI_DECODING
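/*
 * The hardware pre-timeout is effectively fixed: any non-zero request
 * is rounded to PRETIMEOUT_SEC (9 seconds), and it is rejected when it
 * would not fit below the current timeout.
 */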
static int hpwdt_set_pretimeout(struct watchdog_device *wdd, unsigned int req)
{
	unsigned int val = 0;

	dev_dbg(wdd->parent, "set_pretimeout = %d\n", req);
	if (req) {
		val = PRETIMEOUT_SEC;
		if (val >= wdd->timeout)
			return -EINVAL;
	}

	if (val != req)
		dev_dbg(wdd->parent, "Rounding pretimeout to: %d\n", val);

	wdd->pretimeout = val;
	pretimeout = !!val;

	if (watchdog_active(wdd))
		hpwdt_start(wdd);

	return 0;
}

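/*
 * Bits 1 and 2 of the NMI status register (the 0x6 mask) appear to flag
 * an NMI raised by the iLO hardware itself; a zero value means the NMI
 * came from elsewhere and, on iLO5, is left for other handlers.
 */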
static int hpwdt_my_nmi(void)
{
	return ioread8(hpwdt_nmistat) & 0x6;
}

/*
 *	NMI Handler
 */
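/*
 * On a watchdog NMI the handler prepares for the crash path according
 * to kdumptimeout: a negative value (the default) stops the watchdog
 * entirely, zero leaves the running timer untouched, and a positive
 * value re-arms the timer so the crash kernel gets at least that many
 * seconds (never less than the configured timeout) to take over.  The
 * NMI status byte is packed into the leading "00:" of the panic message
 * before nmi_panic() is called.
 */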
static int hpwdt_pretimeout(unsigned int ulReason, struct pt_regs *regs)
{
	unsigned int mynmi = hpwdt_my_nmi();
	static char panic_msg[] =
		"00: An NMI occurred. Depending on your system the reason "
		"for the NMI is logged in any one of the following resources:\n"
		"1. Integrated Management Log (IML)\n"
		"2. OA Syslog\n"
		"3. OA Forward Progress Log\n"
		"4. iLO Event Log";

	if (ilo5 && ulReason == NMI_UNKNOWN && !mynmi)
		return NMI_DONE;

	if (ilo5 && !pretimeout && !mynmi)
		return NMI_DONE;

	if (kdumptimeout < 0)
		hpwdt_stop();
	else if (kdumptimeout == 0)
		;
	else {
		unsigned int val = max((unsigned int)kdumptimeout, hpwdt_dev.timeout);
		hpwdt_ping_ticks(SECS_TO_TICKS(val));
	}

	hex_byte_pack(panic_msg, mynmi);
	nmi_panic(regs, panic_msg);

	return NMI_HANDLED;
}
#endif /* CONFIG_HPWDT_NMI_DECODING */


static const struct watchdog_info ident = {
	.options = WDIOF_PRETIMEOUT    |
		   WDIOF_SETTIMEOUT    |
		   WDIOF_KEEPALIVEPING |
		   WDIOF_MAGICCLOSE,
	.identity = "HPE iLO2+ HW Watchdog Timer",
};

/*
 *	Kernel interfaces
 */

static const struct watchdog_ops hpwdt_ops = {
	.owner		= THIS_MODULE,
	.start		= hpwdt_start,
	.stop		= hpwdt_stop_core,
	.ping		= hpwdt_ping,
	.set_timeout	= hpwdt_settimeout,
	.get_timeleft	= hpwdt_gettimeleft,
#ifdef CONFIG_HPWDT_NMI_DECODING
	.set_pretimeout	= hpwdt_set_pretimeout,
#endif
};

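/*
 * min_timeout is 1 second and max_hw_heartbeat_ms reflects the 16-bit
 * tick counter (about 8388 seconds).  Timeouts above the hardware
 * maximum are still accepted; the watchdog core is expected to keep
 * pinging the hardware on the driver's behalf until the full timeout
 * expires.
 */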
static struct watchdog_device hpwdt_dev = {
	.info		= &ident,
	.ops		= &hpwdt_ops,
	.min_timeout	= 1,
	.timeout	= DEFAULT_MARGIN,
	.pretimeout	= PRETIMEOUT_SEC,
	.max_hw_heartbeat_ms	= HPWDT_MAX_TIMER * 1000,
};


/*
 *	Init & Exit
 */

static int hpwdt_init_nmi_decoding(struct pci_dev *dev)
{
#ifdef CONFIG_HPWDT_NMI_DECODING
	int retval;
	/*
	 * Only one function can register for NMI_UNKNOWN
	 */
	retval = register_nmi_handler(NMI_UNKNOWN, hpwdt_pretimeout, 0, "hpwdt");
	if (retval)
		goto error;
	retval = register_nmi_handler(NMI_SERR, hpwdt_pretimeout, 0, "hpwdt");
	if (retval)
		goto error1;
	retval = register_nmi_handler(NMI_IO_CHECK, hpwdt_pretimeout, 0, "hpwdt");
	if (retval)
		goto error2;

	dev_info(&dev->dev,
		"HPE Watchdog Timer Driver: NMI decoding initialized\n");

	return 0;

error2:
	unregister_nmi_handler(NMI_SERR, "hpwdt");
error1:
	unregister_nmi_handler(NMI_UNKNOWN, "hpwdt");
error:
	dev_warn(&dev->dev,
		"Unable to register a die notifier (err=%d).\n",
		retval);
	return retval;
#endif	/* CONFIG_HPWDT_NMI_DECODING */
	return 0;
}

static void hpwdt_exit_nmi_decoding(void)
{
#ifdef CONFIG_HPWDT_NMI_DECODING
	unregister_nmi_handler(NMI_UNKNOWN, "hpwdt");
	unregister_nmi_handler(NMI_SERR, "hpwdt");
	unregister_nmi_handler(NMI_IO_CHECK, "hpwdt");
#endif
}

static int hpwdt_init_one(struct pci_dev *dev,
					const struct pci_device_id *ent)
{
	int retval;

	/*
	 * First let's find out if we are on an iLO2+ server. We will
	 * not run on a legacy ASM box.
	 * So we only support the G5 ProLiant servers and higher.
	 */
	if (dev->subsystem_vendor != PCI_VENDOR_ID_HP &&
	    dev->subsystem_vendor != PCI_VENDOR_ID_HP_3PAR) {
		dev_warn(&dev->dev,
			"This server does not have an iLO2+ ASIC.\n");
		return -ENODEV;
	}

	if (pci_match_id(hpwdt_blacklist, dev)) {
		dev_dbg(&dev->dev, "Not supported on this device\n");
		return -ENODEV;
	}

	if (pci_enable_device(dev)) {
		dev_warn(&dev->dev,
			"Not possible to enable PCI Device: 0x%x:0x%x.\n",
			ent->vendor, ent->device);
		return -ENODEV;
	}

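	/*
	 * Map the first 0x80 bytes of BAR 1.  The registers used by this
	 * driver sit at fixed offsets inside that window: 0x6e for the
	 * NMI status byte, 0x70 for the 16-bit reload/count register and
	 * 0x72 for the control byte, as assigned just below.
	 */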
	pci_mem_addr = pci_iomap(dev, 1, 0x80);
	if (!pci_mem_addr) {
		dev_warn(&dev->dev,
			"Unable to detect the iLO2+ server memory.\n");
		retval = -ENOMEM;
		goto error_pci_iomap;
	}
	hpwdt_nmistat	= pci_mem_addr + 0x6e;
	hpwdt_timer_reg = pci_mem_addr + 0x70;
	hpwdt_timer_con = pci_mem_addr + 0x72;

	/* Have the core update running timer until user space is ready */
	if (hpwdt_hw_is_running()) {
		dev_info(&dev->dev, "timer is running\n");
		set_bit(WDOG_HW_RUNNING, &hpwdt_dev.status);
	}

	/* Initialize NMI Decoding functionality */
	retval = hpwdt_init_nmi_decoding(dev);
	if (retval != 0)
		goto error_init_nmi_decoding;

	watchdog_stop_on_unregister(&hpwdt_dev);
	watchdog_set_nowayout(&hpwdt_dev, nowayout);
	watchdog_init_timeout(&hpwdt_dev, soft_margin, NULL);

	if (pretimeout && hpwdt_dev.timeout <= PRETIMEOUT_SEC) {
		dev_warn(&dev->dev, "timeout <= pretimeout. Setting pretimeout to zero\n");
		pretimeout = 0;
	}
	hpwdt_dev.pretimeout = pretimeout ? PRETIMEOUT_SEC : 0;
	kdumptimeout = min(kdumptimeout, HPWDT_MAX_TIMER);

	hpwdt_dev.parent = &dev->dev;
	retval = watchdog_register_device(&hpwdt_dev);
	if (retval < 0)
		goto error_wd_register;

	dev_info(&dev->dev, "HPE Watchdog Timer Driver: Version: %s\n",
				HPWDT_VERSION);
	dev_info(&dev->dev, "timeout: %d seconds (nowayout=%d)\n",
				hpwdt_dev.timeout, nowayout);
	dev_info(&dev->dev, "pretimeout: %s.\n",
				pretimeout ? "on" : "off");
	dev_info(&dev->dev, "kdumptimeout: %d.\n", kdumptimeout);

	if (dev->subsystem_vendor == PCI_VENDOR_ID_HP_3PAR)
		ilo5 = true;

	return 0;

error_wd_register:
	hpwdt_exit_nmi_decoding();
error_init_nmi_decoding:
	pci_iounmap(dev, pci_mem_addr);
error_pci_iomap:
	pci_disable_device(dev);
	return retval;
}

static void hpwdt_exit(struct pci_dev *dev)
{
	watchdog_unregister_device(&hpwdt_dev);
	hpwdt_exit_nmi_decoding();
	pci_iounmap(dev, pci_mem_addr);
	pci_disable_device(dev);
}

static struct pci_driver hpwdt_driver = {
	.name = "hpwdt",
	.id_table = hpwdt_devices,
	.probe = hpwdt_init_one,
	.remove = hpwdt_exit,
};

MODULE_AUTHOR("Tom Mingarelli");
MODULE_DESCRIPTION("HPE watchdog driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(HPWDT_VERSION);

module_param(soft_margin, int, 0);
MODULE_PARM_DESC(soft_margin, "Watchdog timeout in seconds");

module_param_named(timeout, soft_margin, int, 0);
MODULE_PARM_DESC(timeout, "Alias of soft_margin");

module_param(nowayout, bool, 0);
MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
		__MODULE_STRING(WATCHDOG_NOWAYOUT) ")");

module_param(kdumptimeout, int, 0444);
MODULE_PARM_DESC(kdumptimeout, "Timeout applied for crash kernel transition in seconds");

#ifdef CONFIG_HPWDT_NMI_DECODING
module_param(pretimeout, bool, 0);
MODULE_PARM_DESC(pretimeout, "Watchdog pretimeout enabled");
#endif
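/*
 * A minimal usage sketch (example values, not defaults): the module
 * parameters above can be set at load time, e.g.
 *
 *   modprobe hpwdt soft_margin=60 nowayout=1 kdumptimeout=100
 *
 * and "pretimeout=0" can be added when CONFIG_HPWDT_NMI_DECODING is
 * enabled but the pre-timeout NMI is not wanted.
 */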

module_pci_driver(hpwdt_driver);