// SPDX-License-Identifier: GPL-2.0-only
/*
 *  dcdbas.c: Dell Systems Management Base Driver
 *
 *  The Dell Systems Management Base Driver provides a sysfs interface for
 *  systems management software to perform System Management Interrupts (SMIs)
 *  and Host Control Actions (power cycle or power off after OS shutdown) on
 *  Dell systems.
 *
 *  See Documentation/driver-api/dcdbas.rst for more information.
 *
 *  Copyright (C) 1995-2006 Dell Inc.
 */

#include <linux/platform_device.h>
#include <linux/acpi.h>
#include <linux/dma-mapping.h>
#include <linux/dmi.h>
#include <linux/errno.h>
#include <linux/cpu.h>
#include <linux/gfp.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/mc146818rtc.h>
#include <linux/module.h>
#include <linux/reboot.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mutex.h>

#include "dcdbas.h"

#define DRIVER_NAME		"dcdbas"
#define DRIVER_VERSION		"5.6.0-3.4"
#define DRIVER_DESCRIPTION	"Dell Systems Management Base Driver"

static struct platform_device *dcdbas_pdev;

static u8 *smi_data_buf;
static dma_addr_t smi_data_buf_handle;
static unsigned long smi_data_buf_size;
static unsigned long max_smi_data_buf_size = MAX_SMI_DATA_BUF_SIZE;
static u32 smi_data_buf_phys_addr;
static DEFINE_MUTEX(smi_data_lock);
static u8 *bios_buffer;

static unsigned int host_control_action;
static unsigned int host_control_smi_type;
static unsigned int host_control_on_shutdown;

static bool wsmt_enabled;
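
/*
 * The SMI data buffer normally lives in a 32-bit DMA coherent allocation
 * made by smi_data_buf_realloc().  When the firmware advertises a
 * WSMT-protected communication buffer, dcdbas_check_wsmt() maps that region
 * instead and sets wsmt_enabled, after which the buffer is never grown or
 * freed by this driver (it is only unmapped in dcdbas_exit()).
 */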

/**
 * smi_data_buf_free: free SMI data buffer
 */
static void smi_data_buf_free(void)
{
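	/*
	 * Nothing to free, or the buffer is firmware-owned (WSMT); the WSMT
	 * mapping is released via memunmap() in dcdbas_exit() instead.
	 */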
	if (!smi_data_buf || wsmt_enabled)
		return;

	dev_dbg(&dcdbas_pdev->dev, "%s: phys: %x size: %lu\n",
		__func__, smi_data_buf_phys_addr, smi_data_buf_size);

	dma_free_coherent(&dcdbas_pdev->dev, smi_data_buf_size, smi_data_buf,
			  smi_data_buf_handle);
	smi_data_buf = NULL;
	smi_data_buf_handle = 0;
	smi_data_buf_phys_addr = 0;
	smi_data_buf_size = 0;
}

/**
 * smi_data_buf_realloc: grow SMI data buffer if needed
 */
static int smi_data_buf_realloc(unsigned long size)
{
	void *buf;
	dma_addr_t handle;

	if (smi_data_buf_size >= size)
		return 0;

	if (size > max_smi_data_buf_size)
		return -EINVAL;

	/* new buffer is needed */
	buf = dma_alloc_coherent(&dcdbas_pdev->dev, size, &handle, GFP_KERNEL);
	if (!buf) {
		dev_dbg(&dcdbas_pdev->dev,
			"%s: failed to allocate memory size %lu\n",
			__func__, size);
		return -ENOMEM;
	}
	/* memory zeroed by dma_alloc_coherent */

	if (smi_data_buf)
		memcpy(buf, smi_data_buf, smi_data_buf_size);

	/* free any existing buffer */
	smi_data_buf_free();

	/* set up new buffer for use */
	smi_data_buf = buf;
	smi_data_buf_handle = handle;
	smi_data_buf_phys_addr = (u32) virt_to_phys(buf);
	smi_data_buf_size = size;

	dev_dbg(&dcdbas_pdev->dev, "%s: phys: %x size: %lu\n",
		__func__, smi_data_buf_phys_addr, smi_data_buf_size);

	return 0;
}

static ssize_t smi_data_buf_phys_addr_show(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	return sprintf(buf, "%x\n", smi_data_buf_phys_addr);
}

static ssize_t smi_data_buf_size_show(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	return sprintf(buf, "%lu\n", smi_data_buf_size);
}

static ssize_t smi_data_buf_size_store(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t count)
{
	unsigned long buf_size;
	ssize_t ret;

	buf_size = simple_strtoul(buf, NULL, 10);

	/* make sure SMI data buffer is at least buf_size */
	mutex_lock(&smi_data_lock);
	ret = smi_data_buf_realloc(buf_size);
	mutex_unlock(&smi_data_lock);
	if (ret)
		return ret;

	return count;
}

static ssize_t smi_data_read(struct file *filp, struct kobject *kobj,
			     struct bin_attribute *bin_attr,
			     char *buf, loff_t pos, size_t count)
{
	ssize_t ret;

	mutex_lock(&smi_data_lock);
	ret = memory_read_from_buffer(buf, count, &pos, smi_data_buf,
					smi_data_buf_size);
	mutex_unlock(&smi_data_lock);
	return ret;
}

static ssize_t smi_data_write(struct file *filp, struct kobject *kobj,
			      struct bin_attribute *bin_attr,
			      char *buf, loff_t pos, size_t count)
{
	ssize_t ret;

	if ((pos + count) > max_smi_data_buf_size)
		return -EINVAL;

	mutex_lock(&smi_data_lock);

	ret = smi_data_buf_realloc(pos + count);
	if (ret)
		goto out;

	memcpy(smi_data_buf + pos, buf, count);
	ret = count;
out:
	mutex_unlock(&smi_data_lock);
	return ret;
}

static ssize_t host_control_action_show(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	return sprintf(buf, "%u\n", host_control_action);
}

static ssize_t host_control_action_store(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf, size_t count)
{
	ssize_t ret;

	/* make sure buffer is available for host control command */
	mutex_lock(&smi_data_lock);
	ret = smi_data_buf_realloc(sizeof(struct apm_cmd));
	mutex_unlock(&smi_data_lock);
	if (ret)
		return ret;

	host_control_action = simple_strtoul(buf, NULL, 10);
	return count;
}

static ssize_t host_control_smi_type_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	return sprintf(buf, "%u\n", host_control_smi_type);
}

static ssize_t host_control_smi_type_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	host_control_smi_type = simple_strtoul(buf, NULL, 10);
	return count;
}

static ssize_t host_control_on_shutdown_show(struct device *dev,
					     struct device_attribute *attr,
					     char *buf)
{
	return sprintf(buf, "%u\n", host_control_on_shutdown);
}

static ssize_t host_control_on_shutdown_store(struct device *dev,
					      struct device_attribute *attr,
					      const char *buf, size_t count)
{
	host_control_on_shutdown = simple_strtoul(buf, NULL, 10);
	return count;
}

static int raise_smi(void *par)
{
	struct smi_cmd *smi_cmd = par;

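	/*
	 * dcdbas_smi_request() arranges for this to run on CPU 0; verify
	 * that before raising the SMI.
	 */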
	if (smp_processor_id() != 0) {
		dev_dbg(&dcdbas_pdev->dev, "%s: failed to get CPU 0\n",
			__func__);
		return -EBUSY;
	}

	/* generate SMI */
	/* inb to force posted write through and make SMI happen now */
	asm volatile (
		"outb %b0,%w1\n"
		"inb %w1"
		: /* no output args */
		: "a" (smi_cmd->command_code),
		  "d" (smi_cmd->command_address),
		  "b" (smi_cmd->ebx),
		  "c" (smi_cmd->ecx)
		: "memory"
	);

	return 0;
}
/**
 * dcdbas_smi_request: generate SMI request
 *
 * Called with smi_data_lock.
 */
int dcdbas_smi_request(struct smi_cmd *smi_cmd)
{
	int ret;

	if (smi_cmd->magic != SMI_CMD_MAGIC) {
		dev_info(&dcdbas_pdev->dev, "%s: invalid magic value\n",
			 __func__);
		return -EBADR;
	}

	/* SMI requires CPU 0 */
	get_online_cpus();
	ret = smp_call_on_cpu(0, raise_smi, smi_cmd, true);
	put_online_cpus();

	return ret;
}

/**
 * smi_request_store:
 *
 * The valid values are:
 * 0: zero SMI data buffer
 * 1: generate calling interface SMI
 * 2: generate raw SMI
 *
 * User application writes smi_cmd to smi_data before telling driver
 * to generate SMI.
 */
static ssize_t smi_request_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct smi_cmd *smi_cmd;
	unsigned long val = simple_strtoul(buf, NULL, 10);
	ssize_t ret;

	mutex_lock(&smi_data_lock);

	if (smi_data_buf_size < sizeof(struct smi_cmd)) {
		ret = -ENODEV;
		goto out;
	}
	smi_cmd = (struct smi_cmd *)smi_data_buf;

	switch (val) {
	case 2:
		/* Raw SMI */
		ret = dcdbas_smi_request(smi_cmd);
		if (!ret)
			ret = count;
		break;
	case 1:
		/*
		 * Calling Interface SMI
		 *
		 * Provide physical address of command buffer field within
		 * the struct smi_cmd to BIOS.
		 *
		 * Because the address that smi_cmd (smi_data_buf) points to
		 * will be from memremap() of a non-memory address if WSMT
		 * is present, we can't use virt_to_phys() on smi_cmd, so
		 * we have to use the physical address that was saved when
		 * the virtual address for smi_cmd was received.
		 */
		smi_cmd->ebx = smi_data_buf_phys_addr +
				offsetof(struct smi_cmd, command_buffer);
		ret = dcdbas_smi_request(smi_cmd);
		if (!ret)
			ret = count;
		break;
	case 0:
		memset(smi_data_buf, 0, smi_data_buf_size);
		ret = count;
		break;
	default:
		ret = -EINVAL;
		break;
	}

out:
	mutex_unlock(&smi_data_lock);
	return ret;
}
EXPORT_SYMBOL(dcdbas_smi_request);

/**
 * host_control_smi: generate host control SMI
 *
 * Caller must set up the host control command in smi_data_buf.
 */
static int host_control_smi(void)
{
	struct apm_cmd *apm_cmd;
	u8 *data;
	unsigned long flags;
	u32 num_ticks;
	s8 cmd_status;
	u8 index;

	apm_cmd = (struct apm_cmd *)smi_data_buf;
	apm_cmd->status = ESM_STATUS_CMD_UNSUCCESSFUL;

	switch (host_control_smi_type) {
	case HC_SMITYPE_TYPE1:
		spin_lock_irqsave(&rtc_lock, flags);
		/* write SMI data buffer physical address */
		data = (u8 *)&smi_data_buf_phys_addr;
		for (index = PE1300_CMOS_CMD_STRUCT_PTR;
		     index < (PE1300_CMOS_CMD_STRUCT_PTR + 4);
		     index++, data++) {
			outb(index,
			     (CMOS_BASE_PORT + CMOS_PAGE2_INDEX_PORT_PIIX4));
			outb(*data,
			     (CMOS_BASE_PORT + CMOS_PAGE2_DATA_PORT_PIIX4));
		}

		/* first set status to -1 as called for by spec */
		cmd_status = ESM_STATUS_CMD_UNSUCCESSFUL;
		outb((u8) cmd_status, PCAT_APM_STATUS_PORT);

		/* generate SMM call */
		outb(ESM_APM_CMD, PCAT_APM_CONTROL_PORT);
		spin_unlock_irqrestore(&rtc_lock, flags);

		/* wait a few to see if it executed */
		num_ticks = TIMEOUT_USEC_SHORT_SEMA_BLOCKING;
		while ((cmd_status = inb(PCAT_APM_STATUS_PORT))
		       == ESM_STATUS_CMD_UNSUCCESSFUL) {
			num_ticks--;
			if (num_ticks == EXPIRED_TIMER)
				return -ETIME;
		}
		break;

	case HC_SMITYPE_TYPE2:
	case HC_SMITYPE_TYPE3:
		spin_lock_irqsave(&rtc_lock, flags);
		/* write SMI data buffer physical address */
		data = (u8 *)&smi_data_buf_phys_addr;
		for (index = PE1400_CMOS_CMD_STRUCT_PTR;
		     index < (PE1400_CMOS_CMD_STRUCT_PTR + 4);
		     index++, data++) {
			outb(index, (CMOS_BASE_PORT + CMOS_PAGE1_INDEX_PORT));
			outb(*data, (CMOS_BASE_PORT + CMOS_PAGE1_DATA_PORT));
		}

		/* generate SMM call */
		if (host_control_smi_type == HC_SMITYPE_TYPE3)
			outb(ESM_APM_CMD, PCAT_APM_CONTROL_PORT);
		else
			outb(ESM_APM_CMD, PE1400_APM_CONTROL_PORT);

		/* restore RTC index pointer since it was written to above */
		CMOS_READ(RTC_REG_C);
		spin_unlock_irqrestore(&rtc_lock, flags);

		/* read control port back to serialize write */
		cmd_status = inb(PE1400_APM_CONTROL_PORT);

		/* wait a few to see if it executed */
		num_ticks = TIMEOUT_USEC_SHORT_SEMA_BLOCKING;
		while (apm_cmd->status == ESM_STATUS_CMD_UNSUCCESSFUL) {
			num_ticks--;
			if (num_ticks == EXPIRED_TIMER)
				return -ETIME;
		}
		break;

	default:
		dev_dbg(&dcdbas_pdev->dev, "%s: invalid SMI type %u\n",
			__func__, host_control_smi_type);
		return -ENOSYS;
	}

	return 0;
}

/**
 * dcdbas_host_control: initiate host control
 *
 * This function is called by the driver after the system has
 * finished shutting down if the user application specified a
 * host control action to perform on shutdown.  It is safe to
 * use smi_data_buf at this point because the system has finished
 * shutting down and no userspace apps are running.
 */
static void dcdbas_host_control(void)
{
	struct apm_cmd *apm_cmd;
	u8 action;

	if (host_control_action == HC_ACTION_NONE)
		return;

	action = host_control_action;
	host_control_action = HC_ACTION_NONE;

	if (!smi_data_buf) {
		dev_dbg(&dcdbas_pdev->dev, "%s: no SMI buffer\n", __func__);
		return;
	}

	if (smi_data_buf_size < sizeof(struct apm_cmd)) {
		dev_dbg(&dcdbas_pdev->dev, "%s: SMI buffer too small\n",
			__func__);
		return;
	}

	apm_cmd = (struct apm_cmd *)smi_data_buf;

	/* power off takes precedence */
	if (action & HC_ACTION_HOST_CONTROL_POWEROFF) {
		apm_cmd->command = ESM_APM_POWER_CYCLE;
		apm_cmd->reserved = 0;
		*((s16 *)&apm_cmd->parameters.shortreq.parm[0]) = (s16) 0;
		host_control_smi();
	} else if (action & HC_ACTION_HOST_CONTROL_POWERCYCLE) {
		apm_cmd->command = ESM_APM_POWER_CYCLE;
		apm_cmd->reserved = 0;
		*((s16 *)&apm_cmd->parameters.shortreq.parm[0]) = (s16) 20;
		host_control_smi();
	}
}

/* WSMT */

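/* byte-wise checksum; a valid SMM EPS table must sum to zero */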
static u8 checksum(u8 *buffer, u8 length)
{
	u8 sum = 0;
	u8 *end = buffer + length;

	while (buffer < end)
		sum += *buffer++;
	return sum;
}

static inline struct smm_eps_table *check_eps_table(u8 *addr)
{
	struct smm_eps_table *eps = (struct smm_eps_table *)addr;

	if (strncmp(eps->smm_comm_buff_anchor, SMM_EPS_SIG, 4) != 0)
		return NULL;

	if (checksum(addr, eps->length) != 0)
		return NULL;

	return eps;
}

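/*
 * dcdbas_check_wsmt: look for a WSMT-protected SMI communication buffer
 *
 * Returns 1 if a firmware-provided buffer was found and mapped, 0 if WSMT
 * is absent or does not enforce fixed communication buffers, or a negative
 * errno if a buffer was advertised but could not be used.
 */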
static int dcdbas_check_wsmt(void)
{
	const struct dmi_device *dev = NULL;
	struct acpi_table_wsmt *wsmt = NULL;
	struct smm_eps_table *eps = NULL;
	u64 bios_buf_paddr;
	u64 remap_size;
	u8 *addr;

	acpi_get_table(ACPI_SIG_WSMT, 0, (struct acpi_table_header **)&wsmt);
	if (!wsmt)
		return 0;

	/* Check if WSMT ACPI table shows that protection is enabled */
	if (!(wsmt->protection_flags & ACPI_WSMT_FIXED_COMM_BUFFERS) ||
	    !(wsmt->protection_flags & ACPI_WSMT_COMM_BUFFER_NESTED_PTR_PROTECTION))
		return 0;

	/*
	 * BIOS could provide the address/size of the protected buffer
	 * in an SMBIOS string or in an EPS structure in 0xFxxxx.
	 */

	/* Check SMBIOS for buffer address */
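	/* OEM string format: "30[<physical address>;<size>]", both in hex */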
	while ((dev = dmi_find_device(DMI_DEV_TYPE_OEM_STRING, NULL, dev)))
		if (sscanf(dev->name, "30[%16llx;%8llx]", &bios_buf_paddr,
		    &remap_size) == 2)
			goto remap;

	/* Scan for EPS (entry point structure) */
	for (addr = (u8 *)__va(0xf0000);
	     addr < (u8 *)__va(0x100000 - sizeof(struct smm_eps_table));
	     addr += 16) {
		eps = check_eps_table(addr);
		if (eps)
			break;
	}

	if (!eps) {
		dev_dbg(&dcdbas_pdev->dev, "found WSMT, but no firmware buffer found\n");
		return -ENODEV;
	}
	bios_buf_paddr = eps->smm_comm_buff_addr;
	remap_size = eps->num_of_4k_pages * PAGE_SIZE;

remap:
	/*
	 * Get physical address of buffer and map to virtual address.
	 * Table gives size in 4K pages, regardless of actual system page size.
	 */
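	/*
	 * The first 8 bytes hold a semaphore; the SMI data buffer itself
	 * starts at bios_buf_paddr + 8 and must still fit in 32 bits.
	 */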
	if (upper_32_bits(bios_buf_paddr + 8)) {
		dev_warn(&dcdbas_pdev->dev, "found WSMT, but buffer address is above 4GB\n");
		return -EINVAL;
	}
	/*
	 * Limit remap size to MAX_SMI_DATA_BUF_SIZE + 8 (since the first 8
	 * bytes are used for a semaphore, not the data buffer itself).
	 */
	if (remap_size > MAX_SMI_DATA_BUF_SIZE + 8)
		remap_size = MAX_SMI_DATA_BUF_SIZE + 8;

	bios_buffer = memremap(bios_buf_paddr, remap_size, MEMREMAP_WB);
	if (!bios_buffer) {
		dev_warn(&dcdbas_pdev->dev, "found WSMT, but failed to map buffer\n");
		return -ENOMEM;
	}

	/* First 8 bytes is for a semaphore, not part of the smi_data_buf */
	smi_data_buf_phys_addr = bios_buf_paddr + 8;
	smi_data_buf = bios_buffer + 8;
	smi_data_buf_size = remap_size - 8;
	max_smi_data_buf_size = smi_data_buf_size;
	wsmt_enabled = true;
	dev_info(&dcdbas_pdev->dev,
		 "WSMT found, using firmware-provided SMI buffer.\n");
	return 1;
}

/**
 * dcdbas_reboot_notify: handle reboot notification for host control
 */
static int dcdbas_reboot_notify(struct notifier_block *nb, unsigned long code,
				void *unused)
{
	switch (code) {
	case SYS_DOWN:
	case SYS_HALT:
	case SYS_POWER_OFF:
		if (host_control_on_shutdown) {
			/* firmware is going to perform host control action */
			printk(KERN_WARNING "Please wait for shutdown "
			       "action to complete...\n");
			dcdbas_host_control();
		}
		break;
	}

	return NOTIFY_DONE;
}

static struct notifier_block dcdbas_reboot_nb = {
	.notifier_call = dcdbas_reboot_notify,
	.next = NULL,
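	/* lowest priority: run after all other reboot notifiers have finished */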
	.priority = INT_MIN
};

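/* sysfs attributes; the DCDBAS_*_ATTR_* helper macros come from dcdbas.h */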
static DCDBAS_BIN_ATTR_RW(smi_data);

static struct bin_attribute *dcdbas_bin_attrs[] = {
	&bin_attr_smi_data,
	NULL
};

static DCDBAS_DEV_ATTR_RW(smi_data_buf_size);
static DCDBAS_DEV_ATTR_RO(smi_data_buf_phys_addr);
static DCDBAS_DEV_ATTR_WO(smi_request);
static DCDBAS_DEV_ATTR_RW(host_control_action);
static DCDBAS_DEV_ATTR_RW(host_control_smi_type);
static DCDBAS_DEV_ATTR_RW(host_control_on_shutdown);

static struct attribute *dcdbas_dev_attrs[] = {
	&dev_attr_smi_data_buf_size.attr,
	&dev_attr_smi_data_buf_phys_addr.attr,
	&dev_attr_smi_request.attr,
	&dev_attr_host_control_action.attr,
	&dev_attr_host_control_smi_type.attr,
	&dev_attr_host_control_on_shutdown.attr,
	NULL
};

static const struct attribute_group dcdbas_attr_group = {
	.attrs = dcdbas_dev_attrs,
	.bin_attrs = dcdbas_bin_attrs,
};

static int dcdbas_probe(struct platform_device *dev)
{
	int error;

	host_control_action = HC_ACTION_NONE;
	host_control_smi_type = HC_SMITYPE_NONE;

	dcdbas_pdev = dev;

	/* Check if ACPI WSMT table specifies protected SMI buffer address */
	error = dcdbas_check_wsmt();
	if (error < 0)
		return error;

	/*
	 * BIOS SMI calls require buffer addresses be in 32-bit address space.
	 * This is done by setting the DMA mask below.
	 */
	error = dma_set_coherent_mask(&dcdbas_pdev->dev, DMA_BIT_MASK(32));
	if (error)
		return error;

	error = sysfs_create_group(&dev->dev.kobj, &dcdbas_attr_group);
	if (error)
		return error;

	register_reboot_notifier(&dcdbas_reboot_nb);

	dev_info(&dev->dev, "%s (version %s)\n",
		 DRIVER_DESCRIPTION, DRIVER_VERSION);

	return 0;
}

static int dcdbas_remove(struct platform_device *dev)
{
	unregister_reboot_notifier(&dcdbas_reboot_nb);
	sysfs_remove_group(&dev->dev.kobj, &dcdbas_attr_group);

	return 0;
}

static struct platform_driver dcdbas_driver = {
	.driver		= {
		.name	= DRIVER_NAME,
	},
	.probe		= dcdbas_probe,
	.remove		= dcdbas_remove,
};

static const struct platform_device_info dcdbas_dev_info __initconst = {
	.name		= DRIVER_NAME,
	.id		= -1,
	.dma_mask	= DMA_BIT_MASK(32),
};

static struct platform_device *dcdbas_pdev_reg;

/**
 * dcdbas_init: initialize driver
 */
static int __init dcdbas_init(void)
{
	int error;

	error = platform_driver_register(&dcdbas_driver);
	if (error)
		return error;

	dcdbas_pdev_reg = platform_device_register_full(&dcdbas_dev_info);
	if (IS_ERR(dcdbas_pdev_reg)) {
		error = PTR_ERR(dcdbas_pdev_reg);
		goto err_unregister_driver;
	}

	return 0;

 err_unregister_driver:
	platform_driver_unregister(&dcdbas_driver);
	return error;
}

/**
 * dcdbas_exit: perform driver cleanup
 */
static void __exit dcdbas_exit(void)
{
	/*
	 * make sure functions that use dcdbas_pdev are called
	 * before platform_device_unregister
	 */
	unregister_reboot_notifier(&dcdbas_reboot_nb);

	/*
	 * We have to free the buffer here instead of dcdbas_remove
	 * because only in module exit function we can be sure that
	 * all sysfs attributes belonging to this module have been
	 * released.
	 */
	if (dcdbas_pdev)
		smi_data_buf_free();
	if (bios_buffer)
		memunmap(bios_buffer);
	platform_device_unregister(dcdbas_pdev_reg);
	platform_driver_unregister(&dcdbas_driver);
}

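/*
 * Initialized at subsys_initcall_sync time rather than via module_init,
 * presumably so that built-in users of dcdbas_smi_request() find the driver
 * already registered (assumption; ordering rationale is not stated here).
 */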
subsys_initcall_sync(dcdbas_init);
module_exit(dcdbas_exit);

MODULE_DESCRIPTION(DRIVER_DESCRIPTION " (version " DRIVER_VERSION ")");
MODULE_VERSION(DRIVER_VERSION);
MODULE_AUTHOR("Dell Inc.");
MODULE_LICENSE("GPL");
/* Any System or BIOS claiming to be by Dell */
MODULE_ALIAS("dmi:*:[bs]vnD[Ee][Ll][Ll]*:*");