xref: /OK3568_Linux_fs/kernel/drivers/misc/mei/pci-txe.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2013-2020, Intel Corporation. All rights reserved.
 * Intel Management Engine Interface (Intel MEI) Linux driver
 */
6*4882a593Smuzhiyun 
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <linux/dma-mapping.h>

#include <linux/mei.h>

#include "mei_dev.h"
#include "hw-txe.h"
25*4882a593Smuzhiyun 
26*4882a593Smuzhiyun static const struct pci_device_id mei_txe_pci_tbl[] = {
27*4882a593Smuzhiyun 	{PCI_VDEVICE(INTEL, 0x0F18)}, /* Baytrail */
28*4882a593Smuzhiyun 	{PCI_VDEVICE(INTEL, 0x2298)}, /* Cherrytrail */
29*4882a593Smuzhiyun 
30*4882a593Smuzhiyun 	{0, }
31*4882a593Smuzhiyun };
32*4882a593Smuzhiyun MODULE_DEVICE_TABLE(pci, mei_txe_pci_tbl);
33*4882a593Smuzhiyun 
/*
 * The pm domain hooks are only meaningful with runtime PM;
 * without CONFIG_PM they collapse to empty stubs.
 */
#ifdef CONFIG_PM
static inline void mei_txe_set_pm_domain(struct mei_device *dev);
static inline void mei_txe_unset_pm_domain(struct mei_device *dev);
#else
static inline void mei_txe_set_pm_domain(struct mei_device *dev) {}
static inline void mei_txe_unset_pm_domain(struct mei_device *dev) {}
#endif /* CONFIG_PM */
41*4882a593Smuzhiyun 
42*4882a593Smuzhiyun /**
43*4882a593Smuzhiyun  * mei_txe_probe - Device Initialization Routine
44*4882a593Smuzhiyun  *
45*4882a593Smuzhiyun  * @pdev: PCI device structure
46*4882a593Smuzhiyun  * @ent: entry in mei_txe_pci_tbl
47*4882a593Smuzhiyun  *
48*4882a593Smuzhiyun  * Return: 0 on success, <0 on failure.
49*4882a593Smuzhiyun  */
mei_txe_probe(struct pci_dev * pdev,const struct pci_device_id * ent)50*4882a593Smuzhiyun static int mei_txe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
51*4882a593Smuzhiyun {
52*4882a593Smuzhiyun 	struct mei_device *dev;
53*4882a593Smuzhiyun 	struct mei_txe_hw *hw;
54*4882a593Smuzhiyun 	const int mask = BIT(SEC_BAR) | BIT(BRIDGE_BAR);
55*4882a593Smuzhiyun 	int err;
56*4882a593Smuzhiyun 
57*4882a593Smuzhiyun 	/* enable pci dev */
58*4882a593Smuzhiyun 	err = pcim_enable_device(pdev);
59*4882a593Smuzhiyun 	if (err) {
60*4882a593Smuzhiyun 		dev_err(&pdev->dev, "failed to enable pci device.\n");
61*4882a593Smuzhiyun 		goto end;
62*4882a593Smuzhiyun 	}
63*4882a593Smuzhiyun 	/* set PCI host mastering  */
64*4882a593Smuzhiyun 	pci_set_master(pdev);
65*4882a593Smuzhiyun 	/* pci request regions and mapping IO device memory for mei driver */
66*4882a593Smuzhiyun 	err = pcim_iomap_regions(pdev, mask, KBUILD_MODNAME);
67*4882a593Smuzhiyun 	if (err) {
68*4882a593Smuzhiyun 		dev_err(&pdev->dev, "failed to get pci regions.\n");
69*4882a593Smuzhiyun 		goto end;
70*4882a593Smuzhiyun 	}
71*4882a593Smuzhiyun 
72*4882a593Smuzhiyun 	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36));
73*4882a593Smuzhiyun 	if (err) {
74*4882a593Smuzhiyun 		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
75*4882a593Smuzhiyun 		if (err) {
76*4882a593Smuzhiyun 			dev_err(&pdev->dev, "No suitable DMA available.\n");
77*4882a593Smuzhiyun 			goto end;
78*4882a593Smuzhiyun 		}
79*4882a593Smuzhiyun 	}
80*4882a593Smuzhiyun 
81*4882a593Smuzhiyun 	/* allocates and initializes the mei dev structure */
82*4882a593Smuzhiyun 	dev = mei_txe_dev_init(pdev);
83*4882a593Smuzhiyun 	if (!dev) {
84*4882a593Smuzhiyun 		err = -ENOMEM;
85*4882a593Smuzhiyun 		goto end;
86*4882a593Smuzhiyun 	}
87*4882a593Smuzhiyun 	hw = to_txe_hw(dev);
88*4882a593Smuzhiyun 	hw->mem_addr = pcim_iomap_table(pdev);
89*4882a593Smuzhiyun 
90*4882a593Smuzhiyun 	pci_enable_msi(pdev);
91*4882a593Smuzhiyun 
92*4882a593Smuzhiyun 	/* clear spurious interrupts */
93*4882a593Smuzhiyun 	mei_clear_interrupts(dev);
94*4882a593Smuzhiyun 
95*4882a593Smuzhiyun 	/* request and enable interrupt  */
96*4882a593Smuzhiyun 	if (pci_dev_msi_enabled(pdev))
97*4882a593Smuzhiyun 		err = request_threaded_irq(pdev->irq,
98*4882a593Smuzhiyun 			NULL,
99*4882a593Smuzhiyun 			mei_txe_irq_thread_handler,
100*4882a593Smuzhiyun 			IRQF_ONESHOT, KBUILD_MODNAME, dev);
101*4882a593Smuzhiyun 	else
102*4882a593Smuzhiyun 		err = request_threaded_irq(pdev->irq,
103*4882a593Smuzhiyun 			mei_txe_irq_quick_handler,
104*4882a593Smuzhiyun 			mei_txe_irq_thread_handler,
105*4882a593Smuzhiyun 			IRQF_SHARED, KBUILD_MODNAME, dev);
106*4882a593Smuzhiyun 	if (err) {
107*4882a593Smuzhiyun 		dev_err(&pdev->dev, "mei: request_threaded_irq failure. irq = %d\n",
108*4882a593Smuzhiyun 			pdev->irq);
109*4882a593Smuzhiyun 		goto end;
110*4882a593Smuzhiyun 	}
111*4882a593Smuzhiyun 
112*4882a593Smuzhiyun 	if (mei_start(dev)) {
113*4882a593Smuzhiyun 		dev_err(&pdev->dev, "init hw failure.\n");
114*4882a593Smuzhiyun 		err = -ENODEV;
115*4882a593Smuzhiyun 		goto release_irq;
116*4882a593Smuzhiyun 	}
117*4882a593Smuzhiyun 
118*4882a593Smuzhiyun 	pm_runtime_set_autosuspend_delay(&pdev->dev, MEI_TXI_RPM_TIMEOUT);
119*4882a593Smuzhiyun 	pm_runtime_use_autosuspend(&pdev->dev);
120*4882a593Smuzhiyun 
121*4882a593Smuzhiyun 	err = mei_register(dev, &pdev->dev);
122*4882a593Smuzhiyun 	if (err)
123*4882a593Smuzhiyun 		goto stop;
124*4882a593Smuzhiyun 
125*4882a593Smuzhiyun 	pci_set_drvdata(pdev, dev);
126*4882a593Smuzhiyun 
127*4882a593Smuzhiyun 	/*
128*4882a593Smuzhiyun 	 * MEI requires to resume from runtime suspend mode
129*4882a593Smuzhiyun 	 * in order to perform link reset flow upon system suspend.
130*4882a593Smuzhiyun 	 */
131*4882a593Smuzhiyun 	dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_NO_DIRECT_COMPLETE);
132*4882a593Smuzhiyun 
133*4882a593Smuzhiyun 	/*
134*4882a593Smuzhiyun 	 * TXE maps runtime suspend/resume to own power gating states,
135*4882a593Smuzhiyun 	 * hence we need to go around native PCI runtime service which
136*4882a593Smuzhiyun 	 * eventually brings the device into D3cold/hot state.
137*4882a593Smuzhiyun 	 * But the TXE device cannot wake up from D3 unlike from own
138*4882a593Smuzhiyun 	 * power gating. To get around PCI device native runtime pm,
139*4882a593Smuzhiyun 	 * TXE uses runtime pm domain handlers which take precedence.
140*4882a593Smuzhiyun 	 */
141*4882a593Smuzhiyun 	mei_txe_set_pm_domain(dev);
142*4882a593Smuzhiyun 
143*4882a593Smuzhiyun 	pm_runtime_put_noidle(&pdev->dev);
144*4882a593Smuzhiyun 
145*4882a593Smuzhiyun 	return 0;
146*4882a593Smuzhiyun 
147*4882a593Smuzhiyun stop:
148*4882a593Smuzhiyun 	mei_stop(dev);
149*4882a593Smuzhiyun release_irq:
150*4882a593Smuzhiyun 	mei_cancel_work(dev);
151*4882a593Smuzhiyun 	mei_disable_interrupts(dev);
152*4882a593Smuzhiyun 	free_irq(pdev->irq, dev);
153*4882a593Smuzhiyun end:
154*4882a593Smuzhiyun 	dev_err(&pdev->dev, "initialization failed.\n");
155*4882a593Smuzhiyun 	return err;
156*4882a593Smuzhiyun }
157*4882a593Smuzhiyun 
158*4882a593Smuzhiyun /**
159*4882a593Smuzhiyun  * mei_txe_remove - Device Shutdown Routine
160*4882a593Smuzhiyun  *
161*4882a593Smuzhiyun  * @pdev: PCI device structure
162*4882a593Smuzhiyun  *
163*4882a593Smuzhiyun  *  mei_txe_shutdown is called from the reboot notifier
164*4882a593Smuzhiyun  *  it's a simplified version of remove so we go down
165*4882a593Smuzhiyun  *  faster.
166*4882a593Smuzhiyun  */
mei_txe_shutdown(struct pci_dev * pdev)167*4882a593Smuzhiyun static void mei_txe_shutdown(struct pci_dev *pdev)
168*4882a593Smuzhiyun {
169*4882a593Smuzhiyun 	struct mei_device *dev;
170*4882a593Smuzhiyun 
171*4882a593Smuzhiyun 	dev = pci_get_drvdata(pdev);
172*4882a593Smuzhiyun 	if (!dev)
173*4882a593Smuzhiyun 		return;
174*4882a593Smuzhiyun 
175*4882a593Smuzhiyun 	dev_dbg(&pdev->dev, "shutdown\n");
176*4882a593Smuzhiyun 	mei_stop(dev);
177*4882a593Smuzhiyun 
178*4882a593Smuzhiyun 	mei_txe_unset_pm_domain(dev);
179*4882a593Smuzhiyun 
180*4882a593Smuzhiyun 	mei_disable_interrupts(dev);
181*4882a593Smuzhiyun 	free_irq(pdev->irq, dev);
182*4882a593Smuzhiyun }
183*4882a593Smuzhiyun 
184*4882a593Smuzhiyun /**
185*4882a593Smuzhiyun  * mei_txe_remove - Device Removal Routine
186*4882a593Smuzhiyun  *
187*4882a593Smuzhiyun  * @pdev: PCI device structure
188*4882a593Smuzhiyun  *
189*4882a593Smuzhiyun  * mei_remove is called by the PCI subsystem to alert the driver
190*4882a593Smuzhiyun  * that it should release a PCI device.
191*4882a593Smuzhiyun  */
mei_txe_remove(struct pci_dev * pdev)192*4882a593Smuzhiyun static void mei_txe_remove(struct pci_dev *pdev)
193*4882a593Smuzhiyun {
194*4882a593Smuzhiyun 	struct mei_device *dev;
195*4882a593Smuzhiyun 
196*4882a593Smuzhiyun 	dev = pci_get_drvdata(pdev);
197*4882a593Smuzhiyun 	if (!dev) {
198*4882a593Smuzhiyun 		dev_err(&pdev->dev, "mei: dev == NULL\n");
199*4882a593Smuzhiyun 		return;
200*4882a593Smuzhiyun 	}
201*4882a593Smuzhiyun 
202*4882a593Smuzhiyun 	pm_runtime_get_noresume(&pdev->dev);
203*4882a593Smuzhiyun 
204*4882a593Smuzhiyun 	mei_stop(dev);
205*4882a593Smuzhiyun 
206*4882a593Smuzhiyun 	mei_txe_unset_pm_domain(dev);
207*4882a593Smuzhiyun 
208*4882a593Smuzhiyun 	mei_disable_interrupts(dev);
209*4882a593Smuzhiyun 	free_irq(pdev->irq, dev);
210*4882a593Smuzhiyun 
211*4882a593Smuzhiyun 	mei_deregister(dev);
212*4882a593Smuzhiyun }
213*4882a593Smuzhiyun 
214*4882a593Smuzhiyun 
215*4882a593Smuzhiyun #ifdef CONFIG_PM_SLEEP
mei_txe_pci_suspend(struct device * device)216*4882a593Smuzhiyun static int mei_txe_pci_suspend(struct device *device)
217*4882a593Smuzhiyun {
218*4882a593Smuzhiyun 	struct pci_dev *pdev = to_pci_dev(device);
219*4882a593Smuzhiyun 	struct mei_device *dev = pci_get_drvdata(pdev);
220*4882a593Smuzhiyun 
221*4882a593Smuzhiyun 	if (!dev)
222*4882a593Smuzhiyun 		return -ENODEV;
223*4882a593Smuzhiyun 
224*4882a593Smuzhiyun 	dev_dbg(&pdev->dev, "suspend\n");
225*4882a593Smuzhiyun 
226*4882a593Smuzhiyun 	mei_stop(dev);
227*4882a593Smuzhiyun 
228*4882a593Smuzhiyun 	mei_disable_interrupts(dev);
229*4882a593Smuzhiyun 
230*4882a593Smuzhiyun 	free_irq(pdev->irq, dev);
231*4882a593Smuzhiyun 	pci_disable_msi(pdev);
232*4882a593Smuzhiyun 
233*4882a593Smuzhiyun 	return 0;
234*4882a593Smuzhiyun }
235*4882a593Smuzhiyun 
mei_txe_pci_resume(struct device * device)236*4882a593Smuzhiyun static int mei_txe_pci_resume(struct device *device)
237*4882a593Smuzhiyun {
238*4882a593Smuzhiyun 	struct pci_dev *pdev = to_pci_dev(device);
239*4882a593Smuzhiyun 	struct mei_device *dev;
240*4882a593Smuzhiyun 	int err;
241*4882a593Smuzhiyun 
242*4882a593Smuzhiyun 	dev = pci_get_drvdata(pdev);
243*4882a593Smuzhiyun 	if (!dev)
244*4882a593Smuzhiyun 		return -ENODEV;
245*4882a593Smuzhiyun 
246*4882a593Smuzhiyun 	pci_enable_msi(pdev);
247*4882a593Smuzhiyun 
248*4882a593Smuzhiyun 	mei_clear_interrupts(dev);
249*4882a593Smuzhiyun 
250*4882a593Smuzhiyun 	/* request and enable interrupt */
251*4882a593Smuzhiyun 	if (pci_dev_msi_enabled(pdev))
252*4882a593Smuzhiyun 		err = request_threaded_irq(pdev->irq,
253*4882a593Smuzhiyun 			NULL,
254*4882a593Smuzhiyun 			mei_txe_irq_thread_handler,
255*4882a593Smuzhiyun 			IRQF_ONESHOT, KBUILD_MODNAME, dev);
256*4882a593Smuzhiyun 	else
257*4882a593Smuzhiyun 		err = request_threaded_irq(pdev->irq,
258*4882a593Smuzhiyun 			mei_txe_irq_quick_handler,
259*4882a593Smuzhiyun 			mei_txe_irq_thread_handler,
260*4882a593Smuzhiyun 			IRQF_SHARED, KBUILD_MODNAME, dev);
261*4882a593Smuzhiyun 	if (err) {
262*4882a593Smuzhiyun 		dev_err(&pdev->dev, "request_threaded_irq failed: irq = %d.\n",
263*4882a593Smuzhiyun 				pdev->irq);
264*4882a593Smuzhiyun 		return err;
265*4882a593Smuzhiyun 	}
266*4882a593Smuzhiyun 
267*4882a593Smuzhiyun 	err = mei_restart(dev);
268*4882a593Smuzhiyun 
269*4882a593Smuzhiyun 	return err;
270*4882a593Smuzhiyun }
271*4882a593Smuzhiyun #endif /* CONFIG_PM_SLEEP */
272*4882a593Smuzhiyun 
273*4882a593Smuzhiyun #ifdef CONFIG_PM
mei_txe_pm_runtime_idle(struct device * device)274*4882a593Smuzhiyun static int mei_txe_pm_runtime_idle(struct device *device)
275*4882a593Smuzhiyun {
276*4882a593Smuzhiyun 	struct mei_device *dev;
277*4882a593Smuzhiyun 
278*4882a593Smuzhiyun 	dev_dbg(device, "rpm: txe: runtime_idle\n");
279*4882a593Smuzhiyun 
280*4882a593Smuzhiyun 	dev = dev_get_drvdata(device);
281*4882a593Smuzhiyun 	if (!dev)
282*4882a593Smuzhiyun 		return -ENODEV;
283*4882a593Smuzhiyun 	if (mei_write_is_idle(dev))
284*4882a593Smuzhiyun 		pm_runtime_autosuspend(device);
285*4882a593Smuzhiyun 
286*4882a593Smuzhiyun 	return -EBUSY;
287*4882a593Smuzhiyun }
mei_txe_pm_runtime_suspend(struct device * device)288*4882a593Smuzhiyun static int mei_txe_pm_runtime_suspend(struct device *device)
289*4882a593Smuzhiyun {
290*4882a593Smuzhiyun 	struct mei_device *dev;
291*4882a593Smuzhiyun 	int ret;
292*4882a593Smuzhiyun 
293*4882a593Smuzhiyun 	dev_dbg(device, "rpm: txe: runtime suspend\n");
294*4882a593Smuzhiyun 
295*4882a593Smuzhiyun 	dev = dev_get_drvdata(device);
296*4882a593Smuzhiyun 	if (!dev)
297*4882a593Smuzhiyun 		return -ENODEV;
298*4882a593Smuzhiyun 
299*4882a593Smuzhiyun 	mutex_lock(&dev->device_lock);
300*4882a593Smuzhiyun 
301*4882a593Smuzhiyun 	if (mei_write_is_idle(dev))
302*4882a593Smuzhiyun 		ret = mei_txe_aliveness_set_sync(dev, 0);
303*4882a593Smuzhiyun 	else
304*4882a593Smuzhiyun 		ret = -EAGAIN;
305*4882a593Smuzhiyun 
306*4882a593Smuzhiyun 	/* keep irq on we are staying in D0 */
307*4882a593Smuzhiyun 
308*4882a593Smuzhiyun 	dev_dbg(device, "rpm: txe: runtime suspend ret=%d\n", ret);
309*4882a593Smuzhiyun 
310*4882a593Smuzhiyun 	mutex_unlock(&dev->device_lock);
311*4882a593Smuzhiyun 
312*4882a593Smuzhiyun 	if (ret && ret != -EAGAIN)
313*4882a593Smuzhiyun 		schedule_work(&dev->reset_work);
314*4882a593Smuzhiyun 
315*4882a593Smuzhiyun 	return ret;
316*4882a593Smuzhiyun }
317*4882a593Smuzhiyun 
mei_txe_pm_runtime_resume(struct device * device)318*4882a593Smuzhiyun static int mei_txe_pm_runtime_resume(struct device *device)
319*4882a593Smuzhiyun {
320*4882a593Smuzhiyun 	struct mei_device *dev;
321*4882a593Smuzhiyun 	int ret;
322*4882a593Smuzhiyun 
323*4882a593Smuzhiyun 	dev_dbg(device, "rpm: txe: runtime resume\n");
324*4882a593Smuzhiyun 
325*4882a593Smuzhiyun 	dev = dev_get_drvdata(device);
326*4882a593Smuzhiyun 	if (!dev)
327*4882a593Smuzhiyun 		return -ENODEV;
328*4882a593Smuzhiyun 
329*4882a593Smuzhiyun 	mutex_lock(&dev->device_lock);
330*4882a593Smuzhiyun 
331*4882a593Smuzhiyun 	mei_enable_interrupts(dev);
332*4882a593Smuzhiyun 
333*4882a593Smuzhiyun 	ret = mei_txe_aliveness_set_sync(dev, 1);
334*4882a593Smuzhiyun 
335*4882a593Smuzhiyun 	mutex_unlock(&dev->device_lock);
336*4882a593Smuzhiyun 
337*4882a593Smuzhiyun 	dev_dbg(device, "rpm: txe: runtime resume ret = %d\n", ret);
338*4882a593Smuzhiyun 
339*4882a593Smuzhiyun 	if (ret)
340*4882a593Smuzhiyun 		schedule_work(&dev->reset_work);
341*4882a593Smuzhiyun 
342*4882a593Smuzhiyun 	return ret;
343*4882a593Smuzhiyun }
344*4882a593Smuzhiyun 
345*4882a593Smuzhiyun /**
346*4882a593Smuzhiyun  * mei_txe_set_pm_domain - fill and set pm domain structure for device
347*4882a593Smuzhiyun  *
348*4882a593Smuzhiyun  * @dev: mei_device
349*4882a593Smuzhiyun  */
mei_txe_set_pm_domain(struct mei_device * dev)350*4882a593Smuzhiyun static inline void mei_txe_set_pm_domain(struct mei_device *dev)
351*4882a593Smuzhiyun {
352*4882a593Smuzhiyun 	struct pci_dev *pdev  = to_pci_dev(dev->dev);
353*4882a593Smuzhiyun 
354*4882a593Smuzhiyun 	if (pdev->dev.bus && pdev->dev.bus->pm) {
355*4882a593Smuzhiyun 		dev->pg_domain.ops = *pdev->dev.bus->pm;
356*4882a593Smuzhiyun 
357*4882a593Smuzhiyun 		dev->pg_domain.ops.runtime_suspend = mei_txe_pm_runtime_suspend;
358*4882a593Smuzhiyun 		dev->pg_domain.ops.runtime_resume = mei_txe_pm_runtime_resume;
359*4882a593Smuzhiyun 		dev->pg_domain.ops.runtime_idle = mei_txe_pm_runtime_idle;
360*4882a593Smuzhiyun 
361*4882a593Smuzhiyun 		dev_pm_domain_set(&pdev->dev, &dev->pg_domain);
362*4882a593Smuzhiyun 	}
363*4882a593Smuzhiyun }
364*4882a593Smuzhiyun 
365*4882a593Smuzhiyun /**
366*4882a593Smuzhiyun  * mei_txe_unset_pm_domain - clean pm domain structure for device
367*4882a593Smuzhiyun  *
368*4882a593Smuzhiyun  * @dev: mei_device
369*4882a593Smuzhiyun  */
mei_txe_unset_pm_domain(struct mei_device * dev)370*4882a593Smuzhiyun static inline void mei_txe_unset_pm_domain(struct mei_device *dev)
371*4882a593Smuzhiyun {
372*4882a593Smuzhiyun 	/* stop using pm callbacks if any */
373*4882a593Smuzhiyun 	dev_pm_domain_set(dev->dev, NULL);
374*4882a593Smuzhiyun }
375*4882a593Smuzhiyun 
376*4882a593Smuzhiyun static const struct dev_pm_ops mei_txe_pm_ops = {
377*4882a593Smuzhiyun 	SET_SYSTEM_SLEEP_PM_OPS(mei_txe_pci_suspend,
378*4882a593Smuzhiyun 				mei_txe_pci_resume)
379*4882a593Smuzhiyun 	SET_RUNTIME_PM_OPS(
380*4882a593Smuzhiyun 		mei_txe_pm_runtime_suspend,
381*4882a593Smuzhiyun 		mei_txe_pm_runtime_resume,
382*4882a593Smuzhiyun 		mei_txe_pm_runtime_idle)
383*4882a593Smuzhiyun };
384*4882a593Smuzhiyun 
385*4882a593Smuzhiyun #define MEI_TXE_PM_OPS	(&mei_txe_pm_ops)
386*4882a593Smuzhiyun #else
387*4882a593Smuzhiyun #define MEI_TXE_PM_OPS	NULL
388*4882a593Smuzhiyun #endif /* CONFIG_PM */
389*4882a593Smuzhiyun 
390*4882a593Smuzhiyun /*
391*4882a593Smuzhiyun  *  PCI driver structure
392*4882a593Smuzhiyun  */
393*4882a593Smuzhiyun static struct pci_driver mei_txe_driver = {
394*4882a593Smuzhiyun 	.name = KBUILD_MODNAME,
395*4882a593Smuzhiyun 	.id_table = mei_txe_pci_tbl,
396*4882a593Smuzhiyun 	.probe = mei_txe_probe,
397*4882a593Smuzhiyun 	.remove = mei_txe_remove,
398*4882a593Smuzhiyun 	.shutdown = mei_txe_shutdown,
399*4882a593Smuzhiyun 	.driver.pm = MEI_TXE_PM_OPS,
400*4882a593Smuzhiyun };
401*4882a593Smuzhiyun 
402*4882a593Smuzhiyun module_pci_driver(mei_txe_driver);
403*4882a593Smuzhiyun 
404*4882a593Smuzhiyun MODULE_AUTHOR("Intel Corporation");
405*4882a593Smuzhiyun MODULE_DESCRIPTION("Intel(R) Trusted Execution Environment Interface");
406*4882a593Smuzhiyun MODULE_LICENSE("GPL v2");
407