xref: /OK3568_Linux_fs/kernel/drivers/pci/pcie/pme.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun  * PCIe Native PME support
4*4882a593Smuzhiyun  *
5*4882a593Smuzhiyun  * Copyright (C) 2007 - 2009 Intel Corp
6*4882a593Smuzhiyun  * Copyright (C) 2007 - 2009 Shaohua Li <shaohua.li@intel.com>
7*4882a593Smuzhiyun  * Copyright (C) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
8*4882a593Smuzhiyun  */
9*4882a593Smuzhiyun 
10*4882a593Smuzhiyun #define dev_fmt(fmt) "PME: " fmt
11*4882a593Smuzhiyun 
12*4882a593Smuzhiyun #include <linux/pci.h>
13*4882a593Smuzhiyun #include <linux/kernel.h>
14*4882a593Smuzhiyun #include <linux/errno.h>
15*4882a593Smuzhiyun #include <linux/slab.h>
16*4882a593Smuzhiyun #include <linux/init.h>
17*4882a593Smuzhiyun #include <linux/interrupt.h>
18*4882a593Smuzhiyun #include <linux/device.h>
19*4882a593Smuzhiyun #include <linux/pm_runtime.h>
20*4882a593Smuzhiyun 
21*4882a593Smuzhiyun #include "../pci.h"
22*4882a593Smuzhiyun #include "portdrv.h"
23*4882a593Smuzhiyun 
24*4882a593Smuzhiyun /*
25*4882a593Smuzhiyun  * If this switch is set, MSI will not be used for PCIe PME signaling.  This
26*4882a593Smuzhiyun  * causes the PCIe port driver to use INTx interrupts only, but it turns out
27*4882a593Smuzhiyun  * that using MSI for PCIe PME signaling doesn't play well with PCIe PME-based
28*4882a593Smuzhiyun  * wake-up from system sleep states.
29*4882a593Smuzhiyun  */
30*4882a593Smuzhiyun bool pcie_pme_msi_disabled;
31*4882a593Smuzhiyun 
pcie_pme_setup(char * str)32*4882a593Smuzhiyun static int __init pcie_pme_setup(char *str)
33*4882a593Smuzhiyun {
34*4882a593Smuzhiyun 	if (!strncmp(str, "nomsi", 5))
35*4882a593Smuzhiyun 		pcie_pme_msi_disabled = true;
36*4882a593Smuzhiyun 
37*4882a593Smuzhiyun 	return 1;
38*4882a593Smuzhiyun }
39*4882a593Smuzhiyun __setup("pcie_pme=", pcie_pme_setup);
40*4882a593Smuzhiyun 
/* Per-port state of the PME service, attached via set_service_data(). */
struct pcie_pme_service_data {
	spinlock_t lock;		/* Protects noirq and serializes RTSTA handling. */
	struct pcie_device *srv;	/* Back-pointer to the owning service device. */
	struct work_struct work;	/* Deferred PME processing (pcie_pme_work_fn). */
	bool noirq; /* If set, keep the PME interrupt disabled. */
};
47*4882a593Smuzhiyun 
48*4882a593Smuzhiyun /**
49*4882a593Smuzhiyun  * pcie_pme_interrupt_enable - Enable/disable PCIe PME interrupt generation.
50*4882a593Smuzhiyun  * @dev: PCIe root port or event collector.
51*4882a593Smuzhiyun  * @enable: Enable or disable the interrupt.
52*4882a593Smuzhiyun  */
pcie_pme_interrupt_enable(struct pci_dev * dev,bool enable)53*4882a593Smuzhiyun void pcie_pme_interrupt_enable(struct pci_dev *dev, bool enable)
54*4882a593Smuzhiyun {
55*4882a593Smuzhiyun 	if (enable)
56*4882a593Smuzhiyun 		pcie_capability_set_word(dev, PCI_EXP_RTCTL,
57*4882a593Smuzhiyun 					 PCI_EXP_RTCTL_PMEIE);
58*4882a593Smuzhiyun 	else
59*4882a593Smuzhiyun 		pcie_capability_clear_word(dev, PCI_EXP_RTCTL,
60*4882a593Smuzhiyun 					   PCI_EXP_RTCTL_PMEIE);
61*4882a593Smuzhiyun }
62*4882a593Smuzhiyun 
63*4882a593Smuzhiyun /**
64*4882a593Smuzhiyun  * pcie_pme_walk_bus - Scan a PCI bus for devices asserting PME#.
65*4882a593Smuzhiyun  * @bus: PCI bus to scan.
66*4882a593Smuzhiyun  *
67*4882a593Smuzhiyun  * Scan given PCI bus and all buses under it for devices asserting PME#.
68*4882a593Smuzhiyun  */
pcie_pme_walk_bus(struct pci_bus * bus)69*4882a593Smuzhiyun static bool pcie_pme_walk_bus(struct pci_bus *bus)
70*4882a593Smuzhiyun {
71*4882a593Smuzhiyun 	struct pci_dev *dev;
72*4882a593Smuzhiyun 	bool ret = false;
73*4882a593Smuzhiyun 
74*4882a593Smuzhiyun 	list_for_each_entry(dev, &bus->devices, bus_list) {
75*4882a593Smuzhiyun 		/* Skip PCIe devices in case we started from a root port. */
76*4882a593Smuzhiyun 		if (!pci_is_pcie(dev) && pci_check_pme_status(dev)) {
77*4882a593Smuzhiyun 			if (dev->pme_poll)
78*4882a593Smuzhiyun 				dev->pme_poll = false;
79*4882a593Smuzhiyun 
80*4882a593Smuzhiyun 			pci_wakeup_event(dev);
81*4882a593Smuzhiyun 			pm_request_resume(&dev->dev);
82*4882a593Smuzhiyun 			ret = true;
83*4882a593Smuzhiyun 		}
84*4882a593Smuzhiyun 
85*4882a593Smuzhiyun 		if (dev->subordinate && pcie_pme_walk_bus(dev->subordinate))
86*4882a593Smuzhiyun 			ret = true;
87*4882a593Smuzhiyun 	}
88*4882a593Smuzhiyun 
89*4882a593Smuzhiyun 	return ret;
90*4882a593Smuzhiyun }
91*4882a593Smuzhiyun 
92*4882a593Smuzhiyun /**
93*4882a593Smuzhiyun  * pcie_pme_from_pci_bridge - Check if PCIe-PCI bridge generated a PME.
94*4882a593Smuzhiyun  * @bus: Secondary bus of the bridge.
95*4882a593Smuzhiyun  * @devfn: Device/function number to check.
96*4882a593Smuzhiyun  *
97*4882a593Smuzhiyun  * PME from PCI devices under a PCIe-PCI bridge may be converted to an in-band
98*4882a593Smuzhiyun  * PCIe PME message.  In such that case the bridge should use the Requester ID
99*4882a593Smuzhiyun  * of device/function number 0 on its secondary bus.
100*4882a593Smuzhiyun  */
pcie_pme_from_pci_bridge(struct pci_bus * bus,u8 devfn)101*4882a593Smuzhiyun static bool pcie_pme_from_pci_bridge(struct pci_bus *bus, u8 devfn)
102*4882a593Smuzhiyun {
103*4882a593Smuzhiyun 	struct pci_dev *dev;
104*4882a593Smuzhiyun 	bool found = false;
105*4882a593Smuzhiyun 
106*4882a593Smuzhiyun 	if (devfn)
107*4882a593Smuzhiyun 		return false;
108*4882a593Smuzhiyun 
109*4882a593Smuzhiyun 	dev = pci_dev_get(bus->self);
110*4882a593Smuzhiyun 	if (!dev)
111*4882a593Smuzhiyun 		return false;
112*4882a593Smuzhiyun 
113*4882a593Smuzhiyun 	if (pci_is_pcie(dev) && pci_pcie_type(dev) == PCI_EXP_TYPE_PCI_BRIDGE) {
114*4882a593Smuzhiyun 		down_read(&pci_bus_sem);
115*4882a593Smuzhiyun 		if (pcie_pme_walk_bus(bus))
116*4882a593Smuzhiyun 			found = true;
117*4882a593Smuzhiyun 		up_read(&pci_bus_sem);
118*4882a593Smuzhiyun 	}
119*4882a593Smuzhiyun 
120*4882a593Smuzhiyun 	pci_dev_put(dev);
121*4882a593Smuzhiyun 	return found;
122*4882a593Smuzhiyun }
123*4882a593Smuzhiyun 
/**
 * pcie_pme_handle_request - Find device that generated PME and handle it.
 * @port: Root port or event collector that generated the PME interrupt.
 * @req_id: PCIe Requester ID of the device that generated the PME.
 */
static void pcie_pme_handle_request(struct pci_dev *port, u16 req_id)
{
	/* Requester ID encodes bus number in the high byte, devfn in the low. */
	u8 busnr = req_id >> 8, devfn = req_id & 0xff;
	struct pci_bus *bus;
	struct pci_dev *dev;
	bool found = false;

	/* First, check if the PME is from the root port itself. */
	if (port->devfn == devfn && port->bus->number == busnr) {
		if (port->pme_poll)
			port->pme_poll = false;

		if (pci_check_pme_status(port)) {
			pm_request_resume(&port->dev);
			found = true;
		} else {
			/*
			 * Apparently, the root port generated the PME on behalf
			 * of a non-PCIe device downstream.  If this is done by
			 * a root port, the Requester ID field in its status
			 * register may contain either the root port's, or the
			 * source device's information (PCI Express Base
			 * Specification, Rev. 2.0, Section 6.1.9).
			 */
			down_read(&pci_bus_sem);
			found = pcie_pme_walk_bus(port->subordinate);
			up_read(&pci_bus_sem);
		}
		goto out;
	}

	/* Second, find the bus the source device is on. */
	bus = pci_find_bus(pci_domain_nr(port->bus), busnr);
	if (!bus)
		goto out;

	/* Next, check if the PME is from a PCIe-PCI bridge. */
	found = pcie_pme_from_pci_bridge(bus, devfn);
	if (found)
		goto out;

	/* Finally, try to find the PME source on the bus. */
	down_read(&pci_bus_sem);
	list_for_each_entry(dev, &bus->devices, bus_list) {
		/*
		 * Take a reference before the matching test so that, on a
		 * match, 'dev' remains valid after pci_bus_sem is dropped;
		 * the non-matching path drops the reference again below.
		 */
		pci_dev_get(dev);
		if (dev->devfn == devfn) {
			found = true;
			break;
		}
		pci_dev_put(dev);
	}
	up_read(&pci_bus_sem);

	if (found) {
		/* The device is there, but we have to check its PME status. */
		found = pci_check_pme_status(dev);
		if (found) {
			if (dev->pme_poll)
				dev->pme_poll = false;

			pci_wakeup_event(dev);
			pm_request_resume(&dev->dev);
		}
		/* Balances the pci_dev_get() taken in the loop above. */
		pci_dev_put(dev);
	} else if (devfn) {
		/*
		 * The device is not there, but we can still try to recover by
		 * assuming that the PME was reported by a PCIe-PCI bridge that
		 * used devfn different from zero.
		 */
		pci_info(port, "interrupt generated for non-existent device %02x:%02x.%d\n",
			 busnr, PCI_SLOT(devfn), PCI_FUNC(devfn));
		found = pcie_pme_from_pci_bridge(bus, 0);
	}

 out:
	if (!found)
		pci_info(port, "Spurious native interrupt!\n");
}
208*4882a593Smuzhiyun 
/**
 * pcie_pme_work_fn - Work handler for PCIe PME interrupt.
 * @work: Work structure giving access to service data.
 *
 * Drains all pending PME events from the port's Root Status register and
 * re-enables the PME interrupt (which pcie_pme_irq() disabled) when done,
 * unless ->noirq was set in the meantime.
 */
static void pcie_pme_work_fn(struct work_struct *work)
{
	struct pcie_pme_service_data *data =
			container_of(work, struct pcie_pme_service_data, work);
	struct pci_dev *port = data->srv->port;
	u32 rtsta;

	spin_lock_irq(&data->lock);

	for (;;) {
		/* Suspend/remove asked us to keep the interrupt disabled. */
		if (data->noirq)
			break;

		pcie_capability_read_dword(port, PCI_EXP_RTSTA, &rtsta);
		/* All-ones means the config read failed; give up. */
		if (rtsta == (u32) ~0)
			break;

		if (rtsta & PCI_EXP_RTSTA_PME) {
			/*
			 * Clear PME status of the port.  If there are other
			 * pending PMEs, the status will be set again.
			 */
			pcie_clear_root_pme_status(port);

			/* Drop the lock: handling may sleep/take pci_bus_sem. */
			spin_unlock_irq(&data->lock);
			pcie_pme_handle_request(port, rtsta & 0xffff);
			spin_lock_irq(&data->lock);

			continue;
		}

		/* No need to loop if there are no more PMEs pending. */
		if (!(rtsta & PCI_EXP_RTSTA_PENDING))
			break;

		/* PME pending but not yet visible; briefly yield and retry. */
		spin_unlock_irq(&data->lock);
		cpu_relax();
		spin_lock_irq(&data->lock);
	}

	if (!data->noirq)
		pcie_pme_interrupt_enable(port, true);

	spin_unlock_irq(&data->lock);
}
258*4882a593Smuzhiyun 
259*4882a593Smuzhiyun /**
260*4882a593Smuzhiyun  * pcie_pme_irq - Interrupt handler for PCIe root port PME interrupt.
261*4882a593Smuzhiyun  * @irq: Interrupt vector.
262*4882a593Smuzhiyun  * @context: Interrupt context pointer.
263*4882a593Smuzhiyun  */
pcie_pme_irq(int irq,void * context)264*4882a593Smuzhiyun static irqreturn_t pcie_pme_irq(int irq, void *context)
265*4882a593Smuzhiyun {
266*4882a593Smuzhiyun 	struct pci_dev *port;
267*4882a593Smuzhiyun 	struct pcie_pme_service_data *data;
268*4882a593Smuzhiyun 	u32 rtsta;
269*4882a593Smuzhiyun 	unsigned long flags;
270*4882a593Smuzhiyun 
271*4882a593Smuzhiyun 	port = ((struct pcie_device *)context)->port;
272*4882a593Smuzhiyun 	data = get_service_data((struct pcie_device *)context);
273*4882a593Smuzhiyun 
274*4882a593Smuzhiyun 	spin_lock_irqsave(&data->lock, flags);
275*4882a593Smuzhiyun 	pcie_capability_read_dword(port, PCI_EXP_RTSTA, &rtsta);
276*4882a593Smuzhiyun 
277*4882a593Smuzhiyun 	if (rtsta == (u32) ~0 || !(rtsta & PCI_EXP_RTSTA_PME)) {
278*4882a593Smuzhiyun 		spin_unlock_irqrestore(&data->lock, flags);
279*4882a593Smuzhiyun 		return IRQ_NONE;
280*4882a593Smuzhiyun 	}
281*4882a593Smuzhiyun 
282*4882a593Smuzhiyun 	pcie_pme_interrupt_enable(port, false);
283*4882a593Smuzhiyun 	spin_unlock_irqrestore(&data->lock, flags);
284*4882a593Smuzhiyun 
285*4882a593Smuzhiyun 	/* We don't use pm_wq, because it's freezable. */
286*4882a593Smuzhiyun 	schedule_work(&data->work);
287*4882a593Smuzhiyun 
288*4882a593Smuzhiyun 	return IRQ_HANDLED;
289*4882a593Smuzhiyun }
290*4882a593Smuzhiyun 
/**
 * pcie_pme_can_wakeup - Set the wakeup capability flag.
 * @dev: PCI device to handle.
 * @ign: Ignored (pci_walk_bus() callback signature).
 *
 * Always returns 0 so that a pci_walk_bus() traversal continues.
 */
static int pcie_pme_can_wakeup(struct pci_dev *dev, void *ign)
{
	device_set_wakeup_capable(&dev->dev, true);
	return 0;
}
301*4882a593Smuzhiyun 
/**
 * pcie_pme_mark_devices - Set the wakeup flag for devices below a port.
 * @port: PCIe root port or event collector to handle.
 *
 * For each device below given root port, including the port itself (or for each
 * root complex integrated endpoint if @port is a root complex event collector)
 * set the flag indicating that it can signal run-time wake-up events.
 */
static void pcie_pme_mark_devices(struct pci_dev *port)
{
	pcie_pme_can_wakeup(port, NULL);
	/* Event collectors have no subordinate bus; only recurse if one exists. */
	if (port->subordinate)
		pci_walk_bus(port->subordinate, pcie_pme_can_wakeup, NULL);
}
316*4882a593Smuzhiyun 
317*4882a593Smuzhiyun /**
318*4882a593Smuzhiyun  * pcie_pme_probe - Initialize PCIe PME service for given root port.
319*4882a593Smuzhiyun  * @srv: PCIe service to initialize.
320*4882a593Smuzhiyun  */
pcie_pme_probe(struct pcie_device * srv)321*4882a593Smuzhiyun static int pcie_pme_probe(struct pcie_device *srv)
322*4882a593Smuzhiyun {
323*4882a593Smuzhiyun 	struct pci_dev *port;
324*4882a593Smuzhiyun 	struct pcie_pme_service_data *data;
325*4882a593Smuzhiyun 	int ret;
326*4882a593Smuzhiyun 
327*4882a593Smuzhiyun 	data = kzalloc(sizeof(*data), GFP_KERNEL);
328*4882a593Smuzhiyun 	if (!data)
329*4882a593Smuzhiyun 		return -ENOMEM;
330*4882a593Smuzhiyun 
331*4882a593Smuzhiyun 	spin_lock_init(&data->lock);
332*4882a593Smuzhiyun 	INIT_WORK(&data->work, pcie_pme_work_fn);
333*4882a593Smuzhiyun 	data->srv = srv;
334*4882a593Smuzhiyun 	set_service_data(srv, data);
335*4882a593Smuzhiyun 
336*4882a593Smuzhiyun 	port = srv->port;
337*4882a593Smuzhiyun 	pcie_pme_interrupt_enable(port, false);
338*4882a593Smuzhiyun 	pcie_clear_root_pme_status(port);
339*4882a593Smuzhiyun 
340*4882a593Smuzhiyun 	ret = request_irq(srv->irq, pcie_pme_irq, IRQF_SHARED, "PCIe PME", srv);
341*4882a593Smuzhiyun 	if (ret) {
342*4882a593Smuzhiyun 		kfree(data);
343*4882a593Smuzhiyun 		return ret;
344*4882a593Smuzhiyun 	}
345*4882a593Smuzhiyun 
346*4882a593Smuzhiyun 	pci_info(port, "Signaling with IRQ %d\n", srv->irq);
347*4882a593Smuzhiyun 
348*4882a593Smuzhiyun 	pcie_pme_mark_devices(port);
349*4882a593Smuzhiyun 	pcie_pme_interrupt_enable(port, true);
350*4882a593Smuzhiyun 	return 0;
351*4882a593Smuzhiyun }
352*4882a593Smuzhiyun 
pcie_pme_check_wakeup(struct pci_bus * bus)353*4882a593Smuzhiyun static bool pcie_pme_check_wakeup(struct pci_bus *bus)
354*4882a593Smuzhiyun {
355*4882a593Smuzhiyun 	struct pci_dev *dev;
356*4882a593Smuzhiyun 
357*4882a593Smuzhiyun 	if (!bus)
358*4882a593Smuzhiyun 		return false;
359*4882a593Smuzhiyun 
360*4882a593Smuzhiyun 	list_for_each_entry(dev, &bus->devices, bus_list)
361*4882a593Smuzhiyun 		if (device_may_wakeup(&dev->dev)
362*4882a593Smuzhiyun 		    || pcie_pme_check_wakeup(dev->subordinate))
363*4882a593Smuzhiyun 			return true;
364*4882a593Smuzhiyun 
365*4882a593Smuzhiyun 	return false;
366*4882a593Smuzhiyun }
367*4882a593Smuzhiyun 
/*
 * Mask the port's PME interrupt, clear any latched status, and set ->noirq
 * under the lock so pcie_pme_work_fn() stops processing and does not
 * re-enable the interrupt.
 */
static void pcie_pme_disable_interrupt(struct pci_dev *port,
				       struct pcie_pme_service_data *data)
{
	spin_lock_irq(&data->lock);
	pcie_pme_interrupt_enable(port, false);
	pcie_clear_root_pme_status(port);
	data->noirq = true;
	spin_unlock_irq(&data->lock);
}
377*4882a593Smuzhiyun 
378*4882a593Smuzhiyun /**
379*4882a593Smuzhiyun  * pcie_pme_suspend - Suspend PCIe PME service device.
380*4882a593Smuzhiyun  * @srv: PCIe service device to suspend.
381*4882a593Smuzhiyun  */
pcie_pme_suspend(struct pcie_device * srv)382*4882a593Smuzhiyun static int pcie_pme_suspend(struct pcie_device *srv)
383*4882a593Smuzhiyun {
384*4882a593Smuzhiyun 	struct pcie_pme_service_data *data = get_service_data(srv);
385*4882a593Smuzhiyun 	struct pci_dev *port = srv->port;
386*4882a593Smuzhiyun 	bool wakeup;
387*4882a593Smuzhiyun 	int ret;
388*4882a593Smuzhiyun 
389*4882a593Smuzhiyun 	if (device_may_wakeup(&port->dev)) {
390*4882a593Smuzhiyun 		wakeup = true;
391*4882a593Smuzhiyun 	} else {
392*4882a593Smuzhiyun 		down_read(&pci_bus_sem);
393*4882a593Smuzhiyun 		wakeup = pcie_pme_check_wakeup(port->subordinate);
394*4882a593Smuzhiyun 		up_read(&pci_bus_sem);
395*4882a593Smuzhiyun 	}
396*4882a593Smuzhiyun 	if (wakeup) {
397*4882a593Smuzhiyun 		ret = enable_irq_wake(srv->irq);
398*4882a593Smuzhiyun 		if (!ret)
399*4882a593Smuzhiyun 			return 0;
400*4882a593Smuzhiyun 	}
401*4882a593Smuzhiyun 
402*4882a593Smuzhiyun 	pcie_pme_disable_interrupt(port, data);
403*4882a593Smuzhiyun 
404*4882a593Smuzhiyun 	synchronize_irq(srv->irq);
405*4882a593Smuzhiyun 
406*4882a593Smuzhiyun 	return 0;
407*4882a593Smuzhiyun }
408*4882a593Smuzhiyun 
409*4882a593Smuzhiyun /**
410*4882a593Smuzhiyun  * pcie_pme_resume - Resume PCIe PME service device.
411*4882a593Smuzhiyun  * @srv: PCIe service device to resume.
412*4882a593Smuzhiyun  */
pcie_pme_resume(struct pcie_device * srv)413*4882a593Smuzhiyun static int pcie_pme_resume(struct pcie_device *srv)
414*4882a593Smuzhiyun {
415*4882a593Smuzhiyun 	struct pcie_pme_service_data *data = get_service_data(srv);
416*4882a593Smuzhiyun 
417*4882a593Smuzhiyun 	spin_lock_irq(&data->lock);
418*4882a593Smuzhiyun 	if (data->noirq) {
419*4882a593Smuzhiyun 		struct pci_dev *port = srv->port;
420*4882a593Smuzhiyun 
421*4882a593Smuzhiyun 		pcie_clear_root_pme_status(port);
422*4882a593Smuzhiyun 		pcie_pme_interrupt_enable(port, true);
423*4882a593Smuzhiyun 		data->noirq = false;
424*4882a593Smuzhiyun 	} else {
425*4882a593Smuzhiyun 		disable_irq_wake(srv->irq);
426*4882a593Smuzhiyun 	}
427*4882a593Smuzhiyun 	spin_unlock_irq(&data->lock);
428*4882a593Smuzhiyun 
429*4882a593Smuzhiyun 	return 0;
430*4882a593Smuzhiyun }
431*4882a593Smuzhiyun 
/**
 * pcie_pme_remove - Prepare PCIe PME service device for removal.
 * @srv: PCIe service device to remove.
 *
 * Teardown order matters: first mask the interrupt and set ->noirq, then
 * release the IRQ so the handler can no longer run, then flush any work it
 * already queued, and only then free the service data.
 */
static void pcie_pme_remove(struct pcie_device *srv)
{
	struct pcie_pme_service_data *data = get_service_data(srv);

	pcie_pme_disable_interrupt(srv->port, data);
	free_irq(srv->irq, srv);
	cancel_work_sync(&data->work);
	kfree(data);
}
445*4882a593Smuzhiyun 
/* PME port service: bound only to root ports by the portdrv core. */
static struct pcie_port_service_driver pcie_pme_driver = {
	.name		= "pcie_pme",
	.port_type	= PCI_EXP_TYPE_ROOT_PORT,
	.service	= PCIE_PORT_SERVICE_PME,

	.probe		= pcie_pme_probe,
	.suspend	= pcie_pme_suspend,
	.resume		= pcie_pme_resume,
	.remove		= pcie_pme_remove,
};
456*4882a593Smuzhiyun 
/**
 * pcie_pme_init - Register the PCIe PME service driver.
 *
 * Note: the kernel-doc previously named this "pcie_pme_service_init",
 * which does not match the actual function name.
 */
int __init pcie_pme_init(void)
{
	return pcie_port_service_register(&pcie_pme_driver);
}
464