// SPDX-License-Identifier: GPL-2.0
/* Device wakeirq helper functions */
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
#include <linux/pm_wakeirq.h>

#include "power.h"

/**
 * dev_pm_attach_wake_irq - Attach device interrupt as a wake IRQ
 * @dev: Device entry
 * @irq: Device wake-up capable interrupt
 * @wirq: Wake irq specific data
 *
 * Internal function to attach either a device IO interrupt or a
 * dedicated wake-up interrupt as a wake IRQ.
 */
static int dev_pm_attach_wake_irq(struct device *dev, int irq,
				  struct wake_irq *wirq)
{
	unsigned long flags;

	if (!dev || !wirq)
		return -EINVAL;

	spin_lock_irqsave(&dev->power.lock, flags);
	if (dev_WARN_ONCE(dev, dev->power.wakeirq,
			  "wake irq already initialized\n")) {
		spin_unlock_irqrestore(&dev->power.lock, flags);
		return -EEXIST;
	}

	dev->power.wakeirq = wirq;
	device_wakeup_attach_irq(dev, wirq);

	spin_unlock_irqrestore(&dev->power.lock, flags);
	return 0;
}

/**
 * dev_pm_set_wake_irq - Attach device IO interrupt as wake IRQ
 * @dev: Device entry
 * @irq: Device IO interrupt
 *
 * Attach a device IO interrupt as a wake IRQ. The wake IRQ gets
 * automatically configured for wake-up from suspend based
 * on the device specific sysfs wakeup entry. Typically called
 * during driver probe after calling device_init_wakeup().
 */
int dev_pm_set_wake_irq(struct device *dev, int irq)
{
	struct wake_irq *wirq;
	int err;

	if (irq < 0)
		return -EINVAL;

	wirq = kzalloc(sizeof(*wirq), GFP_KERNEL);
	if (!wirq)
		return -ENOMEM;

	wirq->dev = dev;
	wirq->irq = irq;

	err = dev_pm_attach_wake_irq(dev, irq, wirq);
	if (err)
		kfree(wirq);

	return err;
}
EXPORT_SYMBOL_GPL(dev_pm_set_wake_irq);
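
/*
 * Example usage (an illustrative sketch, not part of this file): as the
 * kerneldoc above suggests, a driver would typically call
 * device_init_wakeup() and then dev_pm_set_wake_irq() from its probe
 * path. The foo_probe() name and the IRQ lookup below are hypothetical.
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		int irq = platform_get_irq(pdev, 0);
 *		int err;
 *
 *		if (irq < 0)
 *			return irq;
 *
 *		device_init_wakeup(&pdev->dev, true);
 *
 *		err = dev_pm_set_wake_irq(&pdev->dev, irq);
 *		if (err)
 *			dev_warn(&pdev->dev, "wake irq not available: %d\n", err);
 *
 *		return 0;
 *	}
 */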

/**
 * dev_pm_clear_wake_irq - Detach a device IO interrupt wake IRQ
 * @dev: Device entry
 *
 * Detach a device wake IRQ and free resources.
 *
 * Note that it's OK for drivers to call this without calling
 * dev_pm_set_wake_irq() as not all driver instances may have
 * a wake IRQ configured. This avoids adding wake IRQ specific
 * checks into the drivers.
 */
void dev_pm_clear_wake_irq(struct device *dev)
{
	struct wake_irq *wirq = dev->power.wakeirq;
	unsigned long flags;

	if (!wirq)
		return;

	spin_lock_irqsave(&dev->power.lock, flags);
	device_wakeup_detach_irq(dev);
	dev->power.wakeirq = NULL;
	spin_unlock_irqrestore(&dev->power.lock, flags);

	if (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED) {
		free_irq(wirq->irq, wirq);
		wirq->status &= ~WAKE_IRQ_DEDICATED_MASK;
	}
	kfree(wirq->name);
	kfree(wirq);
}
EXPORT_SYMBOL_GPL(dev_pm_clear_wake_irq);
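
/*
 * Example usage (an illustrative sketch, not part of this file): the
 * matching remove path can call dev_pm_clear_wake_irq() unconditionally,
 * as noted in the kerneldoc above, even if probe never set a wake IRQ.
 * The foo_remove() name is hypothetical.
 *
 *	static int foo_remove(struct platform_device *pdev)
 *	{
 *		dev_pm_clear_wake_irq(&pdev->dev);
 *		device_init_wakeup(&pdev->dev, false);
 *
 *		return 0;
 *	}
 */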

/**
 * handle_threaded_wake_irq - Handler for dedicated wake-up interrupts
 * @irq: Device specific dedicated wake-up interrupt
 * @_wirq: Wake IRQ data
 *
 * Some devices have a separate wake-up interrupt in addition to the
 * device IO interrupt. The wake-up interrupt signals that a device
 * should be woken up from its idle state. This handler uses device
 * specific pm_runtime functions to wake the device, and then it's
 * up to the device to do whatever it needs to. Note that as the
 * device may need to restore context and start up regulators, we
 * use a threaded IRQ.
 *
 * Also note that we are not resending the lost device interrupts.
 * We assume that the wake-up interrupt just needs to wake up the
 * device, and then the device's pm_runtime_resume() can deal with the
 * situation.
 */
static irqreturn_t handle_threaded_wake_irq(int irq, void *_wirq)
{
	struct wake_irq *wirq = _wirq;
	int res;

	/* Maybe abort suspend? */
	if (irqd_is_wakeup_set(irq_get_irq_data(irq))) {
		pm_wakeup_event(wirq->dev, 0);

		return IRQ_HANDLED;
	}

	/* We don't want RPM_ASYNC or RPM_NOWAIT here */
	res = pm_runtime_resume(wirq->dev);
	if (res < 0)
		dev_warn(wirq->dev,
			 "wake IRQ with no resume: %i\n", res);

	return IRQ_HANDLED;
}

/**
 * dev_pm_set_dedicated_wake_irq - Request a dedicated wake-up interrupt
 * @dev: Device entry
 * @irq: Device wake-up interrupt
 *
 * Unless your hardware has separate wake-up interrupts in addition
 * to the device IO interrupts, you don't need this.
 *
 * Sets up a threaded interrupt handler for a device that has
 * a dedicated wake-up interrupt in addition to the device IO
 * interrupt.
 *
 * The interrupt starts disabled, and needs to be managed for
 * the device by the bus code or the device driver using the
 * dev_pm_enable_wake_irq() and dev_pm_disable_wake_irq()
 * functions.
 */
int dev_pm_set_dedicated_wake_irq(struct device *dev, int irq)
{
	struct wake_irq *wirq;
	int err;

	if (irq < 0)
		return -EINVAL;

	wirq = kzalloc(sizeof(*wirq), GFP_KERNEL);
	if (!wirq)
		return -ENOMEM;

	wirq->name = kasprintf(GFP_KERNEL, "%s:wakeup", dev_name(dev));
	if (!wirq->name) {
		err = -ENOMEM;
		goto err_free;
	}

	wirq->dev = dev;
	wirq->irq = irq;
	irq_set_status_flags(irq, IRQ_NOAUTOEN);

	/* Prevent deferred spurious wakeirqs with disable_irq_nosync() */
	irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY);

	/*
	 * Consumer device may need to power up and restore state
	 * so we use a threaded irq.
	 */
	err = request_threaded_irq(irq, NULL, handle_threaded_wake_irq,
				   IRQF_ONESHOT, wirq->name, wirq);
	if (err)
		goto err_free_name;

	err = dev_pm_attach_wake_irq(dev, irq, wirq);
	if (err)
		goto err_free_irq;

	wirq->status = WAKE_IRQ_DEDICATED_ALLOCATED;

	return err;

err_free_irq:
	free_irq(irq, wirq);
err_free_name:
	kfree(wirq->name);
err_free:
	kfree(wirq);

	return err;
}
EXPORT_SYMBOL_GPL(dev_pm_set_dedicated_wake_irq);
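
/*
 * Example usage (an illustrative sketch, not part of this file): a driver
 * whose hardware has a separate wake-up line could request it at probe
 * time. The "wakeup" interrupt name below is a hypothetical devicetree
 * binding, not something this API requires.
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		int wakeirq = platform_get_irq_byname(pdev, "wakeup");
 *		int err;
 *
 *		if (wakeirq < 0)
 *			return wakeirq;
 *
 *		device_init_wakeup(&pdev->dev, true);
 *
 *		err = dev_pm_set_dedicated_wake_irq(&pdev->dev, wakeirq);
 *		if (err)
 *			return err;
 *
 *		return 0;
 *	}
 */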

/**
 * dev_pm_enable_wake_irq - Enable device wake-up interrupt
 * @dev: Device
 *
 * Optionally called from the bus code or the device driver for
 * runtime_resume() to override the PM runtime core managed wake-up
 * interrupt handling to enable the wake-up interrupt.
 *
 * Note that for runtime_suspend() the wake-up interrupts
 * should be unconditionally enabled, unlike for suspend()
 * where enabling is conditional.
 */
void dev_pm_enable_wake_irq(struct device *dev)
{
	struct wake_irq *wirq = dev->power.wakeirq;

	if (wirq && (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED))
		enable_irq(wirq->irq);
}
EXPORT_SYMBOL_GPL(dev_pm_enable_wake_irq);

/**
 * dev_pm_disable_wake_irq - Disable device wake-up interrupt
 * @dev: Device
 *
 * Optionally called from the bus code or the device driver for
 * runtime_suspend() to override the PM runtime core managed wake-up
 * interrupt handling to disable the wake-up interrupt.
 */
void dev_pm_disable_wake_irq(struct device *dev)
{
	struct wake_irq *wirq = dev->power.wakeirq;

	if (wirq && (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED))
		disable_irq_nosync(wirq->irq);
}
EXPORT_SYMBOL_GPL(dev_pm_disable_wake_irq);
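
/*
 * Example usage (an illustrative sketch, not part of this file): per the
 * kerneldoc above, a bus or driver that wants to override the PM runtime
 * core's managed handling could call these helpers from its runtime PM
 * callbacks. Whether such an override is needed at all depends on the
 * hardware; the foo_* names are hypothetical.
 *
 *	static int foo_runtime_suspend(struct device *dev)
 *	{
 *		dev_pm_disable_wake_irq(dev);
 *		return 0;
 *	}
 *
 *	static int foo_runtime_resume(struct device *dev)
 *	{
 *		dev_pm_enable_wake_irq(dev);
 *		return 0;
 *	}
 */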

/**
 * dev_pm_enable_wake_irq_check - Checks and enables wake-up interrupt
 * @dev: Device
 * @can_change_status: Can change wake-up interrupt status
 *
 * Enables wakeirq conditionally. We need to enable the wake-up interrupt
 * lazily on the first rpm_suspend(). This is needed as the consumer device
 * starts in RPM_SUSPENDED state, and the first pm_runtime_get() would
 * otherwise try to disable an already disabled wakeirq. The wake-up
 * interrupt starts disabled with IRQ_NOAUTOEN set.
 *
 * Should only be called from the rpm_suspend() and rpm_resume() paths.
 * Caller must hold &dev->power.lock to change wirq->status.
 */
void dev_pm_enable_wake_irq_check(struct device *dev,
				  bool can_change_status)
{
	struct wake_irq *wirq = dev->power.wakeirq;

	if (!wirq || !(wirq->status & WAKE_IRQ_DEDICATED_MASK))
		return;

	if (likely(wirq->status & WAKE_IRQ_DEDICATED_MANAGED)) {
		goto enable;
	} else if (can_change_status) {
		wirq->status |= WAKE_IRQ_DEDICATED_MANAGED;
		goto enable;
	}

	return;

enable:
	enable_irq(wirq->irq);
}

/**
 * dev_pm_disable_wake_irq_check - Checks and disables wake-up interrupt
 * @dev: Device
 *
 * Disables wake-up interrupt conditionally based on status.
 * Should only be called from the rpm_suspend() and rpm_resume() paths.
 */
void dev_pm_disable_wake_irq_check(struct device *dev)
{
	struct wake_irq *wirq = dev->power.wakeirq;

	if (!wirq || !(wirq->status & WAKE_IRQ_DEDICATED_MASK))
		return;

	if (wirq->status & WAKE_IRQ_DEDICATED_MANAGED)
		disable_irq_nosync(wirq->irq);
}
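
/*
 * A rough sketch of how the two _check helpers above are meant to pair up
 * in the PM runtime core, based only on the kerneldoc constraints in this
 * file (enable lazily from rpm_suspend(), disable from rpm_resume()); the
 * real logic in runtime.c is more involved than shown here, and the
 * callback helpers below are hypothetical placeholders.
 *
 *	static int rpm_suspend_sketch(struct device *dev)
 *	{
 *		int retval;
 *
 *		dev_pm_enable_wake_irq_check(dev, true);
 *		retval = call_runtime_suspend_callback(dev); // hypothetical
 *		if (retval)
 *			dev_pm_disable_wake_irq_check(dev);
 *
 *		return retval;
 *	}
 *
 *	static int rpm_resume_sketch(struct device *dev)
 *	{
 *		dev_pm_disable_wake_irq_check(dev);
 *		return call_runtime_resume_callback(dev); // hypothetical
 *	}
 */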

/**
 * dev_pm_arm_wake_irq - Arm device wake-up
 * @wirq: Device wake-up interrupt
 *
 * Sets up the wake-up event conditionally based on
 * device_may_wakeup().
 */
void dev_pm_arm_wake_irq(struct wake_irq *wirq)
{
	if (!wirq)
		return;

	if (device_may_wakeup(wirq->dev)) {
		if (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED &&
		    !pm_runtime_status_suspended(wirq->dev))
			enable_irq(wirq->irq);

		enable_irq_wake(wirq->irq);
	}
}

/**
 * dev_pm_disarm_wake_irq - Disarm device wake-up
 * @wirq: Device wake-up interrupt
 *
 * Clears up the wake-up event conditionally based on
 * device_may_wakeup().
 */
void dev_pm_disarm_wake_irq(struct wake_irq *wirq)
{
	if (!wirq)
		return;

	if (device_may_wakeup(wirq->dev)) {
		disable_irq_wake(wirq->irq);

		if (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED &&
		    !pm_runtime_status_suspended(wirq->dev))
			disable_irq_nosync(wirq->irq);
	}
}