// SPDX-License-Identifier: GPL-2.0
/*
 * Devices PM QoS constraints management
 *
 * Copyright (C) 2011 Texas Instruments, Inc.
 *
 * This module exposes the interface to kernel space for specifying
 * per-device PM QoS dependencies. It provides infrastructure for registration
 * of:
 *
 * Dependents on a QoS value : register requests
 * Watchers of QoS value : get notified when target QoS value changes
 *
 * This QoS design is best effort based. Dependents register their QoS needs.
 * Watchers register to keep track of the current QoS needs of the system.
 * Watchers can register a per-device notification callback using the
 * dev_pm_qos_*_notifier API. The notification chain data is stored in the
 * per-device constraint data struct.
 *
 * Note about the per-device constraint data struct allocation:
 * . The per-device constraints data struct ptr is stored into the device
 *   dev_pm_info.
 * . To minimize the data usage by the per-device constraints, the data struct
 *   is only allocated at the first call to dev_pm_qos_add_request.
 * . The data is later free'd when the device is removed from the system.
 * . A global mutex protects the constraints users from the data being
 *   allocated and free'd.
 */

#include <linux/pm_qos.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/export.h>
#include <linux/pm_runtime.h>
#include <linux/err.h>
#include <trace/events/power.h>

#include "power.h"

static DEFINE_MUTEX(dev_pm_qos_mtx);
static DEFINE_MUTEX(dev_pm_qos_sysfs_mtx);

/**
 * __dev_pm_qos_flags - Check PM QoS flags for a given device.
 * @dev: Device to check the PM QoS flags for.
 * @mask: Flags to check against.
 *
 * This routine must be called with dev->power.lock held.
 */
enum pm_qos_flags_status __dev_pm_qos_flags(struct device *dev, s32 mask)
{
	struct dev_pm_qos *qos = dev->power.qos;
	struct pm_qos_flags *pqf;
	s32 val;

	lockdep_assert_held(&dev->power.lock);

	if (IS_ERR_OR_NULL(qos))
		return PM_QOS_FLAGS_UNDEFINED;

	pqf = &qos->flags;
	if (list_empty(&pqf->list))
		return PM_QOS_FLAGS_UNDEFINED;

	val = pqf->effective_flags & mask;
	if (val)
		return (val == mask) ? PM_QOS_FLAGS_ALL : PM_QOS_FLAGS_SOME;

	return PM_QOS_FLAGS_NONE;
}

/**
 * dev_pm_qos_flags - Check PM QoS flags for a given device (locked).
 * @dev: Device to check the PM QoS flags for.
 * @mask: Flags to check against.
 */
enum pm_qos_flags_status dev_pm_qos_flags(struct device *dev, s32 mask)
{
	unsigned long irqflags;
	enum pm_qos_flags_status ret;

	spin_lock_irqsave(&dev->power.lock, irqflags);
	ret = __dev_pm_qos_flags(dev, mask);
	spin_unlock_irqrestore(&dev->power.lock, irqflags);

	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_flags);
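
/*
 * Illustrative use (a hypothetical caller, not part of this file): before
 * powering off a device, a driver could check whether every holder of a
 * flags request insists on keeping power, via PM_QOS_FLAG_NO_POWER_OFF:
 *
 *	if (dev_pm_qos_flags(dev, PM_QOS_FLAG_NO_POWER_OFF) ==
 *			PM_QOS_FLAGS_ALL)
 *		return -EBUSY;
 */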

/**
 * __dev_pm_qos_resume_latency - Get resume latency constraint for a given device.
 * @dev: Device to get the PM QoS constraint value for.
 *
 * This routine must be called with dev->power.lock held.
 */
s32 __dev_pm_qos_resume_latency(struct device *dev)
{
	lockdep_assert_held(&dev->power.lock);

	return dev_pm_qos_raw_resume_latency(dev);
}

/**
 * dev_pm_qos_read_value - Get PM QoS constraint for a given device (locked).
 * @dev: Device to get the PM QoS constraint value for.
 * @type: QoS request type.
 */
s32 dev_pm_qos_read_value(struct device *dev, enum dev_pm_qos_req_type type)
{
	struct dev_pm_qos *qos = dev->power.qos;
	unsigned long flags;
	s32 ret;

	spin_lock_irqsave(&dev->power.lock, flags);

	switch (type) {
	case DEV_PM_QOS_RESUME_LATENCY:
		ret = IS_ERR_OR_NULL(qos) ? PM_QOS_RESUME_LATENCY_NO_CONSTRAINT
			: pm_qos_read_value(&qos->resume_latency);
		break;
	case DEV_PM_QOS_MIN_FREQUENCY:
		ret = IS_ERR_OR_NULL(qos) ? PM_QOS_MIN_FREQUENCY_DEFAULT_VALUE
			: freq_qos_read_value(&qos->freq, FREQ_QOS_MIN);
		break;
	case DEV_PM_QOS_MAX_FREQUENCY:
		ret = IS_ERR_OR_NULL(qos) ? PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE
			: freq_qos_read_value(&qos->freq, FREQ_QOS_MAX);
		break;
	default:
		WARN_ON(1);
		ret = 0;
	}

	spin_unlock_irqrestore(&dev->power.lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_read_value);
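
/*
 * Illustrative use (a hypothetical caller, not part of this file): a bus
 * type or governor deciding how deep a device may sleep could read the
 * aggregated resume latency constraint, e.g.:
 *
 *	s32 limit = dev_pm_qos_read_value(dev, DEV_PM_QOS_RESUME_LATENCY);
 *
 *	if (limit != PM_QOS_RESUME_LATENCY_NO_CONSTRAINT)
 *		constrained = true;
 */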

/**
 * apply_constraint - Add/modify/remove device PM QoS request.
 * @req: Constraint request to apply
 * @action: Action to perform (add/update/remove).
 * @value: Value to assign to the QoS request.
 *
 * Internal function to update the constraints list using the PM QoS core
 * code and if needed call the per-device callbacks.
 */
static int apply_constraint(struct dev_pm_qos_request *req,
			    enum pm_qos_req_action action, s32 value)
{
	struct dev_pm_qos *qos = req->dev->power.qos;
	int ret;

	switch (req->type) {
	case DEV_PM_QOS_RESUME_LATENCY:
		if (WARN_ON(action != PM_QOS_REMOVE_REQ && value < 0))
			value = 0;

		ret = pm_qos_update_target(&qos->resume_latency,
					   &req->data.pnode, action, value);
		break;
	case DEV_PM_QOS_LATENCY_TOLERANCE:
		ret = pm_qos_update_target(&qos->latency_tolerance,
					   &req->data.pnode, action, value);
		if (ret) {
			value = pm_qos_read_value(&qos->latency_tolerance);
			req->dev->power.set_latency_tolerance(req->dev, value);
		}
		break;
	case DEV_PM_QOS_MIN_FREQUENCY:
	case DEV_PM_QOS_MAX_FREQUENCY:
		ret = freq_qos_apply(&req->data.freq, action, value);
		break;
	case DEV_PM_QOS_FLAGS:
		ret = pm_qos_update_flags(&qos->flags, &req->data.flr,
					  action, value);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}

/*
 * dev_pm_qos_constraints_allocate
 * @dev: device to allocate data for
 *
 * Called at the first call to add_request, for constraint data allocation
 * Must be called with the dev_pm_qos_mtx mutex held
 */
static int dev_pm_qos_constraints_allocate(struct device *dev)
{
	struct dev_pm_qos *qos;
	struct pm_qos_constraints *c;
	struct blocking_notifier_head *n;

	qos = kzalloc(sizeof(*qos), GFP_KERNEL);
	if (!qos)
		return -ENOMEM;

	n = kzalloc(3 * sizeof(*n), GFP_KERNEL);
	if (!n) {
		kfree(qos);
		return -ENOMEM;
	}

	c = &qos->resume_latency;
	plist_head_init(&c->list);
	c->target_value = PM_QOS_RESUME_LATENCY_DEFAULT_VALUE;
	c->default_value = PM_QOS_RESUME_LATENCY_DEFAULT_VALUE;
	c->no_constraint_value = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT;
	c->type = PM_QOS_MIN;
	c->notifiers = n;
	BLOCKING_INIT_NOTIFIER_HEAD(n);

	c = &qos->latency_tolerance;
	plist_head_init(&c->list);
	c->target_value = PM_QOS_LATENCY_TOLERANCE_DEFAULT_VALUE;
	c->default_value = PM_QOS_LATENCY_TOLERANCE_DEFAULT_VALUE;
	c->no_constraint_value = PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT;
	c->type = PM_QOS_MIN;

	freq_constraints_init(&qos->freq);

	INIT_LIST_HEAD(&qos->flags.list);

	spin_lock_irq(&dev->power.lock);
	dev->power.qos = qos;
	spin_unlock_irq(&dev->power.lock);

	return 0;
}

static void __dev_pm_qos_hide_latency_limit(struct device *dev);
static void __dev_pm_qos_hide_flags(struct device *dev);

/**
 * dev_pm_qos_constraints_destroy
 * @dev: target device
 *
 * Called from the device PM subsystem on device removal under device_pm_lock().
 */
void dev_pm_qos_constraints_destroy(struct device *dev)
{
	struct dev_pm_qos *qos;
	struct dev_pm_qos_request *req, *tmp;
	struct pm_qos_constraints *c;
	struct pm_qos_flags *f;

	mutex_lock(&dev_pm_qos_sysfs_mtx);

	/*
	 * If the device's PM QoS resume latency limit or PM QoS flags have been
	 * exposed to user space, they have to be hidden at this point.
	 */
	pm_qos_sysfs_remove_resume_latency(dev);
	pm_qos_sysfs_remove_flags(dev);

	mutex_lock(&dev_pm_qos_mtx);

	__dev_pm_qos_hide_latency_limit(dev);
	__dev_pm_qos_hide_flags(dev);

	qos = dev->power.qos;
	if (!qos)
		goto out;

	/* Flush the constraints lists for the device. */
	c = &qos->resume_latency;
	plist_for_each_entry_safe(req, tmp, &c->list, data.pnode) {
		/*
		 * Update constraints list and call the notification
		 * callbacks if needed
		 */
		apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
		memset(req, 0, sizeof(*req));
	}

	c = &qos->latency_tolerance;
	plist_for_each_entry_safe(req, tmp, &c->list, data.pnode) {
		apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
		memset(req, 0, sizeof(*req));
	}

	c = &qos->freq.min_freq;
	plist_for_each_entry_safe(req, tmp, &c->list, data.freq.pnode) {
		apply_constraint(req, PM_QOS_REMOVE_REQ,
				 PM_QOS_MIN_FREQUENCY_DEFAULT_VALUE);
		memset(req, 0, sizeof(*req));
	}

	c = &qos->freq.max_freq;
	plist_for_each_entry_safe(req, tmp, &c->list, data.freq.pnode) {
		apply_constraint(req, PM_QOS_REMOVE_REQ,
				 PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE);
		memset(req, 0, sizeof(*req));
	}

	f = &qos->flags;
	list_for_each_entry_safe(req, tmp, &f->list, data.flr.node) {
		apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
		memset(req, 0, sizeof(*req));
	}

	spin_lock_irq(&dev->power.lock);
	dev->power.qos = ERR_PTR(-ENODEV);
	spin_unlock_irq(&dev->power.lock);

	kfree(qos->resume_latency.notifiers);
	kfree(qos);

 out:
	mutex_unlock(&dev_pm_qos_mtx);

	mutex_unlock(&dev_pm_qos_sysfs_mtx);
}

static bool dev_pm_qos_invalid_req_type(struct device *dev,
					enum dev_pm_qos_req_type type)
{
	return type == DEV_PM_QOS_LATENCY_TOLERANCE &&
	       !dev->power.set_latency_tolerance;
}

static int __dev_pm_qos_add_request(struct device *dev,
				    struct dev_pm_qos_request *req,
				    enum dev_pm_qos_req_type type, s32 value)
{
	int ret = 0;

	if (!dev || !req || dev_pm_qos_invalid_req_type(dev, type))
		return -EINVAL;

	if (WARN(dev_pm_qos_request_active(req),
		 "%s() called for already added request\n", __func__))
		return -EINVAL;

	if (IS_ERR(dev->power.qos))
		ret = -ENODEV;
	else if (!dev->power.qos)
		ret = dev_pm_qos_constraints_allocate(dev);

	trace_dev_pm_qos_add_request(dev_name(dev), type, value);
	if (ret)
		return ret;

	req->dev = dev;
	req->type = type;
	if (req->type == DEV_PM_QOS_MIN_FREQUENCY)
		ret = freq_qos_add_request(&dev->power.qos->freq,
					   &req->data.freq,
					   FREQ_QOS_MIN, value);
	else if (req->type == DEV_PM_QOS_MAX_FREQUENCY)
		ret = freq_qos_add_request(&dev->power.qos->freq,
					   &req->data.freq,
					   FREQ_QOS_MAX, value);
	else
		ret = apply_constraint(req, PM_QOS_ADD_REQ, value);

	return ret;
}

/**
 * dev_pm_qos_add_request - inserts new qos request into the list
 * @dev: target device for the constraint
 * @req: pointer to a preallocated handle
 * @type: type of the request
 * @value: defines the qos request
 *
 * This function inserts a new entry in the device constraints list of
 * requested qos performance characteristics. It recomputes the aggregate
 * QoS expectations of parameters and initializes the dev_pm_qos_request
 * handle. Caller needs to save this handle for later use in updates and
 * removal.
 *
 * Returns 1 if the aggregated constraint value has changed,
 * 0 if the aggregated constraint value has not changed,
 * -EINVAL in case of wrong parameters, -ENOMEM if there's not enough memory
 * to allocate for data structures, -ENODEV if the device has just been removed
 * from the system.
 *
 * Callers should ensure that the target device is not RPM_SUSPENDED before
 * using this function for requests of type DEV_PM_QOS_FLAGS.
 */
int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req,
			   enum dev_pm_qos_req_type type, s32 value)
{
	int ret;

	mutex_lock(&dev_pm_qos_mtx);
	ret = __dev_pm_qos_add_request(dev, req, type, value);
	mutex_unlock(&dev_pm_qos_mtx);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_add_request);
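
/*
 * Illustrative use (hypothetical driver code, not part of this file): the
 * handle must stay valid for as long as the request is registered, so it is
 * typically embedded in driver data or made static, e.g.:
 *
 *	static struct dev_pm_qos_request my_req;
 *
 *	ret = dev_pm_qos_add_request(dev, &my_req,
 *				     DEV_PM_QOS_RESUME_LATENCY, 100);
 *	if (ret < 0)
 *		return ret;
 */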

/**
 * __dev_pm_qos_update_request - Modify an existing device PM QoS request.
 * @req: PM QoS request to modify.
 * @new_value: New value to request.
 */
static int __dev_pm_qos_update_request(struct dev_pm_qos_request *req,
				       s32 new_value)
{
	s32 curr_value;
	int ret = 0;

	if (!req) /* guard against callers passing in NULL */
		return -EINVAL;

	if (WARN(!dev_pm_qos_request_active(req),
		 "%s() called for unknown object\n", __func__))
		return -EINVAL;

	if (IS_ERR_OR_NULL(req->dev->power.qos))
		return -ENODEV;

	switch (req->type) {
	case DEV_PM_QOS_RESUME_LATENCY:
	case DEV_PM_QOS_LATENCY_TOLERANCE:
		curr_value = req->data.pnode.prio;
		break;
	case DEV_PM_QOS_MIN_FREQUENCY:
	case DEV_PM_QOS_MAX_FREQUENCY:
		curr_value = req->data.freq.pnode.prio;
		break;
	case DEV_PM_QOS_FLAGS:
		curr_value = req->data.flr.flags;
		break;
	default:
		return -EINVAL;
	}

	trace_dev_pm_qos_update_request(dev_name(req->dev), req->type,
					new_value);
	if (curr_value != new_value)
		ret = apply_constraint(req, PM_QOS_UPDATE_REQ, new_value);

	return ret;
}

/**
 * dev_pm_qos_update_request - modifies an existing qos request
 * @req: handle to list element holding a dev_pm_qos request to use
 * @new_value: defines the qos request
 *
 * Updates an existing dev PM qos request along with updating the
 * target value.
 *
 * Attempts are made to make this code callable on hot code paths.
 *
 * Returns 1 if the aggregated constraint value has changed,
 * 0 if the aggregated constraint value has not changed,
 * -EINVAL in case of wrong parameters, -ENODEV if the device has been
 * removed from the system
 *
 * Callers should ensure that the target device is not RPM_SUSPENDED before
 * using this function for requests of type DEV_PM_QOS_FLAGS.
 */
int dev_pm_qos_update_request(struct dev_pm_qos_request *req, s32 new_value)
{
	int ret;

	mutex_lock(&dev_pm_qos_mtx);
	ret = __dev_pm_qos_update_request(req, new_value);
	mutex_unlock(&dev_pm_qos_mtx);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_update_request);
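
/*
 * Illustrative use (hypothetical, not part of this file): tightening the
 * request shown in the dev_pm_qos_add_request() sketch above to 50 usecs:
 *
 *	dev_pm_qos_update_request(&my_req, 50);
 */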

static int __dev_pm_qos_remove_request(struct dev_pm_qos_request *req)
{
	int ret;

	if (!req) /* guard against callers passing in NULL */
		return -EINVAL;

	if (WARN(!dev_pm_qos_request_active(req),
		 "%s() called for unknown object\n", __func__))
		return -EINVAL;

	if (IS_ERR_OR_NULL(req->dev->power.qos))
		return -ENODEV;

	trace_dev_pm_qos_remove_request(dev_name(req->dev), req->type,
					PM_QOS_DEFAULT_VALUE);
	ret = apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
	memset(req, 0, sizeof(*req));
	return ret;
}

/**
 * dev_pm_qos_remove_request - removes an existing qos request
 * @req: handle to request list element
 *
 * Will remove pm qos request from the list of constraints and
 * recompute the current target value. Call this on slow code paths.
 *
 * Returns 1 if the aggregated constraint value has changed,
 * 0 if the aggregated constraint value has not changed,
 * -EINVAL in case of wrong parameters, -ENODEV if the device has been
 * removed from the system
 *
 * Callers should ensure that the target device is not RPM_SUSPENDED before
 * using this function for requests of type DEV_PM_QOS_FLAGS.
 */
int dev_pm_qos_remove_request(struct dev_pm_qos_request *req)
{
	int ret;

	mutex_lock(&dev_pm_qos_mtx);
	ret = __dev_pm_qos_remove_request(req);
	mutex_unlock(&dev_pm_qos_mtx);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_remove_request);
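
/*
 * Illustrative use (hypothetical, not part of this file): since the handle
 * is zeroed on removal, teardown paths that may run more than once often
 * guard the call, e.g.:
 *
 *	if (dev_pm_qos_request_active(&my_req))
 *		dev_pm_qos_remove_request(&my_req);
 */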

/**
 * dev_pm_qos_add_notifier - sets notification entry for changes to target value
 * of per-device PM QoS constraints
 *
 * @dev: target device for the constraint
 * @notifier: notifier block managed by caller.
 * @type: request type.
 *
 * Will register the notifier into a notification chain that gets called
 * upon changes to the target value for the device.
 *
 * If the device's constraints object doesn't exist when this routine is called,
 * it will be created (or error code will be returned if that fails).
 */
int dev_pm_qos_add_notifier(struct device *dev, struct notifier_block *notifier,
			    enum dev_pm_qos_req_type type)
{
	int ret = 0;

	mutex_lock(&dev_pm_qos_mtx);

	if (IS_ERR(dev->power.qos))
		ret = -ENODEV;
	else if (!dev->power.qos)
		ret = dev_pm_qos_constraints_allocate(dev);

	if (ret)
		goto unlock;

	switch (type) {
	case DEV_PM_QOS_RESUME_LATENCY:
		ret = blocking_notifier_chain_register(dev->power.qos->resume_latency.notifiers,
						       notifier);
		break;
	case DEV_PM_QOS_MIN_FREQUENCY:
		ret = freq_qos_add_notifier(&dev->power.qos->freq,
					    FREQ_QOS_MIN, notifier);
		break;
	case DEV_PM_QOS_MAX_FREQUENCY:
		ret = freq_qos_add_notifier(&dev->power.qos->freq,
					    FREQ_QOS_MAX, notifier);
		break;
	default:
		WARN_ON(1);
		ret = -EINVAL;
	}

unlock:
	mutex_unlock(&dev_pm_qos_mtx);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_add_notifier);
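
/*
 * Illustrative use (hypothetical watcher code, not part of this file): the
 * notifier callback receives the new aggregated target value as @val:
 *
 *	static int my_qos_notify(struct notifier_block *nb,
 *				 unsigned long val, void *data)
 *	{
 *		pr_debug("resume latency target is now %lu\n", val);
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_qos_nb = {
 *		.notifier_call = my_qos_notify,
 *	};
 *
 *	dev_pm_qos_add_notifier(dev, &my_qos_nb, DEV_PM_QOS_RESUME_LATENCY);
 */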

/**
 * dev_pm_qos_remove_notifier - deletes notification for changes to target value
 * of per-device PM QoS constraints
 *
 * @dev: target device for the constraint
 * @notifier: notifier block to be removed.
 * @type: request type.
 *
 * Will remove the notifier from the notification chain that gets called
 * upon changes to the target value.
 */
int dev_pm_qos_remove_notifier(struct device *dev,
			       struct notifier_block *notifier,
			       enum dev_pm_qos_req_type type)
{
	int ret = 0;

	mutex_lock(&dev_pm_qos_mtx);

	/* Silently return if the constraints object is not present. */
	if (IS_ERR_OR_NULL(dev->power.qos))
		goto unlock;

	switch (type) {
	case DEV_PM_QOS_RESUME_LATENCY:
		ret = blocking_notifier_chain_unregister(dev->power.qos->resume_latency.notifiers,
							 notifier);
		break;
	case DEV_PM_QOS_MIN_FREQUENCY:
		ret = freq_qos_remove_notifier(&dev->power.qos->freq,
					       FREQ_QOS_MIN, notifier);
		break;
	case DEV_PM_QOS_MAX_FREQUENCY:
		ret = freq_qos_remove_notifier(&dev->power.qos->freq,
					       FREQ_QOS_MAX, notifier);
		break;
	default:
		WARN_ON(1);
		ret = -EINVAL;
	}

unlock:
	mutex_unlock(&dev_pm_qos_mtx);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_remove_notifier);

/**
 * dev_pm_qos_add_ancestor_request - Add PM QoS request for device's ancestor.
 * @dev: Device whose ancestor to add the request for.
 * @req: Pointer to the preallocated handle.
 * @type: Type of the request.
 * @value: Constraint latency value.
 */
int dev_pm_qos_add_ancestor_request(struct device *dev,
				    struct dev_pm_qos_request *req,
				    enum dev_pm_qos_req_type type, s32 value)
{
	struct device *ancestor = dev->parent;
	int ret = -ENODEV;

	switch (type) {
	case DEV_PM_QOS_RESUME_LATENCY:
		while (ancestor && !ancestor->power.ignore_children)
			ancestor = ancestor->parent;

		break;
	case DEV_PM_QOS_LATENCY_TOLERANCE:
		while (ancestor && !ancestor->power.set_latency_tolerance)
			ancestor = ancestor->parent;

		break;
	default:
		ancestor = NULL;
	}
	if (ancestor)
		ret = dev_pm_qos_add_request(ancestor, req, type, value);

	if (ret < 0)
		req->dev = NULL;

	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_add_ancestor_request);
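
/*
 * Illustrative use (hypothetical, not part of this file): a device behind a
 * bus controller can constrain the first ancestor that has
 * power.ignore_children set, without knowing which device that is:
 *
 *	ret = dev_pm_qos_add_ancestor_request(dev, &ancestor_req,
 *					      DEV_PM_QOS_RESUME_LATENCY, 100);
 */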

static void __dev_pm_qos_drop_user_request(struct device *dev,
					   enum dev_pm_qos_req_type type)
{
	struct dev_pm_qos_request *req = NULL;

	switch (type) {
	case DEV_PM_QOS_RESUME_LATENCY:
		req = dev->power.qos->resume_latency_req;
		dev->power.qos->resume_latency_req = NULL;
		break;
	case DEV_PM_QOS_LATENCY_TOLERANCE:
		req = dev->power.qos->latency_tolerance_req;
		dev->power.qos->latency_tolerance_req = NULL;
		break;
	case DEV_PM_QOS_FLAGS:
		req = dev->power.qos->flags_req;
		dev->power.qos->flags_req = NULL;
		break;
	default:
		WARN_ON(1);
		return;
	}
	__dev_pm_qos_remove_request(req);
	kfree(req);
}

static void dev_pm_qos_drop_user_request(struct device *dev,
					 enum dev_pm_qos_req_type type)
{
	mutex_lock(&dev_pm_qos_mtx);
	__dev_pm_qos_drop_user_request(dev, type);
	mutex_unlock(&dev_pm_qos_mtx);
}

/**
 * dev_pm_qos_expose_latency_limit - Expose PM QoS latency limit to user space.
 * @dev: Device whose PM QoS latency limit is to be exposed to user space.
 * @value: Initial value of the latency limit.
 */
int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value)
{
	struct dev_pm_qos_request *req;
	int ret;

	if (!device_is_registered(dev) || value < 0)
		return -EINVAL;

	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_RESUME_LATENCY, value);
	if (ret < 0) {
		kfree(req);
		return ret;
	}

	mutex_lock(&dev_pm_qos_sysfs_mtx);

	mutex_lock(&dev_pm_qos_mtx);

	if (IS_ERR_OR_NULL(dev->power.qos))
		ret = -ENODEV;
	else if (dev->power.qos->resume_latency_req)
		ret = -EEXIST;

	if (ret < 0) {
		__dev_pm_qos_remove_request(req);
		kfree(req);
		mutex_unlock(&dev_pm_qos_mtx);
		goto out;
	}
	dev->power.qos->resume_latency_req = req;

	mutex_unlock(&dev_pm_qos_mtx);

	ret = pm_qos_sysfs_add_resume_latency(dev);
	if (ret)
		dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_RESUME_LATENCY);

out:
	mutex_unlock(&dev_pm_qos_sysfs_mtx);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_expose_latency_limit);
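
/*
 * Illustrative use (hypothetical, not part of this file): a driver that
 * wants user space to be able to cap its device's resume latency through
 * sysfs (the power/pm_qos_resume_latency_us attribute) could call:
 *
 *	dev_pm_qos_expose_latency_limit(dev,
 *			PM_QOS_RESUME_LATENCY_DEFAULT_VALUE);
 */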

static void __dev_pm_qos_hide_latency_limit(struct device *dev)
{
	if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->resume_latency_req)
		__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_RESUME_LATENCY);
}

/**
 * dev_pm_qos_hide_latency_limit - Hide PM QoS latency limit from user space.
 * @dev: Device whose PM QoS latency limit is to be hidden from user space.
 */
void dev_pm_qos_hide_latency_limit(struct device *dev)
{
	mutex_lock(&dev_pm_qos_sysfs_mtx);

	pm_qos_sysfs_remove_resume_latency(dev);

	mutex_lock(&dev_pm_qos_mtx);
	__dev_pm_qos_hide_latency_limit(dev);
	mutex_unlock(&dev_pm_qos_mtx);

	mutex_unlock(&dev_pm_qos_sysfs_mtx);
}
EXPORT_SYMBOL_GPL(dev_pm_qos_hide_latency_limit);

/**
 * dev_pm_qos_expose_flags - Expose PM QoS flags of a device to user space.
 * @dev: Device whose PM QoS flags are to be exposed to user space.
 * @val: Initial values of the flags.
 */
int dev_pm_qos_expose_flags(struct device *dev, s32 val)
{
	struct dev_pm_qos_request *req;
	int ret;

	if (!device_is_registered(dev))
		return -EINVAL;

	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_FLAGS, val);
	if (ret < 0) {
		kfree(req);
		return ret;
	}

	pm_runtime_get_sync(dev);
	mutex_lock(&dev_pm_qos_sysfs_mtx);

	mutex_lock(&dev_pm_qos_mtx);

	if (IS_ERR_OR_NULL(dev->power.qos))
		ret = -ENODEV;
	else if (dev->power.qos->flags_req)
		ret = -EEXIST;

	if (ret < 0) {
		__dev_pm_qos_remove_request(req);
		kfree(req);
		mutex_unlock(&dev_pm_qos_mtx);
		goto out;
	}
	dev->power.qos->flags_req = req;

	mutex_unlock(&dev_pm_qos_mtx);

	ret = pm_qos_sysfs_add_flags(dev);
	if (ret)
		dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS);

out:
	mutex_unlock(&dev_pm_qos_sysfs_mtx);
	pm_runtime_put(dev);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_expose_flags);

static void __dev_pm_qos_hide_flags(struct device *dev)
{
	if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->flags_req)
		__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS);
}

/**
 * dev_pm_qos_hide_flags - Hide PM QoS flags of a device from user space.
 * @dev: Device whose PM QoS flags are to be hidden from user space.
 */
void dev_pm_qos_hide_flags(struct device *dev)
{
	pm_runtime_get_sync(dev);
	mutex_lock(&dev_pm_qos_sysfs_mtx);

	pm_qos_sysfs_remove_flags(dev);

	mutex_lock(&dev_pm_qos_mtx);
	__dev_pm_qos_hide_flags(dev);
	mutex_unlock(&dev_pm_qos_mtx);

	mutex_unlock(&dev_pm_qos_sysfs_mtx);
	pm_runtime_put(dev);
}
EXPORT_SYMBOL_GPL(dev_pm_qos_hide_flags);

/**
 * dev_pm_qos_update_flags - Update PM QoS flags request owned by user space.
 * @dev: Device to update the PM QoS flags request for.
 * @mask: Flags to set/clear.
 * @set: Whether to set or clear the flags (true means set).
 */
int dev_pm_qos_update_flags(struct device *dev, s32 mask, bool set)
{
	s32 value;
	int ret;

	pm_runtime_get_sync(dev);
	mutex_lock(&dev_pm_qos_mtx);

	if (IS_ERR_OR_NULL(dev->power.qos) || !dev->power.qos->flags_req) {
		ret = -EINVAL;
		goto out;
	}

	value = dev_pm_qos_requested_flags(dev);
	if (set)
		value |= mask;
	else
		value &= ~mask;

	ret = __dev_pm_qos_update_request(dev->power.qos->flags_req, value);

out:
	mutex_unlock(&dev_pm_qos_mtx);
	pm_runtime_put(dev);
	return ret;
}
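
/*
 * Illustrative use (hypothetical, not part of this file): once
 * dev_pm_qos_expose_flags() has created the flags request owned by user
 * space, setting the NO_POWER_OFF flag on its behalf could look like:
 *
 *	ret = dev_pm_qos_update_flags(dev, PM_QOS_FLAG_NO_POWER_OFF, true);
 */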

/**
 * dev_pm_qos_get_user_latency_tolerance - Get user space latency tolerance.
 * @dev: Device to obtain the user space latency tolerance for.
 */
s32 dev_pm_qos_get_user_latency_tolerance(struct device *dev)
{
	s32 ret;

	mutex_lock(&dev_pm_qos_mtx);
	ret = IS_ERR_OR_NULL(dev->power.qos)
		|| !dev->power.qos->latency_tolerance_req ?
			PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT :
			dev->power.qos->latency_tolerance_req->data.pnode.prio;
	mutex_unlock(&dev_pm_qos_mtx);
	return ret;
}

/**
 * dev_pm_qos_update_user_latency_tolerance - Update user space latency tolerance.
 * @dev: Device to update the user space latency tolerance for.
 * @val: New user space latency tolerance for @dev (negative values disable).
 */
int dev_pm_qos_update_user_latency_tolerance(struct device *dev, s32 val)
{
	int ret;

	mutex_lock(&dev_pm_qos_mtx);

	if (IS_ERR_OR_NULL(dev->power.qos)
	    || !dev->power.qos->latency_tolerance_req) {
		struct dev_pm_qos_request *req;

		if (val < 0) {
			if (val == PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT)
				ret = 0;
			else
				ret = -EINVAL;
			goto out;
		}
		req = kzalloc(sizeof(*req), GFP_KERNEL);
		if (!req) {
			ret = -ENOMEM;
			goto out;
		}
		ret = __dev_pm_qos_add_request(dev, req, DEV_PM_QOS_LATENCY_TOLERANCE, val);
		if (ret < 0) {
			kfree(req);
			goto out;
		}
		dev->power.qos->latency_tolerance_req = req;
	} else {
		if (val < 0) {
			__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY_TOLERANCE);
			ret = 0;
		} else {
			ret = __dev_pm_qos_update_request(dev->power.qos->latency_tolerance_req, val);
		}
	}

out:
	mutex_unlock(&dev_pm_qos_mtx);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_update_user_latency_tolerance);
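
/*
 * Illustrative use (hypothetical, not part of this file): setting a 20 usec
 * tolerance on behalf of user space and later dropping the request again:
 *
 *	dev_pm_qos_update_user_latency_tolerance(dev, 20);
 *	...
 *	dev_pm_qos_update_user_latency_tolerance(dev,
 *			PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT);
 */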

/**
 * dev_pm_qos_expose_latency_tolerance - Expose latency tolerance to userspace
 * @dev: Device whose latency tolerance to expose
 */
int dev_pm_qos_expose_latency_tolerance(struct device *dev)
{
	int ret;

	if (!dev->power.set_latency_tolerance)
		return -EINVAL;

	mutex_lock(&dev_pm_qos_sysfs_mtx);
	ret = pm_qos_sysfs_add_latency_tolerance(dev);
	mutex_unlock(&dev_pm_qos_sysfs_mtx);

	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_expose_latency_tolerance);

/**
 * dev_pm_qos_hide_latency_tolerance - Hide latency tolerance from userspace
 * @dev: Device whose latency tolerance to hide
 */
void dev_pm_qos_hide_latency_tolerance(struct device *dev)
{
	mutex_lock(&dev_pm_qos_sysfs_mtx);
	pm_qos_sysfs_remove_latency_tolerance(dev);
	mutex_unlock(&dev_pm_qos_sysfs_mtx);

	/* Remove the request from user space now */
	pm_runtime_get_sync(dev);
	dev_pm_qos_update_user_latency_tolerance(dev,
		PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT);
	pm_runtime_put(dev);
}
EXPORT_SYMBOL_GPL(dev_pm_qos_hide_latency_tolerance);