xref: /OK3568_Linux_fs/kernel/drivers/base/power/domain.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun  * drivers/base/power/domain.c - Common code related to device power domains.
4*4882a593Smuzhiyun  *
5*4882a593Smuzhiyun  * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
6*4882a593Smuzhiyun  */
7*4882a593Smuzhiyun #define pr_fmt(fmt) "PM: " fmt
8*4882a593Smuzhiyun 
9*4882a593Smuzhiyun #include <linux/delay.h>
10*4882a593Smuzhiyun #include <linux/kernel.h>
11*4882a593Smuzhiyun #include <linux/io.h>
12*4882a593Smuzhiyun #include <linux/platform_device.h>
13*4882a593Smuzhiyun #include <linux/pm_opp.h>
14*4882a593Smuzhiyun #include <linux/pm_runtime.h>
15*4882a593Smuzhiyun #include <linux/pm_domain.h>
16*4882a593Smuzhiyun #include <linux/pm_qos.h>
17*4882a593Smuzhiyun #include <linux/pm_clock.h>
18*4882a593Smuzhiyun #include <linux/slab.h>
19*4882a593Smuzhiyun #include <linux/err.h>
20*4882a593Smuzhiyun #include <linux/sched.h>
21*4882a593Smuzhiyun #include <linux/suspend.h>
22*4882a593Smuzhiyun #include <linux/export.h>
23*4882a593Smuzhiyun #include <linux/cpu.h>
24*4882a593Smuzhiyun #include <linux/debugfs.h>
25*4882a593Smuzhiyun 
26*4882a593Smuzhiyun #include "power.h"
27*4882a593Smuzhiyun 
28*4882a593Smuzhiyun #define GENPD_RETRY_MAX_MS	250		/* Approximate */
29*4882a593Smuzhiyun 
/*
 * Invoke the optional per-device callback @callback from @genpd->dev_ops for
 * @dev. Expands (GNU statement expression) to the callback's return value,
 * or to (type)0 when the callback is not set. @type is the callback's return
 * type; @genpd and @dev are evaluated more than once, so pass simple lvalues.
 */
#define GENPD_DEV_CALLBACK(genpd, type, callback, dev)		\
({								\
	type (*__routine)(struct device *__d); 			\
	type __ret = (type)0;					\
								\
	__routine = genpd->dev_ops.callback; 			\
	if (__routine) {					\
		__ret = __routine(dev); 			\
	}							\
	__ret;							\
})
41*4882a593Smuzhiyun 
/* All registered generic PM domains; traversal/mutation under gpd_list_lock. */
static LIST_HEAD(gpd_list);
static DEFINE_MUTEX(gpd_list_lock);
44*4882a593Smuzhiyun 
/*
 * Per-domain locking primitives, dispatched through genpd->lock_ops (see the
 * genpd_lock*() macros below). Two implementations exist in this file:
 * mutex-based (genpd_mtx_ops) and irqsave-spinlock-based (genpd_spin_ops) —
 * presumably selected based on GENPD_FLAG_IRQ_SAFE at init; the selection
 * site is outside this chunk, so confirm against genpd init code.
 */
struct genpd_lock_ops {
	void (*lock)(struct generic_pm_domain *genpd);
	void (*lock_nested)(struct generic_pm_domain *genpd, int depth);
	int (*lock_interruptible)(struct generic_pm_domain *genpd);
	void (*unlock)(struct generic_pm_domain *genpd);
};
51*4882a593Smuzhiyun 
/* Mutex flavour of genpd_lock_ops.lock: uninterruptible acquire. */
static void genpd_lock_mtx(struct generic_pm_domain *genpd)
{
	mutex_lock(&genpd->mlock);
}
56*4882a593Smuzhiyun 
/*
 * Mutex flavour of genpd_lock_ops.lock_nested: @depth is the lockdep
 * subclass, used when parent/child domain locks are taken in a chain.
 */
static void genpd_lock_nested_mtx(struct generic_pm_domain *genpd,
					int depth)
{
	mutex_lock_nested(&genpd->mlock, depth);
}
62*4882a593Smuzhiyun 
/*
 * Mutex flavour of genpd_lock_ops.lock_interruptible: returns 0 on
 * success or -EINTR if a signal interrupted the wait.
 */
static int genpd_lock_interruptible_mtx(struct generic_pm_domain *genpd)
{
	return mutex_lock_interruptible(&genpd->mlock);
}
67*4882a593Smuzhiyun 
genpd_unlock_mtx(struct generic_pm_domain * genpd)68*4882a593Smuzhiyun static void genpd_unlock_mtx(struct generic_pm_domain *genpd)
69*4882a593Smuzhiyun {
70*4882a593Smuzhiyun 	return mutex_unlock(&genpd->mlock);
71*4882a593Smuzhiyun }
72*4882a593Smuzhiyun 
/* Lock-ops vtable for sleepable (non-IRQ-safe) domains. */
static const struct genpd_lock_ops genpd_mtx_ops = {
	.lock = genpd_lock_mtx,
	.lock_nested = genpd_lock_nested_mtx,
	.lock_interruptible = genpd_lock_interruptible_mtx,
	.unlock = genpd_unlock_mtx,
};
79*4882a593Smuzhiyun 
/*
 * Spinlock flavour of genpd_lock_ops.lock. The saved IRQ flags are stashed
 * in the domain itself; that is safe because lock_flags is only ever
 * written while slock is held, and read by the matching unlock.
 */
static void genpd_lock_spin(struct generic_pm_domain *genpd)
	__acquires(&genpd->slock)
{
	unsigned long flags;

	spin_lock_irqsave(&genpd->slock, flags);
	genpd->lock_flags = flags;
}
88*4882a593Smuzhiyun 
/*
 * Spinlock flavour of genpd_lock_ops.lock_nested: @depth is the lockdep
 * subclass for parent/child lock chains. IRQ flags are stored in the
 * domain under the lock, as in genpd_lock_spin().
 */
static void genpd_lock_nested_spin(struct generic_pm_domain *genpd,
					int depth)
	__acquires(&genpd->slock)
{
	unsigned long flags;

	spin_lock_irqsave_nested(&genpd->slock, flags, depth);
	genpd->lock_flags = flags;
}
98*4882a593Smuzhiyun 
/*
 * Spinlock flavour of genpd_lock_ops.lock_interruptible. Spinlocks cannot
 * be interrupted by signals, so this always acquires and returns 0.
 */
static int genpd_lock_interruptible_spin(struct generic_pm_domain *genpd)
	__acquires(&genpd->slock)
{
	unsigned long flags;

	spin_lock_irqsave(&genpd->slock, flags);
	genpd->lock_flags = flags;
	return 0;
}
108*4882a593Smuzhiyun 
/*
 * Spinlock flavour of genpd_lock_ops.unlock: restores the IRQ flags that
 * the matching genpd_lock_*spin() call saved in genpd->lock_flags.
 */
static void genpd_unlock_spin(struct generic_pm_domain *genpd)
	__releases(&genpd->slock)
{
	spin_unlock_irqrestore(&genpd->slock, genpd->lock_flags);
}
114*4882a593Smuzhiyun 
/* Lock-ops vtable for IRQ-safe domains (usable from atomic context). */
static const struct genpd_lock_ops genpd_spin_ops = {
	.lock = genpd_lock_spin,
	.lock_nested = genpd_lock_nested_spin,
	.lock_interruptible = genpd_lock_interruptible_spin,
	.unlock = genpd_unlock_spin,
};
121*4882a593Smuzhiyun 
/*
 * Locking helpers dispatch through the per-domain lock_ops vtable; flag
 * helpers test genpd->flags / genpd->status. All macro parameters are
 * parenthesized so expansion stays correct for non-trivial argument
 * expressions (standard function-like-macro hygiene).
 */
#define genpd_lock(p)			((p)->lock_ops->lock((p)))
#define genpd_lock_nested(p, d)		((p)->lock_ops->lock_nested((p), (d)))
#define genpd_lock_interruptible(p)	((p)->lock_ops->lock_interruptible((p)))
#define genpd_unlock(p)			((p)->lock_ops->unlock((p)))

#define genpd_status_on(genpd)		((genpd)->status == GENPD_STATE_ON)
#define genpd_is_irq_safe(genpd)	((genpd)->flags & GENPD_FLAG_IRQ_SAFE)
#define genpd_is_always_on(genpd)	((genpd)->flags & GENPD_FLAG_ALWAYS_ON)
#define genpd_is_active_wakeup(genpd)	((genpd)->flags & GENPD_FLAG_ACTIVE_WAKEUP)
#define genpd_is_cpu_domain(genpd)	((genpd)->flags & GENPD_FLAG_CPU_DOMAIN)
#define genpd_is_rpm_always_on(genpd)	((genpd)->flags & GENPD_FLAG_RPM_ALWAYS_ON)
133*4882a593Smuzhiyun 
irq_safe_dev_in_no_sleep_domain(struct device * dev,const struct generic_pm_domain * genpd)134*4882a593Smuzhiyun static inline bool irq_safe_dev_in_no_sleep_domain(struct device *dev,
135*4882a593Smuzhiyun 		const struct generic_pm_domain *genpd)
136*4882a593Smuzhiyun {
137*4882a593Smuzhiyun 	bool ret;
138*4882a593Smuzhiyun 
139*4882a593Smuzhiyun 	ret = pm_runtime_is_irq_safe(dev) && !genpd_is_irq_safe(genpd);
140*4882a593Smuzhiyun 
141*4882a593Smuzhiyun 	/*
142*4882a593Smuzhiyun 	 * Warn once if an IRQ safe device is attached to a no sleep domain, as
143*4882a593Smuzhiyun 	 * to indicate a suboptimal configuration for PM. For an always on
144*4882a593Smuzhiyun 	 * domain this isn't case, thus don't warn.
145*4882a593Smuzhiyun 	 */
146*4882a593Smuzhiyun 	if (ret && !genpd_is_always_on(genpd))
147*4882a593Smuzhiyun 		dev_warn_once(dev, "PM domain %s will not be powered off\n",
148*4882a593Smuzhiyun 				genpd->name);
149*4882a593Smuzhiyun 
150*4882a593Smuzhiyun 	return ret;
151*4882a593Smuzhiyun }
152*4882a593Smuzhiyun 
153*4882a593Smuzhiyun static int genpd_runtime_suspend(struct device *dev);
154*4882a593Smuzhiyun 
155*4882a593Smuzhiyun /*
156*4882a593Smuzhiyun  * Get the generic PM domain for a particular struct device.
157*4882a593Smuzhiyun  * This validates the struct device pointer, the PM domain pointer,
158*4882a593Smuzhiyun  * and checks that the PM domain pointer is a real generic PM domain.
159*4882a593Smuzhiyun  * Any failure results in NULL being returned.
160*4882a593Smuzhiyun  */
dev_to_genpd_safe(struct device * dev)161*4882a593Smuzhiyun static struct generic_pm_domain *dev_to_genpd_safe(struct device *dev)
162*4882a593Smuzhiyun {
163*4882a593Smuzhiyun 	if (IS_ERR_OR_NULL(dev) || IS_ERR_OR_NULL(dev->pm_domain))
164*4882a593Smuzhiyun 		return NULL;
165*4882a593Smuzhiyun 
166*4882a593Smuzhiyun 	/* A genpd's always have its ->runtime_suspend() callback assigned. */
167*4882a593Smuzhiyun 	if (dev->pm_domain->ops.runtime_suspend == genpd_runtime_suspend)
168*4882a593Smuzhiyun 		return pd_to_genpd(dev->pm_domain);
169*4882a593Smuzhiyun 
170*4882a593Smuzhiyun 	return NULL;
171*4882a593Smuzhiyun }
172*4882a593Smuzhiyun 
173*4882a593Smuzhiyun /*
174*4882a593Smuzhiyun  * This should only be used where we are certain that the pm_domain
175*4882a593Smuzhiyun  * attached to the device is a genpd domain.
176*4882a593Smuzhiyun  */
dev_to_genpd(struct device * dev)177*4882a593Smuzhiyun static struct generic_pm_domain *dev_to_genpd(struct device *dev)
178*4882a593Smuzhiyun {
179*4882a593Smuzhiyun 	if (IS_ERR_OR_NULL(dev->pm_domain))
180*4882a593Smuzhiyun 		return ERR_PTR(-EINVAL);
181*4882a593Smuzhiyun 
182*4882a593Smuzhiyun 	return pd_to_genpd(dev->pm_domain);
183*4882a593Smuzhiyun }
184*4882a593Smuzhiyun 
/* Invoke the domain's optional per-device ->stop() callback for @dev. */
static int genpd_stop_dev(const struct generic_pm_domain *genpd,
			  struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, stop, dev);
}
190*4882a593Smuzhiyun 
/* Invoke the domain's optional per-device ->start() callback for @dev. */
static int genpd_start_dev(const struct generic_pm_domain *genpd,
			   struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, start, dev);
}
196*4882a593Smuzhiyun 
genpd_sd_counter_dec(struct generic_pm_domain * genpd)197*4882a593Smuzhiyun static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd)
198*4882a593Smuzhiyun {
199*4882a593Smuzhiyun 	bool ret = false;
200*4882a593Smuzhiyun 
201*4882a593Smuzhiyun 	if (!WARN_ON(atomic_read(&genpd->sd_count) == 0))
202*4882a593Smuzhiyun 		ret = !!atomic_dec_and_test(&genpd->sd_count);
203*4882a593Smuzhiyun 
204*4882a593Smuzhiyun 	return ret;
205*4882a593Smuzhiyun }
206*4882a593Smuzhiyun 
/*
 * Increment the count of powered-on subdomains. The full barrier after
 * the increment orders it against subsequent accesses — presumably pairing
 * with the lockless sd_count checks in genpd_power_off(); confirm against
 * the power-on path before relying on this.
 */
static void genpd_sd_counter_inc(struct generic_pm_domain *genpd)
{
	atomic_inc(&genpd->sd_count);
	smp_mb__after_atomic();
}
212*4882a593Smuzhiyun 
213*4882a593Smuzhiyun #ifdef CONFIG_DEBUG_FS
214*4882a593Smuzhiyun static struct dentry *genpd_debugfs_dir;
215*4882a593Smuzhiyun 
216*4882a593Smuzhiyun static void genpd_debug_add(struct generic_pm_domain *genpd);
217*4882a593Smuzhiyun 
genpd_debug_remove(struct generic_pm_domain * genpd)218*4882a593Smuzhiyun static void genpd_debug_remove(struct generic_pm_domain *genpd)
219*4882a593Smuzhiyun {
220*4882a593Smuzhiyun 	struct dentry *d;
221*4882a593Smuzhiyun 
222*4882a593Smuzhiyun 	if (!genpd_debugfs_dir)
223*4882a593Smuzhiyun 		return;
224*4882a593Smuzhiyun 
225*4882a593Smuzhiyun 	d = debugfs_lookup(genpd->name, genpd_debugfs_dir);
226*4882a593Smuzhiyun 	debugfs_remove(d);
227*4882a593Smuzhiyun }
228*4882a593Smuzhiyun 
genpd_update_accounting(struct generic_pm_domain * genpd)229*4882a593Smuzhiyun static void genpd_update_accounting(struct generic_pm_domain *genpd)
230*4882a593Smuzhiyun {
231*4882a593Smuzhiyun 	ktime_t delta, now;
232*4882a593Smuzhiyun 
233*4882a593Smuzhiyun 	now = ktime_get();
234*4882a593Smuzhiyun 	delta = ktime_sub(now, genpd->accounting_time);
235*4882a593Smuzhiyun 
236*4882a593Smuzhiyun 	/*
237*4882a593Smuzhiyun 	 * If genpd->status is active, it means we are just
238*4882a593Smuzhiyun 	 * out of off and so update the idle time and vice
239*4882a593Smuzhiyun 	 * versa.
240*4882a593Smuzhiyun 	 */
241*4882a593Smuzhiyun 	if (genpd->status == GENPD_STATE_ON) {
242*4882a593Smuzhiyun 		int state_idx = genpd->state_idx;
243*4882a593Smuzhiyun 
244*4882a593Smuzhiyun 		genpd->states[state_idx].idle_time =
245*4882a593Smuzhiyun 			ktime_add(genpd->states[state_idx].idle_time, delta);
246*4882a593Smuzhiyun 	} else {
247*4882a593Smuzhiyun 		genpd->on_time = ktime_add(genpd->on_time, delta);
248*4882a593Smuzhiyun 	}
249*4882a593Smuzhiyun 
250*4882a593Smuzhiyun 	genpd->accounting_time = now;
251*4882a593Smuzhiyun }
252*4882a593Smuzhiyun #else
/* !CONFIG_DEBUG_FS: debugfs and accounting hooks compile away to nothing. */
static inline void genpd_debug_add(struct generic_pm_domain *genpd) {}
static inline void genpd_debug_remove(struct generic_pm_domain *genpd) {}
static inline void genpd_update_accounting(struct generic_pm_domain *genpd) {}
256*4882a593Smuzhiyun #endif
257*4882a593Smuzhiyun 
_genpd_reeval_performance_state(struct generic_pm_domain * genpd,unsigned int state)258*4882a593Smuzhiyun static int _genpd_reeval_performance_state(struct generic_pm_domain *genpd,
259*4882a593Smuzhiyun 					   unsigned int state)
260*4882a593Smuzhiyun {
261*4882a593Smuzhiyun 	struct generic_pm_domain_data *pd_data;
262*4882a593Smuzhiyun 	struct pm_domain_data *pdd;
263*4882a593Smuzhiyun 	struct gpd_link *link;
264*4882a593Smuzhiyun 
265*4882a593Smuzhiyun 	/* New requested state is same as Max requested state */
266*4882a593Smuzhiyun 	if (state == genpd->performance_state)
267*4882a593Smuzhiyun 		return state;
268*4882a593Smuzhiyun 
269*4882a593Smuzhiyun 	/* New requested state is higher than Max requested state */
270*4882a593Smuzhiyun 	if (state > genpd->performance_state)
271*4882a593Smuzhiyun 		return state;
272*4882a593Smuzhiyun 
273*4882a593Smuzhiyun 	/* Traverse all devices within the domain */
274*4882a593Smuzhiyun 	list_for_each_entry(pdd, &genpd->dev_list, list_node) {
275*4882a593Smuzhiyun 		pd_data = to_gpd_data(pdd);
276*4882a593Smuzhiyun 
277*4882a593Smuzhiyun 		if (pd_data->performance_state > state)
278*4882a593Smuzhiyun 			state = pd_data->performance_state;
279*4882a593Smuzhiyun 	}
280*4882a593Smuzhiyun 
281*4882a593Smuzhiyun 	/*
282*4882a593Smuzhiyun 	 * Traverse all sub-domains within the domain. This can be
283*4882a593Smuzhiyun 	 * done without any additional locking as the link->performance_state
284*4882a593Smuzhiyun 	 * field is protected by the parent genpd->lock, which is already taken.
285*4882a593Smuzhiyun 	 *
286*4882a593Smuzhiyun 	 * Also note that link->performance_state (subdomain's performance state
287*4882a593Smuzhiyun 	 * requirement to parent domain) is different from
288*4882a593Smuzhiyun 	 * link->child->performance_state (current performance state requirement
289*4882a593Smuzhiyun 	 * of the devices/sub-domains of the subdomain) and so can have a
290*4882a593Smuzhiyun 	 * different value.
291*4882a593Smuzhiyun 	 *
292*4882a593Smuzhiyun 	 * Note that we also take vote from powered-off sub-domains into account
293*4882a593Smuzhiyun 	 * as the same is done for devices right now.
294*4882a593Smuzhiyun 	 */
295*4882a593Smuzhiyun 	list_for_each_entry(link, &genpd->parent_links, parent_node) {
296*4882a593Smuzhiyun 		if (link->performance_state > state)
297*4882a593Smuzhiyun 			state = link->performance_state;
298*4882a593Smuzhiyun 	}
299*4882a593Smuzhiyun 
300*4882a593Smuzhiyun 	return state;
301*4882a593Smuzhiyun }
302*4882a593Smuzhiyun 
/*
 * Apply performance state @state to @genpd, first propagating the
 * corresponding translated requirement up to every parent domain that
 * supports performance states. Caller holds genpd->lock; parents are
 * locked with increasing lockdep @depth. On any failure, all parents
 * already updated are rolled back to their previous requirement.
 */
static int _genpd_set_performance_state(struct generic_pm_domain *genpd,
					unsigned int state, int depth)
{
	struct generic_pm_domain *parent;
	struct gpd_link *link;
	int parent_state, ret;

	/* Nothing to do if the aggregate state is unchanged. */
	if (state == genpd->performance_state)
		return 0;

	/* Propagate to parents of genpd */
	list_for_each_entry(link, &genpd->child_links, child_node) {
		parent = link->parent;

		if (!parent->set_performance_state)
			continue;

		/* Find parent's performance state */
		ret = dev_pm_opp_xlate_performance_state(genpd->opp_table,
							 parent->opp_table,
							 state);
		if (unlikely(ret < 0))
			goto err;

		parent_state = ret;

		genpd_lock_nested(parent, depth + 1);

		/* Record the old vote so a failure can be rolled back. */
		link->prev_performance_state = link->performance_state;
		link->performance_state = parent_state;
		parent_state = _genpd_reeval_performance_state(parent,
						parent_state);
		ret = _genpd_set_performance_state(parent, parent_state, depth + 1);
		if (ret)
			link->performance_state = link->prev_performance_state;

		genpd_unlock(parent);

		if (ret)
			goto err;
	}

	/* All parents satisfied; now set this domain's own state. */
	ret = genpd->set_performance_state(genpd, state);
	if (ret)
		goto err;

	genpd->performance_state = state;
	return 0;

err:
	/* Encountered an error, lets rollback */
	list_for_each_entry_continue_reverse(link, &genpd->child_links,
					     child_node) {
		parent = link->parent;

		if (!parent->set_performance_state)
			continue;

		genpd_lock_nested(parent, depth + 1);

		/* Restore the vote recorded before the failed attempt. */
		parent_state = link->prev_performance_state;
		link->performance_state = parent_state;

		parent_state = _genpd_reeval_performance_state(parent,
						parent_state);
		if (_genpd_set_performance_state(parent, parent_state, depth + 1)) {
			pr_err("%s: Failed to roll back to %d performance state\n",
			       parent->name, parent_state);
		}

		genpd_unlock(parent);
	}

	return ret;
}
378*4882a593Smuzhiyun 
379*4882a593Smuzhiyun /**
380*4882a593Smuzhiyun  * dev_pm_genpd_set_performance_state- Set performance state of device's power
381*4882a593Smuzhiyun  * domain.
382*4882a593Smuzhiyun  *
383*4882a593Smuzhiyun  * @dev: Device for which the performance-state needs to be set.
384*4882a593Smuzhiyun  * @state: Target performance state of the device. This can be set as 0 when the
385*4882a593Smuzhiyun  *	   device doesn't have any performance state constraints left (And so
386*4882a593Smuzhiyun  *	   the device wouldn't participate anymore to find the target
387*4882a593Smuzhiyun  *	   performance state of the genpd).
388*4882a593Smuzhiyun  *
389*4882a593Smuzhiyun  * It is assumed that the users guarantee that the genpd wouldn't be detached
390*4882a593Smuzhiyun  * while this routine is getting called.
391*4882a593Smuzhiyun  *
392*4882a593Smuzhiyun  * Returns 0 on success and negative error values on failures.
393*4882a593Smuzhiyun  */
int dev_pm_genpd_set_performance_state(struct device *dev, unsigned int state)
{
	struct generic_pm_domain *genpd;
	struct generic_pm_domain_data *gpd_data;
	unsigned int prev;
	int ret;

	genpd = dev_to_genpd_safe(dev);
	if (!genpd)
		return -ENODEV;

	/* The domain must actually support performance states. */
	if (unlikely(!genpd->set_performance_state))
		return -EINVAL;

	if (WARN_ON(!dev->power.subsys_data ||
		     !dev->power.subsys_data->domain_data))
		return -EINVAL;

	genpd_lock(genpd);

	/*
	 * Record this device's new vote, keeping the old one so the vote
	 * can be restored if the domain-wide update fails.
	 */
	gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
	prev = gpd_data->performance_state;
	gpd_data->performance_state = state;

	/* Re-aggregate all votes and apply the result to the domain. */
	state = _genpd_reeval_performance_state(genpd, state);
	ret = _genpd_set_performance_state(genpd, state, 0);
	if (ret)
		gpd_data->performance_state = prev;

	genpd_unlock(genpd);

	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_genpd_set_performance_state);
428*4882a593Smuzhiyun 
429*4882a593Smuzhiyun /**
430*4882a593Smuzhiyun  * dev_pm_genpd_set_next_wakeup - Notify PM framework of an impending wakeup.
431*4882a593Smuzhiyun  *
432*4882a593Smuzhiyun  * @dev: Device to handle
433*4882a593Smuzhiyun  * @next: impending interrupt/wakeup for the device
434*4882a593Smuzhiyun  *
435*4882a593Smuzhiyun  *
436*4882a593Smuzhiyun  * Allow devices to inform of the next wakeup. It's assumed that the users
437*4882a593Smuzhiyun  * guarantee that the genpd wouldn't be detached while this routine is getting
438*4882a593Smuzhiyun  * called. Additionally, it's also assumed that @dev isn't runtime suspended
439*4882a593Smuzhiyun  * (RPM_SUSPENDED)."
440*4882a593Smuzhiyun  * Although devices are expected to update the next_wakeup after the end of
441*4882a593Smuzhiyun  * their usecase as well, it is possible the devices themselves may not know
442*4882a593Smuzhiyun  * about that, so stale @next will be ignored when powering off the domain.
443*4882a593Smuzhiyun  */
void dev_pm_genpd_set_next_wakeup(struct device *dev, ktime_t next)
{
	struct generic_pm_domain *genpd = dev_to_genpd_safe(dev);

	/* Silently ignore devices not attached to a genpd. */
	if (!genpd)
		return;

	to_gpd_data(dev->power.subsys_data->domain_data)->next_wakeup = next;
}
EXPORT_SYMBOL_GPL(dev_pm_genpd_set_next_wakeup);
457*4882a593Smuzhiyun 
/*
 * Power on @genpd via its ->power_on() callback, wrapped in the power
 * notifier protocol: PRE_ON before the attempt, then ON on success or OFF
 * (rollback) on failure. When @timed, measure the callback's latency and
 * record a new worst case for the current idle state. Returns 0 on
 * success or a negative error code.
 */
static int _genpd_power_on(struct generic_pm_domain *genpd, bool timed)
{
	unsigned int state_idx = genpd->state_idx;
	ktime_t time_start;
	s64 elapsed_ns;
	int ret;

	/* Notify consumers that we are about to power on. */
	ret = raw_notifier_call_chain_robust(&genpd->power_notifiers,
					     GENPD_NOTIFY_PRE_ON,
					     GENPD_NOTIFY_OFF, NULL);
	ret = notifier_to_errno(ret);
	if (ret)
		return ret;

	/* No callback means the domain is considered on; just notify. */
	if (!genpd->power_on)
		goto out;

	/* Untimed path: skip the latency measurement entirely. */
	if (!timed) {
		ret = genpd->power_on(genpd);
		if (ret)
			goto err;

		goto out;
	}

	time_start = ktime_get();
	ret = genpd->power_on(genpd);
	if (ret)
		goto err;

	elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
	if (elapsed_ns <= genpd->states[state_idx].power_on_latency_ns)
		goto out;

	/* New worst-case on-latency; governors must re-evaluate. */
	genpd->states[state_idx].power_on_latency_ns = elapsed_ns;
	genpd->max_off_time_changed = true;
	pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n",
		 genpd->name, "on", elapsed_ns);

out:
	raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_ON, NULL);
	return 0;
err:
	/* Power-on failed: tell consumers the domain stays off. */
	raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_OFF,
				NULL);
	return ret;
}
506*4882a593Smuzhiyun 
/*
 * Power off @genpd via its ->power_off() callback, wrapped in the power
 * notifier protocol: PRE_OFF before the attempt, then OFF on success or
 * ON (rollback) when the callback reports busy. When @timed, measure the
 * callback's latency and record a new worst case for the current idle
 * state. Returns 0 on success or the callback's negative error code.
 */
static int _genpd_power_off(struct generic_pm_domain *genpd, bool timed)
{
	unsigned int state_idx = genpd->state_idx;
	ktime_t time_start;
	s64 elapsed_ns;
	int ret;

	/* Notify consumers that we are about to power off. */
	ret = raw_notifier_call_chain_robust(&genpd->power_notifiers,
					     GENPD_NOTIFY_PRE_OFF,
					     GENPD_NOTIFY_ON, NULL);
	ret = notifier_to_errno(ret);
	if (ret)
		return ret;

	/* No callback means power-off is a pure bookkeeping transition. */
	if (!genpd->power_off)
		goto out;

	/* Untimed path: skip the latency measurement entirely. */
	if (!timed) {
		ret = genpd->power_off(genpd);
		if (ret)
			goto busy;

		goto out;
	}

	time_start = ktime_get();
	ret = genpd->power_off(genpd);
	if (ret)
		goto busy;

	elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
	if (elapsed_ns <= genpd->states[state_idx].power_off_latency_ns)
		goto out;

	/* New worst-case off-latency; governors must re-evaluate. */
	genpd->states[state_idx].power_off_latency_ns = elapsed_ns;
	genpd->max_off_time_changed = true;
	pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n",
		 genpd->name, "off", elapsed_ns);

out:
	raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_OFF,
				NULL);
	return 0;
busy:
	/* Power-off refused: tell consumers the domain remains on. */
	raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_ON, NULL);
	return ret;
}
555*4882a593Smuzhiyun 
/**
 * genpd_queue_power_off_work - Queue up the execution of genpd_power_off().
 * @genpd: PM domain to power off.
 *
 * Queue up the execution of genpd_power_off() unless it's already been done
 * before.
 */
static void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
{
	/* pm_wq serializes the work; re-queueing a pending work is a no-op. */
	queue_work(pm_wq, &genpd->power_off_work);
}
567*4882a593Smuzhiyun 
/**
 * genpd_power_off - Remove power from a given PM domain.
 * @genpd: PM domain to power down.
 * @one_dev_on: If invoked from genpd's ->runtime_suspend|resume() callback, the
 * RPM status of the releated device is in an intermediate state, not yet turned
 * into RPM_SUSPENDED. This means genpd_power_off() must allow one device to not
 * be RPM_SUSPENDED, while it tries to power off the PM domain.
 * @depth: nesting count for lockdep, incremented when recursing into parents.
 *
 * If all of the @genpd's devices have been suspended and all of its subdomains
 * have been powered down, remove power from @genpd.
 */
static int genpd_power_off(struct generic_pm_domain *genpd, bool one_dev_on,
			   unsigned int depth)
{
	struct pm_domain_data *pdd;
	struct gpd_link *link;
	unsigned int not_suspended = 0;
	int ret;

	/*
	 * Do not try to power off the domain in the following situations:
	 * (1) The domain is already in the "power off" state.
	 * (2) System suspend is in progress.
	 */
	if (!genpd_status_on(genpd) || genpd->prepared_count > 0)
		return 0;

	/*
	 * Abort power off for the PM domain in the following situations:
	 * (1) The domain is configured as always on.
	 * (2) When the domain has a subdomain being powered on.
	 */
	if (genpd_is_always_on(genpd) ||
			genpd_is_rpm_always_on(genpd) ||
			atomic_read(&genpd->sd_count) > 0)
		return -EBUSY;

	/* Count devices that are not ready for the domain to go down. */
	list_for_each_entry(pdd, &genpd->dev_list, list_node) {
		enum pm_qos_flags_status stat;

		/* A device may veto power-off via PM QoS. */
		stat = dev_pm_qos_flags(pdd->dev, PM_QOS_FLAG_NO_POWER_OFF);
		if (stat > PM_QOS_FLAGS_NONE)
			return -EBUSY;

		/*
		 * Do not allow PM domain to be powered off, when an IRQ safe
		 * device is part of a non-IRQ safe domain.
		 */
		if (!pm_runtime_suspended(pdd->dev) ||
			irq_safe_dev_in_no_sleep_domain(pdd->dev, genpd))
			not_suspended++;
	}

	/* @one_dev_on permits exactly one mid-transition device. */
	if (not_suspended > 1 || (not_suspended == 1 && !one_dev_on))
		return -EBUSY;

	/* Let the governor veto the power-off (e.g. latency constraints). */
	if (genpd->gov && genpd->gov->power_down_ok) {
		if (!genpd->gov->power_down_ok(&genpd->domain))
			return -EAGAIN;
	}

	/* Default to shallowest state. */
	if (!genpd->gov)
		genpd->state_idx = 0;

	/* Don't power off, if a child domain is waiting to power on. */
	if (atomic_read(&genpd->sd_count) > 0)
		return -EBUSY;

	ret = _genpd_power_off(genpd, true);
	if (ret) {
		genpd->states[genpd->state_idx].rejected++;
		return ret;
	}

	/* Update status/statistics only after the hardware transition. */
	genpd->status = GENPD_STATE_OFF;
	genpd_update_accounting(genpd);
	genpd->states[genpd->state_idx].usage++;

	/* Drop our vote on each parent and let it try to power off too. */
	list_for_each_entry(link, &genpd->child_links, child_node) {
		genpd_sd_counter_dec(link->parent);
		genpd_lock_nested(link->parent, depth + 1);
		genpd_power_off(link->parent, false, depth + 1);
		genpd_unlock(link->parent);
	}

	return 0;
}
656*4882a593Smuzhiyun 
657*4882a593Smuzhiyun /**
658*4882a593Smuzhiyun  * genpd_power_on - Restore power to a given PM domain and its parents.
659*4882a593Smuzhiyun  * @genpd: PM domain to power up.
660*4882a593Smuzhiyun  * @depth: nesting count for lockdep.
661*4882a593Smuzhiyun  *
662*4882a593Smuzhiyun  * Restore power to @genpd and all of its parents so that it is possible to
663*4882a593Smuzhiyun  * resume a device belonging to it.
664*4882a593Smuzhiyun  */
static int genpd_power_on(struct generic_pm_domain *genpd, unsigned int depth)
{
	struct gpd_link *link;
	int ret = 0;

	if (genpd_status_on(genpd))
		return 0;

	/*
	 * The list is guaranteed not to change while the loop below is being
	 * executed, unless one of the parents' .power_on() callbacks fiddles
	 * with it.
	 */
	list_for_each_entry(link, &genpd->child_links, child_node) {
		struct generic_pm_domain *parent = link->parent;

		/* Bump the parent's subdomain count before powering it on. */
		genpd_sd_counter_inc(parent);

		genpd_lock_nested(parent, depth + 1);
		ret = genpd_power_on(parent, depth + 1);
		genpd_unlock(parent);

		if (ret) {
			genpd_sd_counter_dec(parent);
			goto err;
		}
	}

	ret = _genpd_power_on(genpd, true);
	if (ret)
		goto err;

	genpd->status = GENPD_STATE_ON;
	genpd_update_accounting(genpd);

	return 0;

 err:
	/*
	 * Roll back: undo the subdomain-count increments for the parents
	 * handled so far and give them a chance to power off again.
	 */
	list_for_each_entry_continue_reverse(link,
					&genpd->child_links,
					child_node) {
		genpd_sd_counter_dec(link->parent);
		genpd_lock_nested(link->parent, depth + 1);
		genpd_power_off(link->parent, false, depth + 1);
		genpd_unlock(link->parent);
	}

	return ret;
}
714*4882a593Smuzhiyun 
/* Start @dev through its PM domain's device start callback. */
static int genpd_dev_pm_start(struct device *dev)
{
	return genpd_start_dev(dev_to_genpd(dev), dev);
}
721*4882a593Smuzhiyun 
static int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
				     unsigned long val, void *ptr)
{
	struct generic_pm_domain_data *gpd_data;
	struct device *dev;

	gpd_data = container_of(nb, struct generic_pm_domain_data, nb);
	dev = gpd_data->base.dev;

	/*
	 * A PM QoS constraint for the device changed.  Walk up the device
	 * hierarchy and mark the cached timing data stale for every ancestor
	 * that belongs to a genpd.
	 */
	for (;;) {
		struct generic_pm_domain *genpd;
		struct pm_domain_data *pdd;

		spin_lock_irq(&dev->power.lock);

		pdd = dev->power.subsys_data ?
				dev->power.subsys_data->domain_data : NULL;
		if (pdd) {
			to_gpd_data(pdd)->td.constraint_changed = true;
			genpd = dev_to_genpd(dev);
		} else {
			/* This device is not part of a PM domain. */
			genpd = ERR_PTR(-ENODATA);
		}

		spin_unlock_irq(&dev->power.lock);

		/* Take the genpd lock only after dropping dev->power.lock. */
		if (!IS_ERR(genpd)) {
			genpd_lock(genpd);
			genpd->max_off_time_changed = true;
			genpd_unlock(genpd);
		}

		dev = dev->parent;
		if (!dev || dev->power.ignore_children)
			break;
	}

	return NOTIFY_DONE;
}
761*4882a593Smuzhiyun 
762*4882a593Smuzhiyun /**
763*4882a593Smuzhiyun  * genpd_power_off_work_fn - Power off PM domain whose subdomain count is 0.
764*4882a593Smuzhiyun  * @work: Work structure used for scheduling the execution of this function.
765*4882a593Smuzhiyun  */
genpd_power_off_work_fn(struct work_struct * work)766*4882a593Smuzhiyun static void genpd_power_off_work_fn(struct work_struct *work)
767*4882a593Smuzhiyun {
768*4882a593Smuzhiyun 	struct generic_pm_domain *genpd;
769*4882a593Smuzhiyun 
770*4882a593Smuzhiyun 	genpd = container_of(work, struct generic_pm_domain, power_off_work);
771*4882a593Smuzhiyun 
772*4882a593Smuzhiyun 	genpd_lock(genpd);
773*4882a593Smuzhiyun 	genpd_power_off(genpd, false, 0);
774*4882a593Smuzhiyun 	genpd_unlock(genpd);
775*4882a593Smuzhiyun }
776*4882a593Smuzhiyun 
777*4882a593Smuzhiyun /**
778*4882a593Smuzhiyun  * __genpd_runtime_suspend - walk the hierarchy of ->runtime_suspend() callbacks
779*4882a593Smuzhiyun  * @dev: Device to handle.
780*4882a593Smuzhiyun  */
__genpd_runtime_suspend(struct device * dev)781*4882a593Smuzhiyun static int __genpd_runtime_suspend(struct device *dev)
782*4882a593Smuzhiyun {
783*4882a593Smuzhiyun 	int (*cb)(struct device *__dev);
784*4882a593Smuzhiyun 
785*4882a593Smuzhiyun 	if (dev->type && dev->type->pm)
786*4882a593Smuzhiyun 		cb = dev->type->pm->runtime_suspend;
787*4882a593Smuzhiyun 	else if (dev->class && dev->class->pm)
788*4882a593Smuzhiyun 		cb = dev->class->pm->runtime_suspend;
789*4882a593Smuzhiyun 	else if (dev->bus && dev->bus->pm)
790*4882a593Smuzhiyun 		cb = dev->bus->pm->runtime_suspend;
791*4882a593Smuzhiyun 	else
792*4882a593Smuzhiyun 		cb = NULL;
793*4882a593Smuzhiyun 
794*4882a593Smuzhiyun 	if (!cb && dev->driver && dev->driver->pm)
795*4882a593Smuzhiyun 		cb = dev->driver->pm->runtime_suspend;
796*4882a593Smuzhiyun 
797*4882a593Smuzhiyun 	return cb ? cb(dev) : 0;
798*4882a593Smuzhiyun }
799*4882a593Smuzhiyun 
800*4882a593Smuzhiyun /**
801*4882a593Smuzhiyun  * __genpd_runtime_resume - walk the hierarchy of ->runtime_resume() callbacks
802*4882a593Smuzhiyun  * @dev: Device to handle.
803*4882a593Smuzhiyun  */
__genpd_runtime_resume(struct device * dev)804*4882a593Smuzhiyun static int __genpd_runtime_resume(struct device *dev)
805*4882a593Smuzhiyun {
806*4882a593Smuzhiyun 	int (*cb)(struct device *__dev);
807*4882a593Smuzhiyun 
808*4882a593Smuzhiyun 	if (dev->type && dev->type->pm)
809*4882a593Smuzhiyun 		cb = dev->type->pm->runtime_resume;
810*4882a593Smuzhiyun 	else if (dev->class && dev->class->pm)
811*4882a593Smuzhiyun 		cb = dev->class->pm->runtime_resume;
812*4882a593Smuzhiyun 	else if (dev->bus && dev->bus->pm)
813*4882a593Smuzhiyun 		cb = dev->bus->pm->runtime_resume;
814*4882a593Smuzhiyun 	else
815*4882a593Smuzhiyun 		cb = NULL;
816*4882a593Smuzhiyun 
817*4882a593Smuzhiyun 	if (!cb && dev->driver && dev->driver->pm)
818*4882a593Smuzhiyun 		cb = dev->driver->pm->runtime_resume;
819*4882a593Smuzhiyun 
820*4882a593Smuzhiyun 	return cb ? cb(dev) : 0;
821*4882a593Smuzhiyun }
822*4882a593Smuzhiyun 
823*4882a593Smuzhiyun /**
824*4882a593Smuzhiyun  * genpd_runtime_suspend - Suspend a device belonging to I/O PM domain.
825*4882a593Smuzhiyun  * @dev: Device to suspend.
826*4882a593Smuzhiyun  *
827*4882a593Smuzhiyun  * Carry out a runtime suspend of a device under the assumption that its
828*4882a593Smuzhiyun  * pm_domain field points to the domain member of an object of type
829*4882a593Smuzhiyun  * struct generic_pm_domain representing a PM domain consisting of I/O devices.
830*4882a593Smuzhiyun  */
static int genpd_runtime_suspend(struct device *dev)
{
	struct generic_pm_domain *genpd;
	bool (*suspend_ok)(struct device *__dev);
	struct gpd_timing_data *td = &dev_gpd_data(dev)->td;
	bool runtime_pm = pm_runtime_enabled(dev);
	ktime_t time_start;
	s64 elapsed_ns;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/*
	 * A runtime PM centric subsystem/driver may re-use the runtime PM
	 * callbacks for other purposes than runtime PM. In those scenarios
	 * runtime PM is disabled. Under these circumstances, we shall skip
	 * validating/measuring the PM QoS latency.
	 */
	suspend_ok = genpd->gov ? genpd->gov->suspend_ok : NULL;
	if (runtime_pm && suspend_ok && !suspend_ok(dev))
		return -EBUSY;

	/* Measure suspend latency. */
	time_start = 0;
	if (runtime_pm)
		time_start = ktime_get();

	ret = __genpd_runtime_suspend(dev);
	if (ret)
		return ret;

	ret = genpd_stop_dev(genpd, dev);
	if (ret) {
		/* Undo the subsystem/driver suspend if the stop failed. */
		__genpd_runtime_resume(dev);
		return ret;
	}

	/* Update suspend latency value if the measured time exceeds it. */
	if (runtime_pm) {
		elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
		if (elapsed_ns > td->suspend_latency_ns) {
			td->suspend_latency_ns = elapsed_ns;
			dev_dbg(dev, "suspend latency exceeded, %lld ns\n",
				elapsed_ns);
			/* Invalidate cached timing data for the governor. */
			genpd->max_off_time_changed = true;
			td->constraint_changed = true;
		}
	}

	/*
	 * If power.irq_safe is set, this routine may be run with
	 * IRQs disabled, so suspend only if the PM domain also is irq_safe.
	 */
	if (irq_safe_dev_in_no_sleep_domain(dev, genpd))
		return 0;

	/* one_dev_on: this device's runtime PM status is not updated yet. */
	genpd_lock(genpd);
	genpd_power_off(genpd, true, 0);
	genpd_unlock(genpd);

	return 0;
}
897*4882a593Smuzhiyun 
898*4882a593Smuzhiyun /**
899*4882a593Smuzhiyun  * genpd_runtime_resume - Resume a device belonging to I/O PM domain.
900*4882a593Smuzhiyun  * @dev: Device to resume.
901*4882a593Smuzhiyun  *
902*4882a593Smuzhiyun  * Carry out a runtime resume of a device under the assumption that its
903*4882a593Smuzhiyun  * pm_domain field points to the domain member of an object of type
904*4882a593Smuzhiyun  * struct generic_pm_domain representing a PM domain consisting of I/O devices.
905*4882a593Smuzhiyun  */
static int genpd_runtime_resume(struct device *dev)
{
	struct generic_pm_domain *genpd;
	struct gpd_timing_data *td = &dev_gpd_data(dev)->td;
	bool runtime_pm = pm_runtime_enabled(dev);
	ktime_t time_start;
	s64 elapsed_ns;
	int ret;
	bool timed = true;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/*
	 * As we don't power off a non IRQ safe domain, which holds
	 * an IRQ safe device, we don't need to restore power to it.
	 */
	if (irq_safe_dev_in_no_sleep_domain(dev, genpd)) {
		timed = false;
		goto out;
	}

	genpd_lock(genpd);
	ret = genpd_power_on(genpd, 0);
	genpd_unlock(genpd);

	if (ret)
		return ret;

 out:
	/* Measure resume latency. */
	time_start = 0;
	if (timed && runtime_pm)
		time_start = ktime_get();

	ret = genpd_start_dev(genpd, dev);
	if (ret)
		goto err_poweroff;

	ret = __genpd_runtime_resume(dev);
	if (ret)
		goto err_stop;

	/* Update resume latency value if the measured time exceeds it. */
	if (timed && runtime_pm) {
		elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
		if (elapsed_ns > td->resume_latency_ns) {
			td->resume_latency_ns = elapsed_ns;
			dev_dbg(dev, "resume latency exceeded, %lld ns\n",
				elapsed_ns);
			/* Invalidate cached timing data for the governor. */
			genpd->max_off_time_changed = true;
			td->constraint_changed = true;
		}
	}

	return 0;

	/* Error unwind: stop the device and try to power the domain off. */
err_stop:
	genpd_stop_dev(genpd, dev);
err_poweroff:
	if (!pm_runtime_is_irq_safe(dev) ||
		(pm_runtime_is_irq_safe(dev) && genpd_is_irq_safe(genpd))) {
		genpd_lock(genpd);
		genpd_power_off(genpd, true, 0);
		genpd_unlock(genpd);
	}

	return ret;
}
978*4882a593Smuzhiyun 
/* Handle the "pd_ignore_unused" kernel command line option. */
static bool pd_ignore_unused;
static int __init pd_ignore_unused_setup(char *__unused)
{
	pd_ignore_unused = true;
	return 1;
}
__setup("pd_ignore_unused", pd_ignore_unused_setup);
986*4882a593Smuzhiyun 
987*4882a593Smuzhiyun /**
988*4882a593Smuzhiyun  * genpd_power_off_unused - Power off all PM domains with no devices in use.
989*4882a593Smuzhiyun  */
genpd_power_off_unused(void)990*4882a593Smuzhiyun static int __init genpd_power_off_unused(void)
991*4882a593Smuzhiyun {
992*4882a593Smuzhiyun 	struct generic_pm_domain *genpd;
993*4882a593Smuzhiyun 
994*4882a593Smuzhiyun 	if (pd_ignore_unused) {
995*4882a593Smuzhiyun 		pr_warn("genpd: Not disabling unused power domains\n");
996*4882a593Smuzhiyun 		return 0;
997*4882a593Smuzhiyun 	}
998*4882a593Smuzhiyun 
999*4882a593Smuzhiyun 	mutex_lock(&gpd_list_lock);
1000*4882a593Smuzhiyun 
1001*4882a593Smuzhiyun 	list_for_each_entry(genpd, &gpd_list, gpd_list_node)
1002*4882a593Smuzhiyun 		genpd_queue_power_off_work(genpd);
1003*4882a593Smuzhiyun 
1004*4882a593Smuzhiyun 	mutex_unlock(&gpd_list_lock);
1005*4882a593Smuzhiyun 
1006*4882a593Smuzhiyun 	return 0;
1007*4882a593Smuzhiyun }
1008*4882a593Smuzhiyun late_initcall(genpd_power_off_unused);
1009*4882a593Smuzhiyun 
1010*4882a593Smuzhiyun #ifdef CONFIG_PM_SLEEP
1011*4882a593Smuzhiyun 
1012*4882a593Smuzhiyun /**
1013*4882a593Smuzhiyun  * genpd_sync_power_off - Synchronously power off a PM domain and its parents.
1014*4882a593Smuzhiyun  * @genpd: PM domain to power off, if possible.
1015*4882a593Smuzhiyun  * @use_lock: use the lock.
1016*4882a593Smuzhiyun  * @depth: nesting count for lockdep.
1017*4882a593Smuzhiyun  *
1018*4882a593Smuzhiyun  * Check if the given PM domain can be powered off (during system suspend or
1019*4882a593Smuzhiyun  * hibernation) and do that if so.  Also, in that case propagate to its parents.
1020*4882a593Smuzhiyun  *
1021*4882a593Smuzhiyun  * This function is only called in "noirq" and "syscore" stages of system power
1022*4882a593Smuzhiyun  * transitions. The "noirq" callbacks may be executed asynchronously, thus in
1023*4882a593Smuzhiyun  * these cases the lock must be held.
1024*4882a593Smuzhiyun  */
static void genpd_sync_power_off(struct generic_pm_domain *genpd, bool use_lock,
				 unsigned int depth)
{
	struct gpd_link *link;

	if (!genpd_status_on(genpd) || genpd_is_always_on(genpd))
		return;

	/* All devices must be suspended and no subdomain may still be on. */
	if (genpd->suspended_count != genpd->device_count
	    || atomic_read(&genpd->sd_count) > 0)
		return;

	/* Choose the deepest state when suspending */
	genpd->state_idx = genpd->state_count - 1;
	if (_genpd_power_off(genpd, false))
		return;

	genpd->status = GENPD_STATE_OFF;

	/* Propagate to the parents, which may now be able to power off too. */
	list_for_each_entry(link, &genpd->child_links, child_node) {
		genpd_sd_counter_dec(link->parent);

		if (use_lock)
			genpd_lock_nested(link->parent, depth + 1);

		genpd_sync_power_off(link->parent, use_lock, depth + 1);

		if (use_lock)
			genpd_unlock(link->parent);
	}
}
1056*4882a593Smuzhiyun 
1057*4882a593Smuzhiyun /**
1058*4882a593Smuzhiyun  * genpd_sync_power_on - Synchronously power on a PM domain and its parents.
1059*4882a593Smuzhiyun  * @genpd: PM domain to power on.
1060*4882a593Smuzhiyun  * @use_lock: use the lock.
1061*4882a593Smuzhiyun  * @depth: nesting count for lockdep.
1062*4882a593Smuzhiyun  *
1063*4882a593Smuzhiyun  * This function is only called in "noirq" and "syscore" stages of system power
1064*4882a593Smuzhiyun  * transitions. The "noirq" callbacks may be executed asynchronously, thus in
1065*4882a593Smuzhiyun  * these cases the lock must be held.
1066*4882a593Smuzhiyun  */
static void genpd_sync_power_on(struct generic_pm_domain *genpd, bool use_lock,
				unsigned int depth)
{
	struct gpd_link *link;

	if (genpd_status_on(genpd))
		return;

	/* Power on all parents first, so this domain can be powered on. */
	list_for_each_entry(link, &genpd->child_links, child_node) {
		genpd_sd_counter_inc(link->parent);

		if (use_lock)
			genpd_lock_nested(link->parent, depth + 1);

		genpd_sync_power_on(link->parent, use_lock, depth + 1);

		if (use_lock)
			genpd_unlock(link->parent);
	}

	/* The return value of _genpd_power_on() is ignored on this path. */
	_genpd_power_on(genpd, false);
	genpd->status = GENPD_STATE_ON;
}
1090*4882a593Smuzhiyun 
1091*4882a593Smuzhiyun /**
1092*4882a593Smuzhiyun  * resume_needed - Check whether to resume a device before system suspend.
1093*4882a593Smuzhiyun  * @dev: Device to check.
1094*4882a593Smuzhiyun  * @genpd: PM domain the device belongs to.
1095*4882a593Smuzhiyun  *
1096*4882a593Smuzhiyun  * There are two cases in which a device that can wake up the system from sleep
1097*4882a593Smuzhiyun  * states should be resumed by genpd_prepare(): (1) if the device is enabled
1098*4882a593Smuzhiyun  * to wake up the system and it has to remain active for this purpose while the
1099*4882a593Smuzhiyun  * system is in the sleep state and (2) if the device is not enabled to wake up
1100*4882a593Smuzhiyun  * the system from sleep states and it generally doesn't generate wakeup signals
1101*4882a593Smuzhiyun  * by itself (those signals are generated on its behalf by other parts of the
1102*4882a593Smuzhiyun  * system).  In the latter case it may be necessary to reconfigure the device's
1103*4882a593Smuzhiyun  * wakeup settings during system suspend, because it may have been set up to
1104*4882a593Smuzhiyun  * signal remote wakeup from the system's working state as needed by runtime PM.
1105*4882a593Smuzhiyun  * Return 'true' in either of the above cases.
1106*4882a593Smuzhiyun  */
resume_needed(struct device * dev,const struct generic_pm_domain * genpd)1107*4882a593Smuzhiyun static bool resume_needed(struct device *dev,
1108*4882a593Smuzhiyun 			  const struct generic_pm_domain *genpd)
1109*4882a593Smuzhiyun {
1110*4882a593Smuzhiyun 	bool active_wakeup;
1111*4882a593Smuzhiyun 
1112*4882a593Smuzhiyun 	if (!device_can_wakeup(dev))
1113*4882a593Smuzhiyun 		return false;
1114*4882a593Smuzhiyun 
1115*4882a593Smuzhiyun 	active_wakeup = genpd_is_active_wakeup(genpd);
1116*4882a593Smuzhiyun 	return device_may_wakeup(dev) ? active_wakeup : !active_wakeup;
1117*4882a593Smuzhiyun }
1118*4882a593Smuzhiyun 
1119*4882a593Smuzhiyun /**
1120*4882a593Smuzhiyun  * genpd_prepare - Start power transition of a device in a PM domain.
1121*4882a593Smuzhiyun  * @dev: Device to start the transition of.
1122*4882a593Smuzhiyun  *
1123*4882a593Smuzhiyun  * Start a power transition of a device (during a system-wide power transition)
1124*4882a593Smuzhiyun  * under the assumption that its pm_domain field points to the domain member of
1125*4882a593Smuzhiyun  * an object of type struct generic_pm_domain representing a PM domain
1126*4882a593Smuzhiyun  * consisting of I/O devices.
1127*4882a593Smuzhiyun  */
static int genpd_prepare(struct device *dev)
{
	struct generic_pm_domain *genpd;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/*
	 * If a wakeup request is pending for the device, it should be woken up
	 * at this point and a system wakeup event should be reported if it's
	 * set up to wake up the system from sleep states.
	 */
	if (resume_needed(dev, genpd))
		pm_runtime_resume(dev);

	genpd_lock(genpd);

	/* First device being prepared resets the per-domain suspend count. */
	if (genpd->prepared_count++ == 0)
		genpd->suspended_count = 0;

	genpd_unlock(genpd);

	ret = pm_generic_prepare(dev);
	if (ret < 0) {
		/* Preparation failed - undo the prepared_count increment. */
		genpd_lock(genpd);

		genpd->prepared_count--;

		genpd_unlock(genpd);
	}

	/* Never return 1, as genpd don't cope with the direct_complete path. */
	return ret >= 0 ? 0 : ret;
}
1166*4882a593Smuzhiyun 
1167*4882a593Smuzhiyun /**
1168*4882a593Smuzhiyun  * genpd_finish_suspend - Completion of suspend or hibernation of device in an
1169*4882a593Smuzhiyun  *   I/O pm domain.
1170*4882a593Smuzhiyun  * @dev: Device to suspend.
1171*4882a593Smuzhiyun  * @poweroff: Specifies if this is a poweroff_noirq or suspend_noirq callback.
1172*4882a593Smuzhiyun  *
1173*4882a593Smuzhiyun  * Stop the device and remove power from the domain if all devices in it have
1174*4882a593Smuzhiyun  * been stopped.
1175*4882a593Smuzhiyun  */
static int genpd_finish_suspend(struct device *dev, bool poweroff)
{
	struct generic_pm_domain *genpd;
	int ret = 0;

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/* Run the matching generic noirq callback first. */
	if (poweroff)
		ret = pm_generic_poweroff_noirq(dev);
	else
		ret = pm_generic_suspend_noirq(dev);
	if (ret)
		return ret;

	/* Keep the device running if it is part of the system wakeup path. */
	if (dev->power.wakeup_path && genpd_is_active_wakeup(genpd))
		return 0;

	if (genpd->dev_ops.stop && genpd->dev_ops.start &&
	    !pm_runtime_status_suspended(dev)) {
		ret = genpd_stop_dev(genpd, dev);
		if (ret) {
			/* Undo the generic noirq callback on failure. */
			if (poweroff)
				pm_generic_restore_noirq(dev);
			else
				pm_generic_resume_noirq(dev);
			return ret;
		}
	}

	genpd_lock(genpd);
	genpd->suspended_count++;
	genpd_sync_power_off(genpd, true, 0);
	genpd_unlock(genpd);

	return 0;
}
1214*4882a593Smuzhiyun 
1215*4882a593Smuzhiyun /**
1216*4882a593Smuzhiyun  * genpd_suspend_noirq - Completion of suspend of device in an I/O PM domain.
1217*4882a593Smuzhiyun  * @dev: Device to suspend.
1218*4882a593Smuzhiyun  *
1219*4882a593Smuzhiyun  * Stop the device and remove power from the domain if all devices in it have
1220*4882a593Smuzhiyun  * been stopped.
1221*4882a593Smuzhiyun  */
static int genpd_suspend_noirq(struct device *dev)
{
	dev_dbg(dev, "%s()\n", __func__);

	/* poweroff=false selects the suspend (not hibernation) path. */
	return genpd_finish_suspend(dev, false);
}
1228*4882a593Smuzhiyun 
1229*4882a593Smuzhiyun /**
1230*4882a593Smuzhiyun  * genpd_resume_noirq - Start of resume of device in an I/O PM domain.
1231*4882a593Smuzhiyun  * @dev: Device to resume.
1232*4882a593Smuzhiyun  *
1233*4882a593Smuzhiyun  * Restore power to the device's PM domain, if necessary, and start the device.
1234*4882a593Smuzhiyun  */
static int genpd_resume_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/*
	 * If the domain was kept on for the wakeup path, there is nothing to
	 * power up - just run the generic resume callback.
	 */
	if (dev->power.wakeup_path && genpd_is_active_wakeup(genpd))
		return pm_generic_resume_noirq(dev);

	genpd_lock(genpd);
	genpd_sync_power_on(genpd, true, 0);
	genpd->suspended_count--;
	genpd_unlock(genpd);

	if (genpd->dev_ops.stop && genpd->dev_ops.start &&
	    !pm_runtime_status_suspended(dev)) {
		ret = genpd_start_dev(genpd, dev);
		if (ret)
			return ret;
	}

	return pm_generic_resume_noirq(dev);
}
1263*4882a593Smuzhiyun 
1264*4882a593Smuzhiyun /**
1265*4882a593Smuzhiyun  * genpd_freeze_noirq - Completion of freezing a device in an I/O PM domain.
1266*4882a593Smuzhiyun  * @dev: Device to freeze.
1267*4882a593Smuzhiyun  *
1268*4882a593Smuzhiyun  * Carry out a late freeze of a device under the assumption that its
1269*4882a593Smuzhiyun  * pm_domain field points to the domain member of an object of type
1270*4882a593Smuzhiyun  * struct generic_pm_domain representing a power domain consisting of I/O
1271*4882a593Smuzhiyun  * devices.
1272*4882a593Smuzhiyun  */
genpd_freeze_noirq(struct device * dev)1273*4882a593Smuzhiyun static int genpd_freeze_noirq(struct device *dev)
1274*4882a593Smuzhiyun {
1275*4882a593Smuzhiyun 	const struct generic_pm_domain *genpd;
1276*4882a593Smuzhiyun 	int ret = 0;
1277*4882a593Smuzhiyun 
1278*4882a593Smuzhiyun 	dev_dbg(dev, "%s()\n", __func__);
1279*4882a593Smuzhiyun 
1280*4882a593Smuzhiyun 	genpd = dev_to_genpd(dev);
1281*4882a593Smuzhiyun 	if (IS_ERR(genpd))
1282*4882a593Smuzhiyun 		return -EINVAL;
1283*4882a593Smuzhiyun 
1284*4882a593Smuzhiyun 	ret = pm_generic_freeze_noirq(dev);
1285*4882a593Smuzhiyun 	if (ret)
1286*4882a593Smuzhiyun 		return ret;
1287*4882a593Smuzhiyun 
1288*4882a593Smuzhiyun 	if (genpd->dev_ops.stop && genpd->dev_ops.start &&
1289*4882a593Smuzhiyun 	    !pm_runtime_status_suspended(dev))
1290*4882a593Smuzhiyun 		ret = genpd_stop_dev(genpd, dev);
1291*4882a593Smuzhiyun 
1292*4882a593Smuzhiyun 	return ret;
1293*4882a593Smuzhiyun }
1294*4882a593Smuzhiyun 
1295*4882a593Smuzhiyun /**
1296*4882a593Smuzhiyun  * genpd_thaw_noirq - Early thaw of device in an I/O PM domain.
1297*4882a593Smuzhiyun  * @dev: Device to thaw.
1298*4882a593Smuzhiyun  *
1299*4882a593Smuzhiyun  * Start the device, unless power has been removed from the domain already
1300*4882a593Smuzhiyun  * before the system transition.
1301*4882a593Smuzhiyun  */
genpd_thaw_noirq(struct device * dev)1302*4882a593Smuzhiyun static int genpd_thaw_noirq(struct device *dev)
1303*4882a593Smuzhiyun {
1304*4882a593Smuzhiyun 	const struct generic_pm_domain *genpd;
1305*4882a593Smuzhiyun 	int ret = 0;
1306*4882a593Smuzhiyun 
1307*4882a593Smuzhiyun 	dev_dbg(dev, "%s()\n", __func__);
1308*4882a593Smuzhiyun 
1309*4882a593Smuzhiyun 	genpd = dev_to_genpd(dev);
1310*4882a593Smuzhiyun 	if (IS_ERR(genpd))
1311*4882a593Smuzhiyun 		return -EINVAL;
1312*4882a593Smuzhiyun 
1313*4882a593Smuzhiyun 	if (genpd->dev_ops.stop && genpd->dev_ops.start &&
1314*4882a593Smuzhiyun 	    !pm_runtime_status_suspended(dev)) {
1315*4882a593Smuzhiyun 		ret = genpd_start_dev(genpd, dev);
1316*4882a593Smuzhiyun 		if (ret)
1317*4882a593Smuzhiyun 			return ret;
1318*4882a593Smuzhiyun 	}
1319*4882a593Smuzhiyun 
1320*4882a593Smuzhiyun 	return pm_generic_thaw_noirq(dev);
1321*4882a593Smuzhiyun }
1322*4882a593Smuzhiyun 
1323*4882a593Smuzhiyun /**
1324*4882a593Smuzhiyun  * genpd_poweroff_noirq - Completion of hibernation of device in an
1325*4882a593Smuzhiyun  *   I/O PM domain.
1326*4882a593Smuzhiyun  * @dev: Device to poweroff.
1327*4882a593Smuzhiyun  *
1328*4882a593Smuzhiyun  * Stop the device and remove power from the domain if all devices in it have
1329*4882a593Smuzhiyun  * been stopped.
1330*4882a593Smuzhiyun  */
genpd_poweroff_noirq(struct device * dev)1331*4882a593Smuzhiyun static int genpd_poweroff_noirq(struct device *dev)
1332*4882a593Smuzhiyun {
1333*4882a593Smuzhiyun 	dev_dbg(dev, "%s()\n", __func__);
1334*4882a593Smuzhiyun 
1335*4882a593Smuzhiyun 	return genpd_finish_suspend(dev, true);
1336*4882a593Smuzhiyun }
1337*4882a593Smuzhiyun 
/**
 * genpd_restore_noirq - Start of restore of device in an I/O PM domain.
 * @dev: Device to resume.
 *
 * Make sure the domain will be in the same power state as before the
 * hibernation the system is resuming from and start the device if necessary.
 */
static int genpd_restore_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;
	int ret = 0;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/*
	 * At this point suspended_count == 0 means we are being run for the
	 * first time for the given domain in the present cycle.
	 */
	genpd_lock(genpd);
	if (genpd->suspended_count++ == 0) {
		/*
		 * The boot kernel might put the domain into arbitrary state,
		 * so make it appear as powered off to genpd_sync_power_on(),
		 * so that it tries to power it on in case it was really off.
		 */
		genpd->status = GENPD_STATE_OFF;
	}

	genpd_sync_power_on(genpd, true, 0);
	genpd_unlock(genpd);

	/* Restart the device unless it was runtime suspended beforehand. */
	if (genpd->dev_ops.stop && genpd->dev_ops.start &&
	    !pm_runtime_status_suspended(dev)) {
		ret = genpd_start_dev(genpd, dev);
		if (ret)
			return ret;
	}

	return pm_generic_restore_noirq(dev);
}
1382*4882a593Smuzhiyun 
1383*4882a593Smuzhiyun /**
1384*4882a593Smuzhiyun  * genpd_complete - Complete power transition of a device in a power domain.
1385*4882a593Smuzhiyun  * @dev: Device to complete the transition of.
1386*4882a593Smuzhiyun  *
1387*4882a593Smuzhiyun  * Complete a power transition of a device (during a system-wide power
1388*4882a593Smuzhiyun  * transition) under the assumption that its pm_domain field points to the
1389*4882a593Smuzhiyun  * domain member of an object of type struct generic_pm_domain representing
1390*4882a593Smuzhiyun  * a power domain consisting of I/O devices.
1391*4882a593Smuzhiyun  */
genpd_complete(struct device * dev)1392*4882a593Smuzhiyun static void genpd_complete(struct device *dev)
1393*4882a593Smuzhiyun {
1394*4882a593Smuzhiyun 	struct generic_pm_domain *genpd;
1395*4882a593Smuzhiyun 
1396*4882a593Smuzhiyun 	dev_dbg(dev, "%s()\n", __func__);
1397*4882a593Smuzhiyun 
1398*4882a593Smuzhiyun 	genpd = dev_to_genpd(dev);
1399*4882a593Smuzhiyun 	if (IS_ERR(genpd))
1400*4882a593Smuzhiyun 		return;
1401*4882a593Smuzhiyun 
1402*4882a593Smuzhiyun 	pm_generic_complete(dev);
1403*4882a593Smuzhiyun 
1404*4882a593Smuzhiyun 	genpd_lock(genpd);
1405*4882a593Smuzhiyun 
1406*4882a593Smuzhiyun 	genpd->prepared_count--;
1407*4882a593Smuzhiyun 	if (!genpd->prepared_count)
1408*4882a593Smuzhiyun 		genpd_queue_power_off_work(genpd);
1409*4882a593Smuzhiyun 
1410*4882a593Smuzhiyun 	genpd_unlock(genpd);
1411*4882a593Smuzhiyun }
1412*4882a593Smuzhiyun 
/*
 * Synchronously power the genpd of @dev off or on, keeping the domain's
 * suspended_count consistent with the direction of the transition.
 */
static void genpd_switch_state(struct device *dev, bool suspend)
{
	struct generic_pm_domain *genpd;
	bool use_lock;

	genpd = dev_to_genpd_safe(dev);
	if (!genpd)
		return;

	/*
	 * NOTE(review): the domain lock is taken only for IRQ-safe domains;
	 * presumably this runs in a syscore/noirq context where a sleeping
	 * lock must not be taken and callers guarantee exclusion - confirm.
	 */
	use_lock = genpd_is_irq_safe(genpd);

	if (use_lock)
		genpd_lock(genpd);

	if (suspend) {
		/* Account the suspend before removing power ... */
		genpd->suspended_count++;
		genpd_sync_power_off(genpd, use_lock, 0);
	} else {
		/* ... and restore power before dropping the accounting. */
		genpd_sync_power_on(genpd, use_lock, 0);
		genpd->suspended_count--;
	}

	if (use_lock)
		genpd_unlock(genpd);
}
1438*4882a593Smuzhiyun 
1439*4882a593Smuzhiyun /**
1440*4882a593Smuzhiyun  * dev_pm_genpd_suspend - Synchronously try to suspend the genpd for @dev
1441*4882a593Smuzhiyun  * @dev: The device that is attached to the genpd, that can be suspended.
1442*4882a593Smuzhiyun  *
1443*4882a593Smuzhiyun  * This routine should typically be called for a device that needs to be
1444*4882a593Smuzhiyun  * suspended during the syscore suspend phase. It may also be called during
1445*4882a593Smuzhiyun  * suspend-to-idle to suspend a corresponding CPU device that is attached to a
1446*4882a593Smuzhiyun  * genpd.
1447*4882a593Smuzhiyun  */
dev_pm_genpd_suspend(struct device * dev)1448*4882a593Smuzhiyun void dev_pm_genpd_suspend(struct device *dev)
1449*4882a593Smuzhiyun {
1450*4882a593Smuzhiyun 	genpd_switch_state(dev, true);
1451*4882a593Smuzhiyun }
1452*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(dev_pm_genpd_suspend);
1453*4882a593Smuzhiyun 
1454*4882a593Smuzhiyun /**
1455*4882a593Smuzhiyun  * dev_pm_genpd_resume - Synchronously try to resume the genpd for @dev
1456*4882a593Smuzhiyun  * @dev: The device that is attached to the genpd, which needs to be resumed.
1457*4882a593Smuzhiyun  *
1458*4882a593Smuzhiyun  * This routine should typically be called for a device that needs to be resumed
1459*4882a593Smuzhiyun  * during the syscore resume phase. It may also be called during suspend-to-idle
1460*4882a593Smuzhiyun  * to resume a corresponding CPU device that is attached to a genpd.
1461*4882a593Smuzhiyun  */
dev_pm_genpd_resume(struct device * dev)1462*4882a593Smuzhiyun void dev_pm_genpd_resume(struct device *dev)
1463*4882a593Smuzhiyun {
1464*4882a593Smuzhiyun 	genpd_switch_state(dev, false);
1465*4882a593Smuzhiyun }
1466*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(dev_pm_genpd_resume);
1467*4882a593Smuzhiyun 
1468*4882a593Smuzhiyun #else /* !CONFIG_PM_SLEEP */
1469*4882a593Smuzhiyun 
1470*4882a593Smuzhiyun #define genpd_prepare		NULL
1471*4882a593Smuzhiyun #define genpd_suspend_noirq	NULL
1472*4882a593Smuzhiyun #define genpd_resume_noirq	NULL
1473*4882a593Smuzhiyun #define genpd_freeze_noirq	NULL
1474*4882a593Smuzhiyun #define genpd_thaw_noirq	NULL
1475*4882a593Smuzhiyun #define genpd_poweroff_noirq	NULL
1476*4882a593Smuzhiyun #define genpd_restore_noirq	NULL
1477*4882a593Smuzhiyun #define genpd_complete		NULL
1478*4882a593Smuzhiyun 
1479*4882a593Smuzhiyun #endif /* CONFIG_PM_SLEEP */
1480*4882a593Smuzhiyun 
/*
 * Allocate and install the per-device genpd data for @dev.
 *
 * Takes a reference on the device's subsys_data, allocates the
 * generic_pm_domain_data and links it into dev->power.subsys_data under the
 * device's power lock. Returns the new data or an ERR_PTR() on failure
 * (-ENOMEM on allocation failure, -EINVAL if domain data already exists).
 */
static struct generic_pm_domain_data *genpd_alloc_dev_data(struct device *dev)
{
	struct generic_pm_domain_data *gpd_data;
	int ret;

	ret = dev_pm_get_subsys_data(dev);
	if (ret)
		return ERR_PTR(ret);

	gpd_data = kzalloc(sizeof(*gpd_data), GFP_KERNEL);
	if (!gpd_data) {
		ret = -ENOMEM;
		goto err_put;
	}

	/* Start with no QoS constraint and no pending wakeup deadline. */
	gpd_data->base.dev = dev;
	gpd_data->td.constraint_changed = true;
	gpd_data->td.effective_constraint_ns = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT_NS;
	gpd_data->nb.notifier_call = genpd_dev_pm_qos_notifier;
	gpd_data->next_wakeup = KTIME_MAX;

	spin_lock_irq(&dev->power.lock);

	/* Only one PM domain may own a device at a time. */
	if (dev->power.subsys_data->domain_data) {
		ret = -EINVAL;
		goto err_free;
	}

	dev->power.subsys_data->domain_data = &gpd_data->base;

	spin_unlock_irq(&dev->power.lock);

	return gpd_data;

 err_free:
	spin_unlock_irq(&dev->power.lock);
	kfree(gpd_data);
 err_put:
	dev_pm_put_subsys_data(dev);
	return ERR_PTR(ret);
}
1522*4882a593Smuzhiyun 
/*
 * Tear down what genpd_alloc_dev_data() set up: unlink the domain data from
 * the device under its power lock, then free it and drop the subsys_data
 * reference.
 */
static void genpd_free_dev_data(struct device *dev,
				struct generic_pm_domain_data *gpd_data)
{
	/* Unlink first, so no one can reach gpd_data once it is freed. */
	spin_lock_irq(&dev->power.lock);

	dev->power.subsys_data->domain_data = NULL;

	spin_unlock_irq(&dev->power.lock);

	kfree(gpd_data);
	dev_pm_put_subsys_data(dev);
}
1535*4882a593Smuzhiyun 
/*
 * Set or clear @cpu in the cpumask of @genpd and of every parent domain
 * above it, so each level of a CPU PM domain hierarchy tracks the CPUs it
 * covers. @depth feeds genpd_lock_nested() to keep lockdep happy while
 * walking up the hierarchy.
 */
static void genpd_update_cpumask(struct generic_pm_domain *genpd,
				 int cpu, bool set, unsigned int depth)
{
	struct gpd_link *link;

	/* Only CPU domains maintain a cpumask. */
	if (!genpd_is_cpu_domain(genpd))
		return;

	/* Recurse into each parent first, with its lock taken nested. */
	list_for_each_entry(link, &genpd->child_links, child_node) {
		struct generic_pm_domain *parent = link->parent;

		genpd_lock_nested(parent, depth + 1);
		genpd_update_cpumask(parent, cpu, set, depth + 1);
		genpd_unlock(parent);
	}

	if (set)
		cpumask_set_cpu(cpu, genpd->cpus);
	else
		cpumask_clear_cpu(cpu, genpd->cpus);
}
1557*4882a593Smuzhiyun 
genpd_set_cpumask(struct generic_pm_domain * genpd,int cpu)1558*4882a593Smuzhiyun static void genpd_set_cpumask(struct generic_pm_domain *genpd, int cpu)
1559*4882a593Smuzhiyun {
1560*4882a593Smuzhiyun 	if (cpu >= 0)
1561*4882a593Smuzhiyun 		genpd_update_cpumask(genpd, cpu, true, 0);
1562*4882a593Smuzhiyun }
1563*4882a593Smuzhiyun 
genpd_clear_cpumask(struct generic_pm_domain * genpd,int cpu)1564*4882a593Smuzhiyun static void genpd_clear_cpumask(struct generic_pm_domain *genpd, int cpu)
1565*4882a593Smuzhiyun {
1566*4882a593Smuzhiyun 	if (cpu >= 0)
1567*4882a593Smuzhiyun 		genpd_update_cpumask(genpd, cpu, false, 0);
1568*4882a593Smuzhiyun }
1569*4882a593Smuzhiyun 
genpd_get_cpu(struct generic_pm_domain * genpd,struct device * dev)1570*4882a593Smuzhiyun static int genpd_get_cpu(struct generic_pm_domain *genpd, struct device *dev)
1571*4882a593Smuzhiyun {
1572*4882a593Smuzhiyun 	int cpu;
1573*4882a593Smuzhiyun 
1574*4882a593Smuzhiyun 	if (!genpd_is_cpu_domain(genpd))
1575*4882a593Smuzhiyun 		return -1;
1576*4882a593Smuzhiyun 
1577*4882a593Smuzhiyun 	for_each_possible_cpu(cpu) {
1578*4882a593Smuzhiyun 		if (get_cpu_device(cpu) == dev)
1579*4882a593Smuzhiyun 			return cpu;
1580*4882a593Smuzhiyun 	}
1581*4882a593Smuzhiyun 
1582*4882a593Smuzhiyun 	return -1;
1583*4882a593Smuzhiyun }
1584*4882a593Smuzhiyun 
/*
 * Attach @dev to @genpd. @base_dev is the device used to look up a CPU
 * number (it may differ from @dev for virtual devices). Called with
 * gpd_list_lock held by the callers. Returns 0 on success or a negative
 * error code.
 */
static int genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
			    struct device *base_dev)
{
	struct generic_pm_domain_data *gpd_data;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev))
		return -EINVAL;

	gpd_data = genpd_alloc_dev_data(dev);
	if (IS_ERR(gpd_data))
		return PTR_ERR(gpd_data);

	/* -1 if @base_dev is not a CPU or @genpd is not a CPU domain. */
	gpd_data->cpu = genpd_get_cpu(genpd, base_dev);

	/* Give the provider a chance to veto/prepare before linking in. */
	ret = genpd->attach_dev ? genpd->attach_dev(genpd, dev) : 0;
	if (ret)
		goto out;

	genpd_lock(genpd);

	genpd_set_cpumask(genpd, gpd_data->cpu);
	dev_pm_domain_set(dev, &genpd->domain);

	genpd->device_count++;
	genpd->max_off_time_changed = true;

	list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);

	genpd_unlock(genpd);
 out:
	/* Only register the QoS notifier once the attach fully succeeded. */
	if (ret)
		genpd_free_dev_data(dev, gpd_data);
	else
		dev_pm_qos_add_notifier(dev, &gpd_data->nb,
					DEV_PM_QOS_RESUME_LATENCY);

	return ret;
}
1626*4882a593Smuzhiyun 
1627*4882a593Smuzhiyun /**
1628*4882a593Smuzhiyun  * pm_genpd_add_device - Add a device to an I/O PM domain.
1629*4882a593Smuzhiyun  * @genpd: PM domain to add the device to.
1630*4882a593Smuzhiyun  * @dev: Device to be added.
1631*4882a593Smuzhiyun  */
pm_genpd_add_device(struct generic_pm_domain * genpd,struct device * dev)1632*4882a593Smuzhiyun int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev)
1633*4882a593Smuzhiyun {
1634*4882a593Smuzhiyun 	int ret;
1635*4882a593Smuzhiyun 
1636*4882a593Smuzhiyun 	mutex_lock(&gpd_list_lock);
1637*4882a593Smuzhiyun 	ret = genpd_add_device(genpd, dev, dev);
1638*4882a593Smuzhiyun 	mutex_unlock(&gpd_list_lock);
1639*4882a593Smuzhiyun 
1640*4882a593Smuzhiyun 	return ret;
1641*4882a593Smuzhiyun }
1642*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(pm_genpd_add_device);
1643*4882a593Smuzhiyun 
/*
 * Detach @dev from @genpd. Fails with -EAGAIN while a system transition is
 * in progress (prepared_count > 0); in that case the QoS notifier that was
 * removed up front is re-registered so the device state is unchanged.
 */
static int genpd_remove_device(struct generic_pm_domain *genpd,
			       struct device *dev)
{
	struct generic_pm_domain_data *gpd_data;
	struct pm_domain_data *pdd;
	int ret = 0;

	dev_dbg(dev, "%s()\n", __func__);

	/* Stop QoS updates before tearing the device out of the domain. */
	pdd = dev->power.subsys_data->domain_data;
	gpd_data = to_gpd_data(pdd);
	dev_pm_qos_remove_notifier(dev, &gpd_data->nb,
				   DEV_PM_QOS_RESUME_LATENCY);

	genpd_lock(genpd);

	if (genpd->prepared_count > 0) {
		ret = -EAGAIN;
		goto out;
	}

	genpd->device_count--;
	genpd->max_off_time_changed = true;

	genpd_clear_cpumask(genpd, gpd_data->cpu);
	dev_pm_domain_set(dev, NULL);

	list_del_init(&pdd->list_node);

	genpd_unlock(genpd);

	/* Let the provider undo whatever attach_dev() did. */
	if (genpd->detach_dev)
		genpd->detach_dev(genpd, dev);

	genpd_free_dev_data(dev, gpd_data);

	return 0;

 out:
	/* Removal refused: restore the QoS notifier removed above. */
	genpd_unlock(genpd);
	dev_pm_qos_add_notifier(dev, &gpd_data->nb, DEV_PM_QOS_RESUME_LATENCY);

	return ret;
}
1688*4882a593Smuzhiyun 
1689*4882a593Smuzhiyun /**
1690*4882a593Smuzhiyun  * pm_genpd_remove_device - Remove a device from an I/O PM domain.
1691*4882a593Smuzhiyun  * @dev: Device to be removed.
1692*4882a593Smuzhiyun  */
pm_genpd_remove_device(struct device * dev)1693*4882a593Smuzhiyun int pm_genpd_remove_device(struct device *dev)
1694*4882a593Smuzhiyun {
1695*4882a593Smuzhiyun 	struct generic_pm_domain *genpd = dev_to_genpd_safe(dev);
1696*4882a593Smuzhiyun 
1697*4882a593Smuzhiyun 	if (!genpd)
1698*4882a593Smuzhiyun 		return -EINVAL;
1699*4882a593Smuzhiyun 
1700*4882a593Smuzhiyun 	return genpd_remove_device(genpd, dev);
1701*4882a593Smuzhiyun }
1702*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(pm_genpd_remove_device);
1703*4882a593Smuzhiyun 
/**
 * dev_pm_genpd_add_notifier - Add a genpd power on/off notifier for @dev
 *
 * @dev: Device that should be associated with the notifier
 * @nb: The notifier block to register
 *
 * Users may call this function to add a genpd power on/off notifier for an
 * attached @dev. Only one notifier per device is allowed. The notifier is
 * sent when genpd is powering on/off the PM domain.
 *
 * It is assumed that the user guarantee that the genpd wouldn't be detached
 * while this routine is getting called.
 *
 * Returns 0 on success and negative error values on failures.
 */
int dev_pm_genpd_add_notifier(struct device *dev, struct notifier_block *nb)
{
	struct generic_pm_domain *genpd;
	struct generic_pm_domain_data *gpd_data;
	int ret;

	genpd = dev_to_genpd_safe(dev);
	if (!genpd)
		return -ENODEV;

	/* The device must have been attached via genpd_add_device(). */
	if (WARN_ON(!dev->power.subsys_data ||
		     !dev->power.subsys_data->domain_data))
		return -EINVAL;

	/* Only one power notifier per device. */
	gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
	if (gpd_data->power_nb)
		return -EEXIST;

	/* Register under the domain lock, which serializes notifications. */
	genpd_lock(genpd);
	ret = raw_notifier_chain_register(&genpd->power_notifiers, nb);
	genpd_unlock(genpd);

	if (ret) {
		dev_warn(dev, "failed to add notifier for PM domain %s\n",
			 genpd->name);
		return ret;
	}

	gpd_data->power_nb = nb;
	return 0;
}
EXPORT_SYMBOL_GPL(dev_pm_genpd_add_notifier);
1751*4882a593Smuzhiyun 
/**
 * dev_pm_genpd_remove_notifier - Remove a genpd power on/off notifier for @dev
 *
 * @dev: Device that is associated with the notifier
 *
 * Users may call this function to remove a genpd power on/off notifier for an
 * attached @dev.
 *
 * It is assumed that the user guarantee that the genpd wouldn't be detached
 * while this routine is getting called.
 *
 * Returns 0 on success and negative error values on failures.
 */
int dev_pm_genpd_remove_notifier(struct device *dev)
{
	struct generic_pm_domain *genpd;
	struct generic_pm_domain_data *gpd_data;
	int ret;

	genpd = dev_to_genpd_safe(dev);
	if (!genpd)
		return -ENODEV;

	/* The device must have been attached via genpd_add_device(). */
	if (WARN_ON(!dev->power.subsys_data ||
		     !dev->power.subsys_data->domain_data))
		return -EINVAL;

	/* Nothing to remove if no notifier was ever registered. */
	gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
	if (!gpd_data->power_nb)
		return -ENODEV;

	/* Unregister under the domain lock, mirroring the add path. */
	genpd_lock(genpd);
	ret = raw_notifier_chain_unregister(&genpd->power_notifiers,
					    gpd_data->power_nb);
	genpd_unlock(genpd);

	if (ret) {
		dev_warn(dev, "failed to remove notifier for PM domain %s\n",
			 genpd->name);
		return ret;
	}

	gpd_data->power_nb = NULL;
	return 0;
}
EXPORT_SYMBOL_GPL(dev_pm_genpd_remove_notifier);
1798*4882a593Smuzhiyun 
/*
 * Link @subdomain as a child of @genpd. Called with gpd_list_lock held by
 * the callers. Returns 0 on success, -EINVAL on invalid arguments, an
 * IRQ-safety mismatch, an inconsistent on/off state, or a pre-existing
 * link, and -ENOMEM on allocation failure.
 */
static int genpd_add_subdomain(struct generic_pm_domain *genpd,
			       struct generic_pm_domain *subdomain)
{
	struct gpd_link *link, *itr;
	int ret = 0;

	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain)
	    || genpd == subdomain)
		return -EINVAL;

	/*
	 * If the domain can be powered on/off in an IRQ safe
	 * context, ensure that the subdomain can also be
	 * powered on/off in that context.
	 */
	if (!genpd_is_irq_safe(genpd) && genpd_is_irq_safe(subdomain)) {
		WARN(1, "Parent %s of subdomain %s must be IRQ safe\n",
				genpd->name, subdomain->name);
		return -EINVAL;
	}

	/* Allocate outside the locks; freed below if linking fails. */
	link = kzalloc(sizeof(*link), GFP_KERNEL);
	if (!link)
		return -ENOMEM;

	/* Lock order: child first, then parent nested. */
	genpd_lock(subdomain);
	genpd_lock_nested(genpd, SINGLE_DEPTH_NESTING);

	/* A powered-on child cannot hang off a powered-off parent. */
	if (!genpd_status_on(genpd) && genpd_status_on(subdomain)) {
		ret = -EINVAL;
		goto out;
	}

	/* Reject duplicate parent/child links. */
	list_for_each_entry(itr, &genpd->parent_links, parent_node) {
		if (itr->child == subdomain && itr->parent == genpd) {
			ret = -EINVAL;
			goto out;
		}
	}

	link->parent = genpd;
	list_add_tail(&link->parent_node, &genpd->parent_links);
	link->child = subdomain;
	list_add_tail(&link->child_node, &subdomain->child_links);
	/* An already-on child keeps its new parent powered. */
	if (genpd_status_on(subdomain))
		genpd_sd_counter_inc(genpd);

 out:
	genpd_unlock(genpd);
	genpd_unlock(subdomain);
	if (ret)
		kfree(link);
	return ret;
}
1853*4882a593Smuzhiyun 
1854*4882a593Smuzhiyun /**
1855*4882a593Smuzhiyun  * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
1856*4882a593Smuzhiyun  * @genpd: Leader PM domain to add the subdomain to.
1857*4882a593Smuzhiyun  * @subdomain: Subdomain to be added.
1858*4882a593Smuzhiyun  */
pm_genpd_add_subdomain(struct generic_pm_domain * genpd,struct generic_pm_domain * subdomain)1859*4882a593Smuzhiyun int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
1860*4882a593Smuzhiyun 			   struct generic_pm_domain *subdomain)
1861*4882a593Smuzhiyun {
1862*4882a593Smuzhiyun 	int ret;
1863*4882a593Smuzhiyun 
1864*4882a593Smuzhiyun 	mutex_lock(&gpd_list_lock);
1865*4882a593Smuzhiyun 	ret = genpd_add_subdomain(genpd, subdomain);
1866*4882a593Smuzhiyun 	mutex_unlock(&gpd_list_lock);
1867*4882a593Smuzhiyun 
1868*4882a593Smuzhiyun 	return ret;
1869*4882a593Smuzhiyun }
1870*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(pm_genpd_add_subdomain);
1871*4882a593Smuzhiyun 
/**
 * pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
 * @genpd: Leader PM domain to remove the subdomain from.
 * @subdomain: Subdomain to be removed.
 */
int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
			      struct generic_pm_domain *subdomain)
{
	struct gpd_link *l, *link;
	int ret = -EINVAL;

	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
		return -EINVAL;

	/* Lock order: child first, then parent nested (as in the add path). */
	genpd_lock(subdomain);
	genpd_lock_nested(genpd, SINGLE_DEPTH_NESTING);

	/* Refuse while the subdomain still has children or devices. */
	if (!list_empty(&subdomain->parent_links) || subdomain->device_count) {
		pr_warn("%s: unable to remove subdomain %s\n",
			genpd->name, subdomain->name);
		ret = -EBUSY;
		goto out;
	}

	/* _safe iteration: the matching link is deleted and freed in-loop. */
	list_for_each_entry_safe(link, l, &genpd->parent_links, parent_node) {
		if (link->child != subdomain)
			continue;

		list_del(&link->parent_node);
		list_del(&link->child_node);
		kfree(link);
		/* Undo the sd counter taken when the on child was linked. */
		if (genpd_status_on(subdomain))
			genpd_sd_counter_dec(genpd);

		ret = 0;
		break;
	}

out:
	genpd_unlock(genpd);
	genpd_unlock(subdomain);

	return ret;
}
EXPORT_SYMBOL_GPL(pm_genpd_remove_subdomain);
1917*4882a593Smuzhiyun 
/*
 * free_states callback paired with genpd_set_default_power_state(); the
 * single default state was allocated in one chunk, so state_count is unused.
 */
static void genpd_free_default_power_state(struct genpd_power_state *states,
					   unsigned int state_count)
{
	kfree(states);
}
1923*4882a593Smuzhiyun 
genpd_set_default_power_state(struct generic_pm_domain * genpd)1924*4882a593Smuzhiyun static int genpd_set_default_power_state(struct generic_pm_domain *genpd)
1925*4882a593Smuzhiyun {
1926*4882a593Smuzhiyun 	struct genpd_power_state *state;
1927*4882a593Smuzhiyun 
1928*4882a593Smuzhiyun 	state = kzalloc(sizeof(*state), GFP_KERNEL);
1929*4882a593Smuzhiyun 	if (!state)
1930*4882a593Smuzhiyun 		return -ENOMEM;
1931*4882a593Smuzhiyun 
1932*4882a593Smuzhiyun 	genpd->states = state;
1933*4882a593Smuzhiyun 	genpd->state_count = 1;
1934*4882a593Smuzhiyun 	genpd->free_states = genpd_free_default_power_state;
1935*4882a593Smuzhiyun 
1936*4882a593Smuzhiyun 	return 0;
1937*4882a593Smuzhiyun }
1938*4882a593Smuzhiyun 
genpd_lock_init(struct generic_pm_domain * genpd)1939*4882a593Smuzhiyun static void genpd_lock_init(struct generic_pm_domain *genpd)
1940*4882a593Smuzhiyun {
1941*4882a593Smuzhiyun 	if (genpd->flags & GENPD_FLAG_IRQ_SAFE) {
1942*4882a593Smuzhiyun 		spin_lock_init(&genpd->slock);
1943*4882a593Smuzhiyun 		genpd->lock_ops = &genpd_spin_ops;
1944*4882a593Smuzhiyun 	} else {
1945*4882a593Smuzhiyun 		mutex_init(&genpd->mlock);
1946*4882a593Smuzhiyun 		genpd->lock_ops = &genpd_mtx_ops;
1947*4882a593Smuzhiyun 	}
1948*4882a593Smuzhiyun }
1949*4882a593Smuzhiyun 
/**
 * pm_genpd_init - Initialize a generic I/O PM domain object.
 * @genpd: PM domain object to initialize.
 * @gov: PM domain governor to associate with the domain (may be NULL).
 * @is_off: Initial power state of the domain (true if powered off).
 *
 * Returns 0 on successful initialization, else a negative error code.
 */
int pm_genpd_init(struct generic_pm_domain *genpd,
		  struct dev_power_governor *gov, bool is_off)
{
	int ret;

	if (IS_ERR_OR_NULL(genpd))
		return -EINVAL;

	INIT_LIST_HEAD(&genpd->parent_links);
	INIT_LIST_HEAD(&genpd->child_links);
	INIT_LIST_HEAD(&genpd->dev_list);
	RAW_INIT_NOTIFIER_HEAD(&genpd->power_notifiers);
	/* Picks a spinlock or a mutex based on GENPD_FLAG_IRQ_SAFE. */
	genpd_lock_init(genpd);
	genpd->gov = gov;
	INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn);
	atomic_set(&genpd->sd_count, 0);
	genpd->status = is_off ? GENPD_STATE_OFF : GENPD_STATE_ON;
	genpd->device_count = 0;
	genpd->max_off_time_ns = -1;
	genpd->max_off_time_changed = true;
	genpd->next_wakeup = KTIME_MAX;
	genpd->provider = NULL;
	genpd->has_provider = false;
	genpd->accounting_time = ktime_get();
	/* Route the PM core callbacks through the genpd implementations. */
	genpd->domain.ops.runtime_suspend = genpd_runtime_suspend;
	genpd->domain.ops.runtime_resume = genpd_runtime_resume;
	genpd->domain.ops.prepare = genpd_prepare;
	genpd->domain.ops.suspend_noirq = genpd_suspend_noirq;
	genpd->domain.ops.resume_noirq = genpd_resume_noirq;
	genpd->domain.ops.freeze_noirq = genpd_freeze_noirq;
	genpd->domain.ops.thaw_noirq = genpd_thaw_noirq;
	genpd->domain.ops.poweroff_noirq = genpd_poweroff_noirq;
	genpd->domain.ops.restore_noirq = genpd_restore_noirq;
	genpd->domain.ops.complete = genpd_complete;
	genpd->domain.start = genpd_dev_pm_start;

	/* Gate device clocks via the PM clock framework when requested. */
	if (genpd->flags & GENPD_FLAG_PM_CLK) {
		genpd->dev_ops.stop = pm_clk_suspend;
		genpd->dev_ops.start = pm_clk_resume;
	}

	/* Always-on domains must be powered on at initialization. */
	if ((genpd_is_always_on(genpd) || genpd_is_rpm_always_on(genpd)) &&
			!genpd_status_on(genpd))
		return -EINVAL;

	if (genpd_is_cpu_domain(genpd) &&
	    !zalloc_cpumask_var(&genpd->cpus, GFP_KERNEL))
		return -ENOMEM;

	/* Use only one "off" state if there were no states declared */
	if (genpd->state_count == 0) {
		ret = genpd_set_default_power_state(genpd);
		if (ret) {
			/* Undo the cpumask allocation made just above. */
			if (genpd_is_cpu_domain(genpd))
				free_cpumask_var(genpd->cpus);
			return ret;
		}
	} else if (!gov && genpd->state_count > 1) {
		pr_warn("%s: no governor for states\n", genpd->name);
	}

	device_initialize(&genpd->dev);
	dev_set_name(&genpd->dev, "%s", genpd->name);

	/* Publish the domain: new entries go to the head of gpd_list. */
	mutex_lock(&gpd_list_lock);
	list_add(&genpd->gpd_list_node, &gpd_list);
	genpd_debug_add(genpd);
	mutex_unlock(&gpd_list_lock);

	return 0;
}
2030*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(pm_genpd_init);
2031*4882a593Smuzhiyun 
/*
 * genpd_remove - Remove a generic PM domain from the global domain list.
 * @genpd: PM domain to remove.
 *
 * Callers hold gpd_list_lock (see pm_genpd_remove() and
 * of_genpd_remove_last()).  Removal is refused while the domain still
 * has a provider, attached devices, or subdomains.
 */
static int genpd_remove(struct generic_pm_domain *genpd)
{
	struct gpd_link *l, *link;

	if (IS_ERR_OR_NULL(genpd))
		return -EINVAL;

	genpd_lock(genpd);

	/* A live provider may still hand out this domain - refuse. */
	if (genpd->has_provider) {
		genpd_unlock(genpd);
		pr_err("Provider present, unable to remove %s\n", genpd->name);
		return -EBUSY;
	}

	/* Still parenting subdomains, or devices attached - refuse. */
	if (!list_empty(&genpd->parent_links) || genpd->device_count) {
		genpd_unlock(genpd);
		pr_err("%s: unable to remove %s\n", __func__, genpd->name);
		return -EBUSY;
	}

	/* Unlink this domain from all of its parent domains. */
	list_for_each_entry_safe(link, l, &genpd->child_links, child_node) {
		list_del(&link->parent_node);
		list_del(&link->child_node);
		kfree(link);
	}

	list_del(&genpd->gpd_list_node);
	genpd_unlock(genpd);
	genpd_debug_remove(genpd);
	/* Make sure no power-off work is still queued or running. */
	cancel_work_sync(&genpd->power_off_work);
	if (genpd_is_cpu_domain(genpd))
		free_cpumask_var(genpd->cpus);
	/* Release the power-state table via the registered callback. */
	if (genpd->free_states)
		genpd->free_states(genpd->states, genpd->state_count);

	pr_debug("%s: removed %s\n", __func__, genpd->name);

	return 0;
}
2072*4882a593Smuzhiyun 
2073*4882a593Smuzhiyun /**
2074*4882a593Smuzhiyun  * pm_genpd_remove - Remove a generic I/O PM domain
2075*4882a593Smuzhiyun  * @genpd: Pointer to PM domain that is to be removed.
2076*4882a593Smuzhiyun  *
2077*4882a593Smuzhiyun  * To remove the PM domain, this function:
2078*4882a593Smuzhiyun  *  - Removes the PM domain as a subdomain to any parent domains,
2079*4882a593Smuzhiyun  *    if it was added.
2080*4882a593Smuzhiyun  *  - Removes the PM domain from the list of registered PM domains.
2081*4882a593Smuzhiyun  *
2082*4882a593Smuzhiyun  * The PM domain will only be removed, if the associated provider has
2083*4882a593Smuzhiyun  * been removed, it is not a parent to any other PM domain and has no
2084*4882a593Smuzhiyun  * devices associated with it.
2085*4882a593Smuzhiyun  */
pm_genpd_remove(struct generic_pm_domain * genpd)2086*4882a593Smuzhiyun int pm_genpd_remove(struct generic_pm_domain *genpd)
2087*4882a593Smuzhiyun {
2088*4882a593Smuzhiyun 	int ret;
2089*4882a593Smuzhiyun 
2090*4882a593Smuzhiyun 	mutex_lock(&gpd_list_lock);
2091*4882a593Smuzhiyun 	ret = genpd_remove(genpd);
2092*4882a593Smuzhiyun 	mutex_unlock(&gpd_list_lock);
2093*4882a593Smuzhiyun 
2094*4882a593Smuzhiyun 	return ret;
2095*4882a593Smuzhiyun }
2096*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(pm_genpd_remove);
2097*4882a593Smuzhiyun 
2098*4882a593Smuzhiyun #ifdef CONFIG_PM_GENERIC_DOMAINS_OF
2099*4882a593Smuzhiyun 
2100*4882a593Smuzhiyun /*
2101*4882a593Smuzhiyun  * Device Tree based PM domain providers.
2102*4882a593Smuzhiyun  *
2103*4882a593Smuzhiyun  * The code below implements generic device tree based PM domain providers that
2104*4882a593Smuzhiyun  * bind device tree nodes with generic PM domains registered in the system.
2105*4882a593Smuzhiyun  *
2106*4882a593Smuzhiyun  * Any driver that registers generic PM domains and needs to support binding of
2107*4882a593Smuzhiyun  * devices to these domains is supposed to register a PM domain provider, which
2108*4882a593Smuzhiyun  * maps a PM domain specifier retrieved from the device tree to a PM domain.
2109*4882a593Smuzhiyun  *
2110*4882a593Smuzhiyun  * Two simple mapping functions have been provided for convenience:
2111*4882a593Smuzhiyun  *  - genpd_xlate_simple() for 1:1 device tree node to PM domain mapping.
2112*4882a593Smuzhiyun  *  - genpd_xlate_onecell() for mapping of multiple PM domains per node by
2113*4882a593Smuzhiyun  *    index.
2114*4882a593Smuzhiyun  */
2115*4882a593Smuzhiyun 
/**
 * struct of_genpd_provider - PM domain provider registration structure
 * @link: Entry in global list of PM domain providers
 * @node: Pointer to device tree node of PM domain provider
 * @xlate: Provider-specific xlate callback mapping a set of specifier cells
 *         into a PM domain.
 * @data: context pointer to be passed into @xlate callback
 *
 * Instances live on the of_genpd_providers list, which is protected by
 * of_genpd_mutex.
 */
struct of_genpd_provider {
	struct list_head link;
	struct device_node *node;
	genpd_xlate_t xlate;
	void *data;
};
2130*4882a593Smuzhiyun 
/* List of registered PM domain providers. */
static LIST_HEAD(of_genpd_providers);
/* Serializes additions to and removals from the list above. */
static DEFINE_MUTEX(of_genpd_mutex);
2135*4882a593Smuzhiyun 
/**
 * genpd_xlate_simple() - Xlate function for direct node-domain mapping
 * @genpdspec: OF phandle args to map into a PM domain
 * @data: xlate function private data - pointer to struct generic_pm_domain
 *
 * This is a generic xlate function that can be used to model PM domains that
 * have their own device tree nodes. The private data of xlate function needs
 * to be a valid pointer to struct generic_pm_domain.
 *
 * Return: @data cast to a generic_pm_domain pointer; the phandle arguments
 * are ignored.
 */
static struct generic_pm_domain *genpd_xlate_simple(
					struct of_phandle_args *genpdspec,
					void *data)
{
	return data;
}
2151*4882a593Smuzhiyun 
2152*4882a593Smuzhiyun /**
2153*4882a593Smuzhiyun  * genpd_xlate_onecell() - Xlate function using a single index.
2154*4882a593Smuzhiyun  * @genpdspec: OF phandle args to map into a PM domain
2155*4882a593Smuzhiyun  * @data: xlate function private data - pointer to struct genpd_onecell_data
2156*4882a593Smuzhiyun  *
2157*4882a593Smuzhiyun  * This is a generic xlate function that can be used to model simple PM domain
2158*4882a593Smuzhiyun  * controllers that have one device tree node and provide multiple PM domains.
2159*4882a593Smuzhiyun  * A single cell is used as an index into an array of PM domains specified in
2160*4882a593Smuzhiyun  * the genpd_onecell_data struct when registering the provider.
2161*4882a593Smuzhiyun  */
genpd_xlate_onecell(struct of_phandle_args * genpdspec,void * data)2162*4882a593Smuzhiyun static struct generic_pm_domain *genpd_xlate_onecell(
2163*4882a593Smuzhiyun 					struct of_phandle_args *genpdspec,
2164*4882a593Smuzhiyun 					void *data)
2165*4882a593Smuzhiyun {
2166*4882a593Smuzhiyun 	struct genpd_onecell_data *genpd_data = data;
2167*4882a593Smuzhiyun 	unsigned int idx = genpdspec->args[0];
2168*4882a593Smuzhiyun 
2169*4882a593Smuzhiyun 	if (genpdspec->args_count != 1)
2170*4882a593Smuzhiyun 		return ERR_PTR(-EINVAL);
2171*4882a593Smuzhiyun 
2172*4882a593Smuzhiyun 	if (idx >= genpd_data->num_domains) {
2173*4882a593Smuzhiyun 		pr_err("%s: invalid domain index %u\n", __func__, idx);
2174*4882a593Smuzhiyun 		return ERR_PTR(-EINVAL);
2175*4882a593Smuzhiyun 	}
2176*4882a593Smuzhiyun 
2177*4882a593Smuzhiyun 	if (!genpd_data->domains[idx])
2178*4882a593Smuzhiyun 		return ERR_PTR(-ENOENT);
2179*4882a593Smuzhiyun 
2180*4882a593Smuzhiyun 	return genpd_data->domains[idx];
2181*4882a593Smuzhiyun }
2182*4882a593Smuzhiyun 
2183*4882a593Smuzhiyun /**
2184*4882a593Smuzhiyun  * genpd_add_provider() - Register a PM domain provider for a node
2185*4882a593Smuzhiyun  * @np: Device node pointer associated with the PM domain provider.
2186*4882a593Smuzhiyun  * @xlate: Callback for decoding PM domain from phandle arguments.
2187*4882a593Smuzhiyun  * @data: Context pointer for @xlate callback.
2188*4882a593Smuzhiyun  */
genpd_add_provider(struct device_node * np,genpd_xlate_t xlate,void * data)2189*4882a593Smuzhiyun static int genpd_add_provider(struct device_node *np, genpd_xlate_t xlate,
2190*4882a593Smuzhiyun 			      void *data)
2191*4882a593Smuzhiyun {
2192*4882a593Smuzhiyun 	struct of_genpd_provider *cp;
2193*4882a593Smuzhiyun 
2194*4882a593Smuzhiyun 	cp = kzalloc(sizeof(*cp), GFP_KERNEL);
2195*4882a593Smuzhiyun 	if (!cp)
2196*4882a593Smuzhiyun 		return -ENOMEM;
2197*4882a593Smuzhiyun 
2198*4882a593Smuzhiyun 	cp->node = of_node_get(np);
2199*4882a593Smuzhiyun 	cp->data = data;
2200*4882a593Smuzhiyun 	cp->xlate = xlate;
2201*4882a593Smuzhiyun 	fwnode_dev_initialized(&np->fwnode, true);
2202*4882a593Smuzhiyun 
2203*4882a593Smuzhiyun 	mutex_lock(&of_genpd_mutex);
2204*4882a593Smuzhiyun 	list_add(&cp->link, &of_genpd_providers);
2205*4882a593Smuzhiyun 	mutex_unlock(&of_genpd_mutex);
2206*4882a593Smuzhiyun 	pr_debug("Added domain provider from %pOF\n", np);
2207*4882a593Smuzhiyun 
2208*4882a593Smuzhiyun 	return 0;
2209*4882a593Smuzhiyun }
2210*4882a593Smuzhiyun 
genpd_present(const struct generic_pm_domain * genpd)2211*4882a593Smuzhiyun static bool genpd_present(const struct generic_pm_domain *genpd)
2212*4882a593Smuzhiyun {
2213*4882a593Smuzhiyun 	const struct generic_pm_domain *gpd;
2214*4882a593Smuzhiyun 
2215*4882a593Smuzhiyun 	list_for_each_entry(gpd, &gpd_list, gpd_list_node)
2216*4882a593Smuzhiyun 		if (gpd == genpd)
2217*4882a593Smuzhiyun 			return true;
2218*4882a593Smuzhiyun 	return false;
2219*4882a593Smuzhiyun }
2220*4882a593Smuzhiyun 
/**
 * of_genpd_add_provider_simple() - Register a simple PM domain provider
 * @np: Device node pointer associated with the PM domain provider.
 * @genpd: Pointer to PM domain associated with the PM domain provider.
 *
 * Returns 0 on success or a negative error code on failure.  -EINVAL is
 * returned if @genpd was never registered via pm_genpd_init().
 */
int of_genpd_add_provider_simple(struct device_node *np,
				 struct generic_pm_domain *genpd)
{
	int ret = -EINVAL;

	if (!np || !genpd)
		return -EINVAL;

	mutex_lock(&gpd_list_lock);

	/* Only domains already on gpd_list may get a provider. */
	if (!genpd_present(genpd))
		goto unlock;

	genpd->dev.of_node = np;

	/* Parse genpd OPP table */
	if (genpd->set_performance_state) {
		ret = dev_pm_opp_of_add_table(&genpd->dev);
		if (ret) {
			if (ret != -EPROBE_DEFER)
				dev_err(&genpd->dev, "Failed to add OPP table: %d\n",
					ret);
			goto unlock;
		}

		/*
		 * Save table for faster processing while setting performance
		 * state.
		 */
		genpd->opp_table = dev_pm_opp_get_opp_table(&genpd->dev);
		WARN_ON(IS_ERR(genpd->opp_table));
	}

	ret = genpd_add_provider(np, genpd_xlate_simple, genpd);
	if (ret) {
		/* Roll back the OPP table set up above. */
		if (genpd->set_performance_state) {
			dev_pm_opp_put_opp_table(genpd->opp_table);
			dev_pm_opp_of_remove_table(&genpd->dev);
		}

		goto unlock;
	}

	genpd->provider = &np->fwnode;
	genpd->has_provider = true;

unlock:
	mutex_unlock(&gpd_list_lock);

	return ret;
}
2277*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(of_genpd_add_provider_simple);
2278*4882a593Smuzhiyun 
/**
 * of_genpd_add_provider_onecell() - Register a onecell PM domain provider
 * @np: Device node pointer associated with the PM domain provider.
 * @data: Pointer to the data associated with the PM domain provider.
 *
 * Returns 0 on success or a negative error code on failure.  On failure,
 * every domain that was already marked as provided is rolled back.
 */
int of_genpd_add_provider_onecell(struct device_node *np,
				  struct genpd_onecell_data *data)
{
	struct generic_pm_domain *genpd;
	unsigned int i;
	int ret = -EINVAL;

	if (!np || !data)
		return -EINVAL;

	mutex_lock(&gpd_list_lock);

	/* Fall back to index-based translation if none was supplied. */
	if (!data->xlate)
		data->xlate = genpd_xlate_onecell;

	for (i = 0; i < data->num_domains; i++) {
		genpd = data->domains[i];

		/* NULL slots are legal: they simply map to no domain. */
		if (!genpd)
			continue;
		/* Every domain must have been set up via pm_genpd_init(). */
		if (!genpd_present(genpd))
			goto error;

		genpd->dev.of_node = np;

		/* Parse genpd OPP table */
		if (genpd->set_performance_state) {
			ret = dev_pm_opp_of_add_table_indexed(&genpd->dev, i);
			if (ret) {
				if (ret != -EPROBE_DEFER)
					dev_err(&genpd->dev, "Failed to add OPP table for index %d: %d\n",
						i, ret);
				goto error;
			}

			/*
			 * Save table for faster processing while setting
			 * performance state.
			 */
			genpd->opp_table = dev_pm_opp_get_opp_table_indexed(&genpd->dev, i);
			WARN_ON(IS_ERR(genpd->opp_table));
		}

		genpd->provider = &np->fwnode;
		genpd->has_provider = true;
	}

	ret = genpd_add_provider(np, data->xlate, data);
	if (ret < 0)
		goto error;

	mutex_unlock(&gpd_list_lock);

	return 0;

error:
	/* Unwind only entries 0..i-1, which were fully set up above. */
	while (i--) {
		genpd = data->domains[i];

		if (!genpd)
			continue;

		genpd->provider = NULL;
		genpd->has_provider = false;

		if (genpd->set_performance_state) {
			dev_pm_opp_put_opp_table(genpd->opp_table);
			dev_pm_opp_of_remove_table(&genpd->dev);
		}
	}

	mutex_unlock(&gpd_list_lock);

	return ret;
}
2359*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(of_genpd_add_provider_onecell);
2360*4882a593Smuzhiyun 
/**
 * of_genpd_del_provider() - Remove a previously registered PM domain provider
 * @np: Device node pointer associated with the PM domain provider
 */
void of_genpd_del_provider(struct device_node *np)
{
	struct of_genpd_provider *cp, *tmp;
	struct generic_pm_domain *gpd;

	/*
	 * Lock order matches the rest of this file: gpd_list_lock first,
	 * then of_genpd_mutex.
	 */
	mutex_lock(&gpd_list_lock);
	mutex_lock(&of_genpd_mutex);
	list_for_each_entry_safe(cp, tmp, &of_genpd_providers, link) {
		if (cp->node == np) {
			/*
			 * For each PM domain associated with the
			 * provider, set the 'has_provider' to false
			 * so that the PM domain can be safely removed.
			 */
			list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
				if (gpd->provider == &np->fwnode) {
					gpd->has_provider = false;

					/* OPP table exists only for domains
					 * with set_performance_state. */
					if (!gpd->set_performance_state)
						continue;

					dev_pm_opp_put_opp_table(gpd->opp_table);
					dev_pm_opp_of_remove_table(&gpd->dev);
				}
			}

			/* Drop the node reference taken at registration. */
			fwnode_dev_initialized(&cp->node->fwnode, false);
			list_del(&cp->link);
			of_node_put(cp->node);
			kfree(cp);
			break;
		}
	}
	mutex_unlock(&of_genpd_mutex);
	mutex_unlock(&gpd_list_lock);
}
2401*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(of_genpd_del_provider);
2402*4882a593Smuzhiyun 
2403*4882a593Smuzhiyun /**
2404*4882a593Smuzhiyun  * genpd_get_from_provider() - Look-up PM domain
2405*4882a593Smuzhiyun  * @genpdspec: OF phandle args to use for look-up
2406*4882a593Smuzhiyun  *
2407*4882a593Smuzhiyun  * Looks for a PM domain provider under the node specified by @genpdspec and if
2408*4882a593Smuzhiyun  * found, uses xlate function of the provider to map phandle args to a PM
2409*4882a593Smuzhiyun  * domain.
2410*4882a593Smuzhiyun  *
2411*4882a593Smuzhiyun  * Returns a valid pointer to struct generic_pm_domain on success or ERR_PTR()
2412*4882a593Smuzhiyun  * on failure.
2413*4882a593Smuzhiyun  */
genpd_get_from_provider(struct of_phandle_args * genpdspec)2414*4882a593Smuzhiyun static struct generic_pm_domain *genpd_get_from_provider(
2415*4882a593Smuzhiyun 					struct of_phandle_args *genpdspec)
2416*4882a593Smuzhiyun {
2417*4882a593Smuzhiyun 	struct generic_pm_domain *genpd = ERR_PTR(-ENOENT);
2418*4882a593Smuzhiyun 	struct of_genpd_provider *provider;
2419*4882a593Smuzhiyun 
2420*4882a593Smuzhiyun 	if (!genpdspec)
2421*4882a593Smuzhiyun 		return ERR_PTR(-EINVAL);
2422*4882a593Smuzhiyun 
2423*4882a593Smuzhiyun 	mutex_lock(&of_genpd_mutex);
2424*4882a593Smuzhiyun 
2425*4882a593Smuzhiyun 	/* Check if we have such a provider in our array */
2426*4882a593Smuzhiyun 	list_for_each_entry(provider, &of_genpd_providers, link) {
2427*4882a593Smuzhiyun 		if (provider->node == genpdspec->np)
2428*4882a593Smuzhiyun 			genpd = provider->xlate(genpdspec, provider->data);
2429*4882a593Smuzhiyun 		if (!IS_ERR(genpd))
2430*4882a593Smuzhiyun 			break;
2431*4882a593Smuzhiyun 	}
2432*4882a593Smuzhiyun 
2433*4882a593Smuzhiyun 	mutex_unlock(&of_genpd_mutex);
2434*4882a593Smuzhiyun 
2435*4882a593Smuzhiyun 	return genpd;
2436*4882a593Smuzhiyun }
2437*4882a593Smuzhiyun 
2438*4882a593Smuzhiyun /**
2439*4882a593Smuzhiyun  * of_genpd_add_device() - Add a device to an I/O PM domain
2440*4882a593Smuzhiyun  * @genpdspec: OF phandle args to use for look-up PM domain
2441*4882a593Smuzhiyun  * @dev: Device to be added.
2442*4882a593Smuzhiyun  *
2443*4882a593Smuzhiyun  * Looks-up an I/O PM domain based upon phandle args provided and adds
2444*4882a593Smuzhiyun  * the device to the PM domain. Returns a negative error code on failure.
2445*4882a593Smuzhiyun  */
of_genpd_add_device(struct of_phandle_args * genpdspec,struct device * dev)2446*4882a593Smuzhiyun int of_genpd_add_device(struct of_phandle_args *genpdspec, struct device *dev)
2447*4882a593Smuzhiyun {
2448*4882a593Smuzhiyun 	struct generic_pm_domain *genpd;
2449*4882a593Smuzhiyun 	int ret;
2450*4882a593Smuzhiyun 
2451*4882a593Smuzhiyun 	mutex_lock(&gpd_list_lock);
2452*4882a593Smuzhiyun 
2453*4882a593Smuzhiyun 	genpd = genpd_get_from_provider(genpdspec);
2454*4882a593Smuzhiyun 	if (IS_ERR(genpd)) {
2455*4882a593Smuzhiyun 		ret = PTR_ERR(genpd);
2456*4882a593Smuzhiyun 		goto out;
2457*4882a593Smuzhiyun 	}
2458*4882a593Smuzhiyun 
2459*4882a593Smuzhiyun 	ret = genpd_add_device(genpd, dev, dev);
2460*4882a593Smuzhiyun 
2461*4882a593Smuzhiyun out:
2462*4882a593Smuzhiyun 	mutex_unlock(&gpd_list_lock);
2463*4882a593Smuzhiyun 
2464*4882a593Smuzhiyun 	return ret;
2465*4882a593Smuzhiyun }
2466*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(of_genpd_add_device);
2467*4882a593Smuzhiyun 
2468*4882a593Smuzhiyun /**
2469*4882a593Smuzhiyun  * of_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
2470*4882a593Smuzhiyun  * @parent_spec: OF phandle args to use for parent PM domain look-up
2471*4882a593Smuzhiyun  * @subdomain_spec: OF phandle args to use for subdomain look-up
2472*4882a593Smuzhiyun  *
2473*4882a593Smuzhiyun  * Looks-up a parent PM domain and subdomain based upon phandle args
2474*4882a593Smuzhiyun  * provided and adds the subdomain to the parent PM domain. Returns a
2475*4882a593Smuzhiyun  * negative error code on failure.
2476*4882a593Smuzhiyun  */
of_genpd_add_subdomain(struct of_phandle_args * parent_spec,struct of_phandle_args * subdomain_spec)2477*4882a593Smuzhiyun int of_genpd_add_subdomain(struct of_phandle_args *parent_spec,
2478*4882a593Smuzhiyun 			   struct of_phandle_args *subdomain_spec)
2479*4882a593Smuzhiyun {
2480*4882a593Smuzhiyun 	struct generic_pm_domain *parent, *subdomain;
2481*4882a593Smuzhiyun 	int ret;
2482*4882a593Smuzhiyun 
2483*4882a593Smuzhiyun 	mutex_lock(&gpd_list_lock);
2484*4882a593Smuzhiyun 
2485*4882a593Smuzhiyun 	parent = genpd_get_from_provider(parent_spec);
2486*4882a593Smuzhiyun 	if (IS_ERR(parent)) {
2487*4882a593Smuzhiyun 		ret = PTR_ERR(parent);
2488*4882a593Smuzhiyun 		goto out;
2489*4882a593Smuzhiyun 	}
2490*4882a593Smuzhiyun 
2491*4882a593Smuzhiyun 	subdomain = genpd_get_from_provider(subdomain_spec);
2492*4882a593Smuzhiyun 	if (IS_ERR(subdomain)) {
2493*4882a593Smuzhiyun 		ret = PTR_ERR(subdomain);
2494*4882a593Smuzhiyun 		goto out;
2495*4882a593Smuzhiyun 	}
2496*4882a593Smuzhiyun 
2497*4882a593Smuzhiyun 	ret = genpd_add_subdomain(parent, subdomain);
2498*4882a593Smuzhiyun 
2499*4882a593Smuzhiyun out:
2500*4882a593Smuzhiyun 	mutex_unlock(&gpd_list_lock);
2501*4882a593Smuzhiyun 
2502*4882a593Smuzhiyun 	return ret;
2503*4882a593Smuzhiyun }
2504*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(of_genpd_add_subdomain);
2505*4882a593Smuzhiyun 
2506*4882a593Smuzhiyun /**
2507*4882a593Smuzhiyun  * of_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
2508*4882a593Smuzhiyun  * @parent_spec: OF phandle args to use for parent PM domain look-up
2509*4882a593Smuzhiyun  * @subdomain_spec: OF phandle args to use for subdomain look-up
2510*4882a593Smuzhiyun  *
2511*4882a593Smuzhiyun  * Looks-up a parent PM domain and subdomain based upon phandle args
2512*4882a593Smuzhiyun  * provided and removes the subdomain from the parent PM domain. Returns a
2513*4882a593Smuzhiyun  * negative error code on failure.
2514*4882a593Smuzhiyun  */
of_genpd_remove_subdomain(struct of_phandle_args * parent_spec,struct of_phandle_args * subdomain_spec)2515*4882a593Smuzhiyun int of_genpd_remove_subdomain(struct of_phandle_args *parent_spec,
2516*4882a593Smuzhiyun 			      struct of_phandle_args *subdomain_spec)
2517*4882a593Smuzhiyun {
2518*4882a593Smuzhiyun 	struct generic_pm_domain *parent, *subdomain;
2519*4882a593Smuzhiyun 	int ret;
2520*4882a593Smuzhiyun 
2521*4882a593Smuzhiyun 	mutex_lock(&gpd_list_lock);
2522*4882a593Smuzhiyun 
2523*4882a593Smuzhiyun 	parent = genpd_get_from_provider(parent_spec);
2524*4882a593Smuzhiyun 	if (IS_ERR(parent)) {
2525*4882a593Smuzhiyun 		ret = PTR_ERR(parent);
2526*4882a593Smuzhiyun 		goto out;
2527*4882a593Smuzhiyun 	}
2528*4882a593Smuzhiyun 
2529*4882a593Smuzhiyun 	subdomain = genpd_get_from_provider(subdomain_spec);
2530*4882a593Smuzhiyun 	if (IS_ERR(subdomain)) {
2531*4882a593Smuzhiyun 		ret = PTR_ERR(subdomain);
2532*4882a593Smuzhiyun 		goto out;
2533*4882a593Smuzhiyun 	}
2534*4882a593Smuzhiyun 
2535*4882a593Smuzhiyun 	ret = pm_genpd_remove_subdomain(parent, subdomain);
2536*4882a593Smuzhiyun 
2537*4882a593Smuzhiyun out:
2538*4882a593Smuzhiyun 	mutex_unlock(&gpd_list_lock);
2539*4882a593Smuzhiyun 
2540*4882a593Smuzhiyun 	return ret;
2541*4882a593Smuzhiyun }
2542*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(of_genpd_remove_subdomain);
2543*4882a593Smuzhiyun 
/**
 * of_genpd_remove_last - Remove the last PM domain registered for a provider
 * @np: Pointer to the device node associated with the PM domain provider.
 *
 * Find the last PM domain that was added by a particular provider and
 * remove this PM domain from the list of PM domains. The provider is
 * identified by the 'np' device node that is passed. The PM
 * domain will only be removed, if the provider associated with domain
 * has been removed.
 *
 * Returns a valid pointer to struct generic_pm_domain on success or
 * ERR_PTR() on failure.
 */
struct generic_pm_domain *of_genpd_remove_last(struct device_node *np)
{
	struct generic_pm_domain *gpd, *tmp, *genpd = ERR_PTR(-ENOENT);
	int ret;

	if (IS_ERR_OR_NULL(np))
		return ERR_PTR(-EINVAL);

	mutex_lock(&gpd_list_lock);
	/*
	 * pm_genpd_init() adds at the list head, so the first match here
	 * is the most recently registered domain of this provider.
	 */
	list_for_each_entry_safe(gpd, tmp, &gpd_list, gpd_list_node) {
		if (gpd->provider == &np->fwnode) {
			ret = genpd_remove(gpd);
			genpd = ret ? ERR_PTR(ret) : gpd;
			break;
		}
	}
	mutex_unlock(&gpd_list_lock);

	return genpd;
}
2577*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(of_genpd_remove_last);
2578*4882a593Smuzhiyun 
/* Release callback for the virtual devices created in genpd_dev_pm_attach_by_id(). */
static void genpd_release_dev(struct device *dev)
{
	/* Drop the of_node reference taken when the virtual device was built. */
	of_node_put(dev->of_node);
	kfree(dev);
}
2584*4882a593Smuzhiyun 
/* Bus hosting the virtual devices used for multiple PM domain attachment. */
static struct bus_type genpd_bus_type = {
	.name		= "genpd",
};
2588*4882a593Smuzhiyun 
2589*4882a593Smuzhiyun /**
2590*4882a593Smuzhiyun  * genpd_dev_pm_detach - Detach a device from its PM domain.
2591*4882a593Smuzhiyun  * @dev: Device to detach.
2592*4882a593Smuzhiyun  * @power_off: Currently not used
2593*4882a593Smuzhiyun  *
2594*4882a593Smuzhiyun  * Try to locate a corresponding generic PM domain, which the device was
2595*4882a593Smuzhiyun  * attached to previously. If such is found, the device is detached from it.
2596*4882a593Smuzhiyun  */
genpd_dev_pm_detach(struct device * dev,bool power_off)2597*4882a593Smuzhiyun static void genpd_dev_pm_detach(struct device *dev, bool power_off)
2598*4882a593Smuzhiyun {
2599*4882a593Smuzhiyun 	struct generic_pm_domain *pd;
2600*4882a593Smuzhiyun 	unsigned int i;
2601*4882a593Smuzhiyun 	int ret = 0;
2602*4882a593Smuzhiyun 
2603*4882a593Smuzhiyun 	pd = dev_to_genpd(dev);
2604*4882a593Smuzhiyun 	if (IS_ERR(pd))
2605*4882a593Smuzhiyun 		return;
2606*4882a593Smuzhiyun 
2607*4882a593Smuzhiyun 	dev_dbg(dev, "removing from PM domain %s\n", pd->name);
2608*4882a593Smuzhiyun 
2609*4882a593Smuzhiyun 	for (i = 1; i < GENPD_RETRY_MAX_MS; i <<= 1) {
2610*4882a593Smuzhiyun 		ret = genpd_remove_device(pd, dev);
2611*4882a593Smuzhiyun 		if (ret != -EAGAIN)
2612*4882a593Smuzhiyun 			break;
2613*4882a593Smuzhiyun 
2614*4882a593Smuzhiyun 		mdelay(i);
2615*4882a593Smuzhiyun 		cond_resched();
2616*4882a593Smuzhiyun 	}
2617*4882a593Smuzhiyun 
2618*4882a593Smuzhiyun 	if (ret < 0) {
2619*4882a593Smuzhiyun 		dev_err(dev, "failed to remove from PM domain %s: %d",
2620*4882a593Smuzhiyun 			pd->name, ret);
2621*4882a593Smuzhiyun 		return;
2622*4882a593Smuzhiyun 	}
2623*4882a593Smuzhiyun 
2624*4882a593Smuzhiyun 	/* Check if PM domain can be powered off after removing this device. */
2625*4882a593Smuzhiyun 	genpd_queue_power_off_work(pd);
2626*4882a593Smuzhiyun 
2627*4882a593Smuzhiyun 	/* Unregister the device if it was created by genpd. */
2628*4882a593Smuzhiyun 	if (dev->bus == &genpd_bus_type)
2629*4882a593Smuzhiyun 		device_unregister(dev);
2630*4882a593Smuzhiyun }
2631*4882a593Smuzhiyun 
/* Sync callback: let the domain re-evaluate whether it can be powered off. */
static void genpd_dev_pm_sync(struct device *dev)
{
	struct generic_pm_domain *pd = dev_to_genpd(dev);

	if (!IS_ERR(pd))
		genpd_queue_power_off_work(pd);
}
2642*4882a593Smuzhiyun 
/*
 * __genpd_dev_pm_attach - Attach @dev to the PM domain found at @index of its
 * "power-domains" DT property.
 * @dev: Device to attach (a real device or a genpd-created virtual device).
 * @base_dev: The real device whose probe may be deferred.
 * @index: Index into the "power-domains" phandle list.
 * @power_on: Whether to power on the domain after a successful attach.
 *
 * Returns 1 on a successful attach, -EPROBE_DEFER when the domain cannot be
 * powered on, or another negative error code on failure.
 */
static int __genpd_dev_pm_attach(struct device *dev, struct device *base_dev,
				 unsigned int index, bool power_on)
{
	struct of_phandle_args pd_args;
	struct generic_pm_domain *pd;
	int ret;

	ret = of_parse_phandle_with_args(dev->of_node, "power-domains",
				"#power-domain-cells", index, &pd_args);
	if (ret < 0)
		return ret;

	mutex_lock(&gpd_list_lock);
	pd = genpd_get_from_provider(&pd_args);
	/* The provider look-up is done with pd_args; drop its node ref now. */
	of_node_put(pd_args.np);
	if (IS_ERR(pd)) {
		mutex_unlock(&gpd_list_lock);
		dev_dbg(dev, "%s() failed to find PM domain: %ld\n",
			__func__, PTR_ERR(pd));
		/* The provider may show up later: maybe defer the probe. */
		return driver_deferred_probe_check_state(base_dev);
	}

	dev_dbg(dev, "adding to PM domain %s\n", pd->name);

	ret = genpd_add_device(pd, dev, base_dev);
	mutex_unlock(&gpd_list_lock);

	if (ret < 0) {
		if (ret != -EPROBE_DEFER)
			dev_err(dev, "failed to add to PM domain %s: %d",
				pd->name, ret);
		return ret;
	}

	dev->pm_domain->detach = genpd_dev_pm_detach;
	dev->pm_domain->sync = genpd_dev_pm_sync;

	if (power_on) {
		genpd_lock(pd);
		ret = genpd_power_on(pd, 0);
		genpd_unlock(pd);
	}

	/* Roll back the attach if powering on failed, and retry later. */
	if (ret)
		genpd_remove_device(pd, dev);

	return ret ? -EPROBE_DEFER : 1;
}
2691*4882a593Smuzhiyun 
2692*4882a593Smuzhiyun /**
2693*4882a593Smuzhiyun  * genpd_dev_pm_attach - Attach a device to its PM domain using DT.
2694*4882a593Smuzhiyun  * @dev: Device to attach.
2695*4882a593Smuzhiyun  *
2696*4882a593Smuzhiyun  * Parse device's OF node to find a PM domain specifier. If such is found,
2697*4882a593Smuzhiyun  * attaches the device to retrieved pm_domain ops.
2698*4882a593Smuzhiyun  *
2699*4882a593Smuzhiyun  * Returns 1 on successfully attached PM domain, 0 when the device don't need a
2700*4882a593Smuzhiyun  * PM domain or when multiple power-domains exists for it, else a negative error
2701*4882a593Smuzhiyun  * code. Note that if a power-domain exists for the device, but it cannot be
2702*4882a593Smuzhiyun  * found or turned on, then return -EPROBE_DEFER to ensure that the device is
2703*4882a593Smuzhiyun  * not probed and to re-try again later.
2704*4882a593Smuzhiyun  */
genpd_dev_pm_attach(struct device * dev)2705*4882a593Smuzhiyun int genpd_dev_pm_attach(struct device *dev)
2706*4882a593Smuzhiyun {
2707*4882a593Smuzhiyun 	if (!dev->of_node)
2708*4882a593Smuzhiyun 		return 0;
2709*4882a593Smuzhiyun 
2710*4882a593Smuzhiyun 	/*
2711*4882a593Smuzhiyun 	 * Devices with multiple PM domains must be attached separately, as we
2712*4882a593Smuzhiyun 	 * can only attach one PM domain per device.
2713*4882a593Smuzhiyun 	 */
2714*4882a593Smuzhiyun 	if (of_count_phandle_with_args(dev->of_node, "power-domains",
2715*4882a593Smuzhiyun 				       "#power-domain-cells") != 1)
2716*4882a593Smuzhiyun 		return 0;
2717*4882a593Smuzhiyun 
2718*4882a593Smuzhiyun 	return __genpd_dev_pm_attach(dev, dev, 0, true);
2719*4882a593Smuzhiyun }
2720*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(genpd_dev_pm_attach);
2721*4882a593Smuzhiyun 
2722*4882a593Smuzhiyun /**
2723*4882a593Smuzhiyun  * genpd_dev_pm_attach_by_id - Associate a device with one of its PM domains.
2724*4882a593Smuzhiyun  * @dev: The device used to lookup the PM domain.
2725*4882a593Smuzhiyun  * @index: The index of the PM domain.
2726*4882a593Smuzhiyun  *
2727*4882a593Smuzhiyun  * Parse device's OF node to find a PM domain specifier at the provided @index.
2728*4882a593Smuzhiyun  * If such is found, creates a virtual device and attaches it to the retrieved
2729*4882a593Smuzhiyun  * pm_domain ops. To deal with detaching of the virtual device, the ->detach()
2730*4882a593Smuzhiyun  * callback in the struct dev_pm_domain are assigned to genpd_dev_pm_detach().
2731*4882a593Smuzhiyun  *
2732*4882a593Smuzhiyun  * Returns the created virtual device if successfully attached PM domain, NULL
2733*4882a593Smuzhiyun  * when the device don't need a PM domain, else an ERR_PTR() in case of
2734*4882a593Smuzhiyun  * failures. If a power-domain exists for the device, but cannot be found or
2735*4882a593Smuzhiyun  * turned on, then ERR_PTR(-EPROBE_DEFER) is returned to ensure that the device
2736*4882a593Smuzhiyun  * is not probed and to re-try again later.
2737*4882a593Smuzhiyun  */
genpd_dev_pm_attach_by_id(struct device * dev,unsigned int index)2738*4882a593Smuzhiyun struct device *genpd_dev_pm_attach_by_id(struct device *dev,
2739*4882a593Smuzhiyun 					 unsigned int index)
2740*4882a593Smuzhiyun {
2741*4882a593Smuzhiyun 	struct device *virt_dev;
2742*4882a593Smuzhiyun 	int num_domains;
2743*4882a593Smuzhiyun 	int ret;
2744*4882a593Smuzhiyun 
2745*4882a593Smuzhiyun 	if (!dev->of_node)
2746*4882a593Smuzhiyun 		return NULL;
2747*4882a593Smuzhiyun 
2748*4882a593Smuzhiyun 	/* Verify that the index is within a valid range. */
2749*4882a593Smuzhiyun 	num_domains = of_count_phandle_with_args(dev->of_node, "power-domains",
2750*4882a593Smuzhiyun 						 "#power-domain-cells");
2751*4882a593Smuzhiyun 	if (index >= num_domains)
2752*4882a593Smuzhiyun 		return NULL;
2753*4882a593Smuzhiyun 
2754*4882a593Smuzhiyun 	/* Allocate and register device on the genpd bus. */
2755*4882a593Smuzhiyun 	virt_dev = kzalloc(sizeof(*virt_dev), GFP_KERNEL);
2756*4882a593Smuzhiyun 	if (!virt_dev)
2757*4882a593Smuzhiyun 		return ERR_PTR(-ENOMEM);
2758*4882a593Smuzhiyun 
2759*4882a593Smuzhiyun 	dev_set_name(virt_dev, "genpd:%u:%s", index, dev_name(dev));
2760*4882a593Smuzhiyun 	virt_dev->bus = &genpd_bus_type;
2761*4882a593Smuzhiyun 	virt_dev->release = genpd_release_dev;
2762*4882a593Smuzhiyun 	virt_dev->of_node = of_node_get(dev->of_node);
2763*4882a593Smuzhiyun 
2764*4882a593Smuzhiyun 	ret = device_register(virt_dev);
2765*4882a593Smuzhiyun 	if (ret) {
2766*4882a593Smuzhiyun 		put_device(virt_dev);
2767*4882a593Smuzhiyun 		return ERR_PTR(ret);
2768*4882a593Smuzhiyun 	}
2769*4882a593Smuzhiyun 
2770*4882a593Smuzhiyun 	/* Try to attach the device to the PM domain at the specified index. */
2771*4882a593Smuzhiyun 	ret = __genpd_dev_pm_attach(virt_dev, dev, index, false);
2772*4882a593Smuzhiyun 	if (ret < 1) {
2773*4882a593Smuzhiyun 		device_unregister(virt_dev);
2774*4882a593Smuzhiyun 		return ret ? ERR_PTR(ret) : NULL;
2775*4882a593Smuzhiyun 	}
2776*4882a593Smuzhiyun 
2777*4882a593Smuzhiyun 	pm_runtime_enable(virt_dev);
2778*4882a593Smuzhiyun 	genpd_queue_power_off_work(dev_to_genpd(virt_dev));
2779*4882a593Smuzhiyun 
2780*4882a593Smuzhiyun 	return virt_dev;
2781*4882a593Smuzhiyun }
2782*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(genpd_dev_pm_attach_by_id);
2783*4882a593Smuzhiyun 
2784*4882a593Smuzhiyun /**
2785*4882a593Smuzhiyun  * genpd_dev_pm_attach_by_name - Associate a device with one of its PM domains.
2786*4882a593Smuzhiyun  * @dev: The device used to lookup the PM domain.
2787*4882a593Smuzhiyun  * @name: The name of the PM domain.
2788*4882a593Smuzhiyun  *
2789*4882a593Smuzhiyun  * Parse device's OF node to find a PM domain specifier using the
2790*4882a593Smuzhiyun  * power-domain-names DT property. For further description see
2791*4882a593Smuzhiyun  * genpd_dev_pm_attach_by_id().
2792*4882a593Smuzhiyun  */
genpd_dev_pm_attach_by_name(struct device * dev,const char * name)2793*4882a593Smuzhiyun struct device *genpd_dev_pm_attach_by_name(struct device *dev, const char *name)
2794*4882a593Smuzhiyun {
2795*4882a593Smuzhiyun 	int index;
2796*4882a593Smuzhiyun 
2797*4882a593Smuzhiyun 	if (!dev->of_node)
2798*4882a593Smuzhiyun 		return NULL;
2799*4882a593Smuzhiyun 
2800*4882a593Smuzhiyun 	index = of_property_match_string(dev->of_node, "power-domain-names",
2801*4882a593Smuzhiyun 					 name);
2802*4882a593Smuzhiyun 	if (index < 0)
2803*4882a593Smuzhiyun 		return NULL;
2804*4882a593Smuzhiyun 
2805*4882a593Smuzhiyun 	return genpd_dev_pm_attach_by_id(dev, index);
2806*4882a593Smuzhiyun }
2807*4882a593Smuzhiyun 
/* DT compatible accepted for nodes referenced by "domain-idle-states". */
static const struct of_device_id idle_state_match[] = {
	{ .compatible = "domain-idle-state", },
	{ }
};
2812*4882a593Smuzhiyun 
genpd_parse_state(struct genpd_power_state * genpd_state,struct device_node * state_node)2813*4882a593Smuzhiyun static int genpd_parse_state(struct genpd_power_state *genpd_state,
2814*4882a593Smuzhiyun 				    struct device_node *state_node)
2815*4882a593Smuzhiyun {
2816*4882a593Smuzhiyun 	int err;
2817*4882a593Smuzhiyun 	u32 residency;
2818*4882a593Smuzhiyun 	u32 entry_latency, exit_latency;
2819*4882a593Smuzhiyun 
2820*4882a593Smuzhiyun 	err = of_property_read_u32(state_node, "entry-latency-us",
2821*4882a593Smuzhiyun 						&entry_latency);
2822*4882a593Smuzhiyun 	if (err) {
2823*4882a593Smuzhiyun 		pr_debug(" * %pOF missing entry-latency-us property\n",
2824*4882a593Smuzhiyun 			 state_node);
2825*4882a593Smuzhiyun 		return -EINVAL;
2826*4882a593Smuzhiyun 	}
2827*4882a593Smuzhiyun 
2828*4882a593Smuzhiyun 	err = of_property_read_u32(state_node, "exit-latency-us",
2829*4882a593Smuzhiyun 						&exit_latency);
2830*4882a593Smuzhiyun 	if (err) {
2831*4882a593Smuzhiyun 		pr_debug(" * %pOF missing exit-latency-us property\n",
2832*4882a593Smuzhiyun 			 state_node);
2833*4882a593Smuzhiyun 		return -EINVAL;
2834*4882a593Smuzhiyun 	}
2835*4882a593Smuzhiyun 
2836*4882a593Smuzhiyun 	err = of_property_read_u32(state_node, "min-residency-us", &residency);
2837*4882a593Smuzhiyun 	if (!err)
2838*4882a593Smuzhiyun 		genpd_state->residency_ns = 1000 * residency;
2839*4882a593Smuzhiyun 
2840*4882a593Smuzhiyun 	genpd_state->power_on_latency_ns = 1000 * exit_latency;
2841*4882a593Smuzhiyun 	genpd_state->power_off_latency_ns = 1000 * entry_latency;
2842*4882a593Smuzhiyun 	genpd_state->fwnode = &state_node->fwnode;
2843*4882a593Smuzhiyun 
2844*4882a593Smuzhiyun 	return 0;
2845*4882a593Smuzhiyun }
2846*4882a593Smuzhiyun 
/*
 * genpd_iterate_idle_states - Count and optionally parse domain idle states.
 * @dn: The genpd device node carrying a "domain-idle-states" property.
 * @states: Array to fill in, or NULL to only count the matching states.
 *
 * Returns the number of compatible, available idle states found (0 when the
 * property is absent), or a negative error code.
 */
static int genpd_iterate_idle_states(struct device_node *dn,
				     struct genpd_power_state *states)
{
	int ret;
	struct of_phandle_iterator it;
	struct device_node *np;
	int i = 0;

	ret = of_count_phandle_with_args(dn, "domain-idle-states", NULL);
	if (ret <= 0)
		/* A missing property simply means "no idle states". */
		return ret == -ENOENT ? 0 : ret;

	/* Loop over the phandles until all the requested entry is found */
	of_for_each_phandle(&it, ret, dn, "domain-idle-states", NULL, 0) {
		np = it.node;
		/* Skip nodes that are not compatible domain idle states. */
		if (!of_match_node(idle_state_match, np))
			continue;

		if (!of_device_is_available(np))
			continue;

		if (states) {
			ret = genpd_parse_state(&states[i], np);
			if (ret) {
				pr_err("Parsing idle state node %pOF failed with err %d\n",
				       np, ret);
				/* Drop the iterator's node ref on early exit. */
				of_node_put(np);
				return ret;
			}
		}
		i++;
	}

	return i;
}
2882*4882a593Smuzhiyun 
/**
 * of_genpd_parse_idle_states: Return array of idle states for the genpd.
 *
 * @dn: The genpd device node
 * @states: The pointer to which the state array will be saved.
 * @n: The count of elements in the array returned from this function.
 *
 * Returns the device states parsed from the OF node. The memory for the states
 * is allocated by this function and it is the caller's responsibility to free
 * it after use. Returns 0 when zero or more compatible domain idle states are
 * found (with *@states set to NULL and *@n to 0 in the zero case), or a
 * negative error code on failure.
 */
int of_genpd_parse_idle_states(struct device_node *dn,
			struct genpd_power_state **states, int *n)
{
	struct genpd_power_state *st;
	int ret;

	/* First pass: count the states to size the allocation. */
	ret = genpd_iterate_idle_states(dn, NULL);
	if (ret < 0)
		return ret;

	if (!ret) {
		*states = NULL;
		*n = 0;
		return 0;
	}

	st = kcalloc(ret, sizeof(*st), GFP_KERNEL);
	if (!st)
		return -ENOMEM;

	/* Second pass: actually parse into the array. */
	ret = genpd_iterate_idle_states(dn, st);
	if (ret <= 0) {
		kfree(st);
		/* A now-empty result means the DT changed under us. */
		return ret < 0 ? ret : -EINVAL;
	}

	*states = st;
	*n = ret;

	return 0;
}
EXPORT_SYMBOL_GPL(of_genpd_parse_idle_states);
2927*4882a593Smuzhiyun 
2928*4882a593Smuzhiyun /**
2929*4882a593Smuzhiyun  * pm_genpd_opp_to_performance_state - Gets performance state of the genpd from its OPP node.
2930*4882a593Smuzhiyun  *
2931*4882a593Smuzhiyun  * @genpd_dev: Genpd's device for which the performance-state needs to be found.
2932*4882a593Smuzhiyun  * @opp: struct dev_pm_opp of the OPP for which we need to find performance
2933*4882a593Smuzhiyun  *	state.
2934*4882a593Smuzhiyun  *
2935*4882a593Smuzhiyun  * Returns performance state encoded in the OPP of the genpd. This calls
2936*4882a593Smuzhiyun  * platform specific genpd->opp_to_performance_state() callback to translate
2937*4882a593Smuzhiyun  * power domain OPP to performance state.
2938*4882a593Smuzhiyun  *
2939*4882a593Smuzhiyun  * Returns performance state on success and 0 on failure.
2940*4882a593Smuzhiyun  */
pm_genpd_opp_to_performance_state(struct device * genpd_dev,struct dev_pm_opp * opp)2941*4882a593Smuzhiyun unsigned int pm_genpd_opp_to_performance_state(struct device *genpd_dev,
2942*4882a593Smuzhiyun 					       struct dev_pm_opp *opp)
2943*4882a593Smuzhiyun {
2944*4882a593Smuzhiyun 	struct generic_pm_domain *genpd = NULL;
2945*4882a593Smuzhiyun 	int state;
2946*4882a593Smuzhiyun 
2947*4882a593Smuzhiyun 	genpd = container_of(genpd_dev, struct generic_pm_domain, dev);
2948*4882a593Smuzhiyun 
2949*4882a593Smuzhiyun 	if (unlikely(!genpd->opp_to_performance_state))
2950*4882a593Smuzhiyun 		return 0;
2951*4882a593Smuzhiyun 
2952*4882a593Smuzhiyun 	genpd_lock(genpd);
2953*4882a593Smuzhiyun 	state = genpd->opp_to_performance_state(genpd, opp);
2954*4882a593Smuzhiyun 	genpd_unlock(genpd);
2955*4882a593Smuzhiyun 
2956*4882a593Smuzhiyun 	return state;
2957*4882a593Smuzhiyun }
2958*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(pm_genpd_opp_to_performance_state);
2959*4882a593Smuzhiyun 
/* Register the genpd bus early so virtual devices can be created at attach. */
static int __init genpd_bus_init(void)
{
	return bus_register(&genpd_bus_type);
}
core_initcall(genpd_bus_init);
2965*4882a593Smuzhiyun 
2966*4882a593Smuzhiyun #endif /* CONFIG_PM_GENERIC_DOMAINS_OF */
2967*4882a593Smuzhiyun 
2968*4882a593Smuzhiyun 
2969*4882a593Smuzhiyun /***        debugfs support        ***/
2970*4882a593Smuzhiyun 
2971*4882a593Smuzhiyun #ifdef CONFIG_DEBUG_FS
2972*4882a593Smuzhiyun /*
2973*4882a593Smuzhiyun  * TODO: This function is a slightly modified version of rtpm_status_show
2974*4882a593Smuzhiyun  * from sysfs.c, so generalize it.
2975*4882a593Smuzhiyun  */
rtpm_status_str(struct seq_file * s,struct device * dev)2976*4882a593Smuzhiyun static void rtpm_status_str(struct seq_file *s, struct device *dev)
2977*4882a593Smuzhiyun {
2978*4882a593Smuzhiyun 	static const char * const status_lookup[] = {
2979*4882a593Smuzhiyun 		[RPM_ACTIVE] = "active",
2980*4882a593Smuzhiyun 		[RPM_RESUMING] = "resuming",
2981*4882a593Smuzhiyun 		[RPM_SUSPENDED] = "suspended",
2982*4882a593Smuzhiyun 		[RPM_SUSPENDING] = "suspending"
2983*4882a593Smuzhiyun 	};
2984*4882a593Smuzhiyun 	const char *p = "";
2985*4882a593Smuzhiyun 
2986*4882a593Smuzhiyun 	if (dev->power.runtime_error)
2987*4882a593Smuzhiyun 		p = "error";
2988*4882a593Smuzhiyun 	else if (dev->power.disable_depth)
2989*4882a593Smuzhiyun 		p = "unsupported";
2990*4882a593Smuzhiyun 	else if (dev->power.runtime_status < ARRAY_SIZE(status_lookup))
2991*4882a593Smuzhiyun 		p = status_lookup[dev->power.runtime_status];
2992*4882a593Smuzhiyun 	else
2993*4882a593Smuzhiyun 		WARN_ON(1);
2994*4882a593Smuzhiyun 
2995*4882a593Smuzhiyun 	seq_puts(s, p);
2996*4882a593Smuzhiyun }
2997*4882a593Smuzhiyun 
/*
 * genpd_summary_one - Print one summary row for @genpd into @s.
 *
 * Emits the domain's name, its state (with the idle state index appended
 * when off), its child domains, and each attached device with its runtime
 * PM status. Returns 0 on success or -ERESTARTSYS if the domain lock
 * cannot be taken interruptibly.
 */
static int genpd_summary_one(struct seq_file *s,
			struct generic_pm_domain *genpd)
{
	static const char * const status_lookup[] = {
		[GENPD_STATE_ON] = "on",
		[GENPD_STATE_OFF] = "off"
	};
	struct pm_domain_data *pm_data;
	const char *kobj_path;
	struct gpd_link *link;
	char state[16];
	int ret;

	ret = genpd_lock_interruptible(genpd);
	if (ret)
		return -ERESTARTSYS;

	if (WARN_ON(genpd->status >= ARRAY_SIZE(status_lookup)))
		goto exit;
	/* When off, append the idle state index, e.g. "off-1". */
	if (!genpd_status_on(genpd))
		snprintf(state, sizeof(state), "%s-%u",
			 status_lookup[genpd->status], genpd->state_idx);
	else
		snprintf(state, sizeof(state), "%s",
			 status_lookup[genpd->status]);
	seq_printf(s, "%-30s  %-15s ", genpd->name, state);

	/*
	 * Modifications on the list require holding locks on both
	 * parent and child, so we are safe.
	 * Also genpd->name is immutable.
	 */
	list_for_each_entry(link, &genpd->parent_links, parent_node) {
		seq_printf(s, "%s", link->child->name);
		if (!list_is_last(&link->parent_node, &genpd->parent_links))
			seq_puts(s, ", ");
	}

	list_for_each_entry(pm_data, &genpd->dev_list, list_node) {
		/* IRQ-safe domains are locked with a spinlock: no sleeping. */
		kobj_path = kobject_get_path(&pm_data->dev->kobj,
				genpd_is_irq_safe(genpd) ?
				GFP_ATOMIC : GFP_KERNEL);
		if (kobj_path == NULL)
			continue;

		seq_printf(s, "\n    %-50s  ", kobj_path);
		rtpm_status_str(s, pm_data->dev);
		kfree(kobj_path);
	}

	seq_puts(s, "\n");
exit:
	genpd_unlock(genpd);

	return 0;
}
3054*4882a593Smuzhiyun 
summary_show(struct seq_file * s,void * data)3055*4882a593Smuzhiyun static int summary_show(struct seq_file *s, void *data)
3056*4882a593Smuzhiyun {
3057*4882a593Smuzhiyun 	struct generic_pm_domain *genpd;
3058*4882a593Smuzhiyun 	int ret = 0;
3059*4882a593Smuzhiyun 
3060*4882a593Smuzhiyun 	seq_puts(s, "domain                          status          children\n");
3061*4882a593Smuzhiyun 	seq_puts(s, "    /device                                             runtime status\n");
3062*4882a593Smuzhiyun 	seq_puts(s, "----------------------------------------------------------------------\n");
3063*4882a593Smuzhiyun 
3064*4882a593Smuzhiyun 	ret = mutex_lock_interruptible(&gpd_list_lock);
3065*4882a593Smuzhiyun 	if (ret)
3066*4882a593Smuzhiyun 		return -ERESTARTSYS;
3067*4882a593Smuzhiyun 
3068*4882a593Smuzhiyun 	list_for_each_entry(genpd, &gpd_list, gpd_list_node) {
3069*4882a593Smuzhiyun 		ret = genpd_summary_one(s, genpd);
3070*4882a593Smuzhiyun 		if (ret)
3071*4882a593Smuzhiyun 			break;
3072*4882a593Smuzhiyun 	}
3073*4882a593Smuzhiyun 	mutex_unlock(&gpd_list_lock);
3074*4882a593Smuzhiyun 
3075*4882a593Smuzhiyun 	return ret;
3076*4882a593Smuzhiyun }
3077*4882a593Smuzhiyun 
status_show(struct seq_file * s,void * data)3078*4882a593Smuzhiyun static int status_show(struct seq_file *s, void *data)
3079*4882a593Smuzhiyun {
3080*4882a593Smuzhiyun 	static const char * const status_lookup[] = {
3081*4882a593Smuzhiyun 		[GENPD_STATE_ON] = "on",
3082*4882a593Smuzhiyun 		[GENPD_STATE_OFF] = "off"
3083*4882a593Smuzhiyun 	};
3084*4882a593Smuzhiyun 
3085*4882a593Smuzhiyun 	struct generic_pm_domain *genpd = s->private;
3086*4882a593Smuzhiyun 	int ret = 0;
3087*4882a593Smuzhiyun 
3088*4882a593Smuzhiyun 	ret = genpd_lock_interruptible(genpd);
3089*4882a593Smuzhiyun 	if (ret)
3090*4882a593Smuzhiyun 		return -ERESTARTSYS;
3091*4882a593Smuzhiyun 
3092*4882a593Smuzhiyun 	if (WARN_ON_ONCE(genpd->status >= ARRAY_SIZE(status_lookup)))
3093*4882a593Smuzhiyun 		goto exit;
3094*4882a593Smuzhiyun 
3095*4882a593Smuzhiyun 	if (genpd->status == GENPD_STATE_OFF)
3096*4882a593Smuzhiyun 		seq_printf(s, "%s-%u\n", status_lookup[genpd->status],
3097*4882a593Smuzhiyun 			genpd->state_idx);
3098*4882a593Smuzhiyun 	else
3099*4882a593Smuzhiyun 		seq_printf(s, "%s\n", status_lookup[genpd->status]);
3100*4882a593Smuzhiyun exit:
3101*4882a593Smuzhiyun 	genpd_unlock(genpd);
3102*4882a593Smuzhiyun 	return ret;
3103*4882a593Smuzhiyun }
3104*4882a593Smuzhiyun 
sub_domains_show(struct seq_file * s,void * data)3105*4882a593Smuzhiyun static int sub_domains_show(struct seq_file *s, void *data)
3106*4882a593Smuzhiyun {
3107*4882a593Smuzhiyun 	struct generic_pm_domain *genpd = s->private;
3108*4882a593Smuzhiyun 	struct gpd_link *link;
3109*4882a593Smuzhiyun 	int ret = 0;
3110*4882a593Smuzhiyun 
3111*4882a593Smuzhiyun 	ret = genpd_lock_interruptible(genpd);
3112*4882a593Smuzhiyun 	if (ret)
3113*4882a593Smuzhiyun 		return -ERESTARTSYS;
3114*4882a593Smuzhiyun 
3115*4882a593Smuzhiyun 	list_for_each_entry(link, &genpd->parent_links, parent_node)
3116*4882a593Smuzhiyun 		seq_printf(s, "%s\n", link->child->name);
3117*4882a593Smuzhiyun 
3118*4882a593Smuzhiyun 	genpd_unlock(genpd);
3119*4882a593Smuzhiyun 	return ret;
3120*4882a593Smuzhiyun }
3121*4882a593Smuzhiyun 
/*
 * debugfs: print a per-idle-state accounting table for a PM domain:
 * time spent in the state, entry count, and rejected entry attempts.
 */
static int idle_states_show(struct seq_file *s, void *data)
{
	struct generic_pm_domain *genpd = s->private;
	unsigned int i;
	int ret = 0;

	ret = genpd_lock_interruptible(genpd);
	if (ret)
		return -ERESTARTSYS;

	seq_puts(s, "State          Time Spent(ms) Usage          Rejected\n");

	for (i = 0; i < genpd->state_count; i++) {
		ktime_t delta = 0;
		s64 msecs;

		/*
		 * idle_time is only folded in on state exit; if the domain is
		 * currently off in this state, add the time since entry.
		 */
		if ((genpd->status == GENPD_STATE_OFF) &&
				(genpd->state_idx == i))
			delta = ktime_sub(ktime_get(), genpd->accounting_time);

		msecs = ktime_to_ms(
			ktime_add(genpd->states[i].idle_time, delta));
		seq_printf(s, "S%-13i %-14lld %-14llu %llu\n", i, msecs,
			      genpd->states[i].usage, genpd->states[i].rejected);
	}

	genpd_unlock(genpd);
	return ret;
}
3151*4882a593Smuzhiyun 
active_time_show(struct seq_file * s,void * data)3152*4882a593Smuzhiyun static int active_time_show(struct seq_file *s, void *data)
3153*4882a593Smuzhiyun {
3154*4882a593Smuzhiyun 	struct generic_pm_domain *genpd = s->private;
3155*4882a593Smuzhiyun 	ktime_t delta = 0;
3156*4882a593Smuzhiyun 	int ret = 0;
3157*4882a593Smuzhiyun 
3158*4882a593Smuzhiyun 	ret = genpd_lock_interruptible(genpd);
3159*4882a593Smuzhiyun 	if (ret)
3160*4882a593Smuzhiyun 		return -ERESTARTSYS;
3161*4882a593Smuzhiyun 
3162*4882a593Smuzhiyun 	if (genpd->status == GENPD_STATE_ON)
3163*4882a593Smuzhiyun 		delta = ktime_sub(ktime_get(), genpd->accounting_time);
3164*4882a593Smuzhiyun 
3165*4882a593Smuzhiyun 	seq_printf(s, "%lld ms\n", ktime_to_ms(
3166*4882a593Smuzhiyun 				ktime_add(genpd->on_time, delta)));
3167*4882a593Smuzhiyun 
3168*4882a593Smuzhiyun 	genpd_unlock(genpd);
3169*4882a593Smuzhiyun 	return ret;
3170*4882a593Smuzhiyun }
3171*4882a593Smuzhiyun 
/* debugfs: print the total time (ms) the PM domain has spent powered off. */
static int total_idle_time_show(struct seq_file *s, void *data)
{
	struct generic_pm_domain *genpd = s->private;
	ktime_t delta = 0, total = 0;
	unsigned int i;
	int ret = 0;

	ret = genpd_lock_interruptible(genpd);
	if (ret)
		return -ERESTARTSYS;

	for (i = 0; i < genpd->state_count; i++) {

		/*
		 * If the domain is currently off in state i, its idle_time has
		 * not been updated yet; remember the in-progress interval.
		 */
		if ((genpd->status == GENPD_STATE_OFF) &&
				(genpd->state_idx == i))
			delta = ktime_sub(ktime_get(), genpd->accounting_time);

		total = ktime_add(total, genpd->states[i].idle_time);
	}
	/* Add the in-progress interval (at most one state can be current). */
	total = ktime_add(total, delta);

	seq_printf(s, "%lld ms\n", ktime_to_ms(total));

	genpd_unlock(genpd);
	return ret;
}
3198*4882a593Smuzhiyun 
3199*4882a593Smuzhiyun 
devices_show(struct seq_file * s,void * data)3200*4882a593Smuzhiyun static int devices_show(struct seq_file *s, void *data)
3201*4882a593Smuzhiyun {
3202*4882a593Smuzhiyun 	struct generic_pm_domain *genpd = s->private;
3203*4882a593Smuzhiyun 	struct pm_domain_data *pm_data;
3204*4882a593Smuzhiyun 	const char *kobj_path;
3205*4882a593Smuzhiyun 	int ret = 0;
3206*4882a593Smuzhiyun 
3207*4882a593Smuzhiyun 	ret = genpd_lock_interruptible(genpd);
3208*4882a593Smuzhiyun 	if (ret)
3209*4882a593Smuzhiyun 		return -ERESTARTSYS;
3210*4882a593Smuzhiyun 
3211*4882a593Smuzhiyun 	list_for_each_entry(pm_data, &genpd->dev_list, list_node) {
3212*4882a593Smuzhiyun 		kobj_path = kobject_get_path(&pm_data->dev->kobj,
3213*4882a593Smuzhiyun 				genpd_is_irq_safe(genpd) ?
3214*4882a593Smuzhiyun 				GFP_ATOMIC : GFP_KERNEL);
3215*4882a593Smuzhiyun 		if (kobj_path == NULL)
3216*4882a593Smuzhiyun 			continue;
3217*4882a593Smuzhiyun 
3218*4882a593Smuzhiyun 		seq_printf(s, "%s\n", kobj_path);
3219*4882a593Smuzhiyun 		kfree(kobj_path);
3220*4882a593Smuzhiyun 	}
3221*4882a593Smuzhiyun 
3222*4882a593Smuzhiyun 	genpd_unlock(genpd);
3223*4882a593Smuzhiyun 	return ret;
3224*4882a593Smuzhiyun }
3225*4882a593Smuzhiyun 
perf_state_show(struct seq_file * s,void * data)3226*4882a593Smuzhiyun static int perf_state_show(struct seq_file *s, void *data)
3227*4882a593Smuzhiyun {
3228*4882a593Smuzhiyun 	struct generic_pm_domain *genpd = s->private;
3229*4882a593Smuzhiyun 
3230*4882a593Smuzhiyun 	if (genpd_lock_interruptible(genpd))
3231*4882a593Smuzhiyun 		return -ERESTARTSYS;
3232*4882a593Smuzhiyun 
3233*4882a593Smuzhiyun 	seq_printf(s, "%u\n", genpd->performance_state);
3234*4882a593Smuzhiyun 
3235*4882a593Smuzhiyun 	genpd_unlock(genpd);
3236*4882a593Smuzhiyun 	return 0;
3237*4882a593Smuzhiyun }
3238*4882a593Smuzhiyun 
3239*4882a593Smuzhiyun DEFINE_SHOW_ATTRIBUTE(summary);
3240*4882a593Smuzhiyun DEFINE_SHOW_ATTRIBUTE(status);
3241*4882a593Smuzhiyun DEFINE_SHOW_ATTRIBUTE(sub_domains);
3242*4882a593Smuzhiyun DEFINE_SHOW_ATTRIBUTE(idle_states);
3243*4882a593Smuzhiyun DEFINE_SHOW_ATTRIBUTE(active_time);
3244*4882a593Smuzhiyun DEFINE_SHOW_ATTRIBUTE(total_idle_time);
3245*4882a593Smuzhiyun DEFINE_SHOW_ATTRIBUTE(devices);
3246*4882a593Smuzhiyun DEFINE_SHOW_ATTRIBUTE(perf_state);
3247*4882a593Smuzhiyun 
genpd_debug_add(struct generic_pm_domain * genpd)3248*4882a593Smuzhiyun static void genpd_debug_add(struct generic_pm_domain *genpd)
3249*4882a593Smuzhiyun {
3250*4882a593Smuzhiyun 	struct dentry *d;
3251*4882a593Smuzhiyun 
3252*4882a593Smuzhiyun 	if (!genpd_debugfs_dir)
3253*4882a593Smuzhiyun 		return;
3254*4882a593Smuzhiyun 
3255*4882a593Smuzhiyun 	d = debugfs_create_dir(genpd->name, genpd_debugfs_dir);
3256*4882a593Smuzhiyun 
3257*4882a593Smuzhiyun 	debugfs_create_file("current_state", 0444,
3258*4882a593Smuzhiyun 			    d, genpd, &status_fops);
3259*4882a593Smuzhiyun 	debugfs_create_file("sub_domains", 0444,
3260*4882a593Smuzhiyun 			    d, genpd, &sub_domains_fops);
3261*4882a593Smuzhiyun 	debugfs_create_file("idle_states", 0444,
3262*4882a593Smuzhiyun 			    d, genpd, &idle_states_fops);
3263*4882a593Smuzhiyun 	debugfs_create_file("active_time", 0444,
3264*4882a593Smuzhiyun 			    d, genpd, &active_time_fops);
3265*4882a593Smuzhiyun 	debugfs_create_file("total_idle_time", 0444,
3266*4882a593Smuzhiyun 			    d, genpd, &total_idle_time_fops);
3267*4882a593Smuzhiyun 	debugfs_create_file("devices", 0444,
3268*4882a593Smuzhiyun 			    d, genpd, &devices_fops);
3269*4882a593Smuzhiyun 	if (genpd->set_performance_state)
3270*4882a593Smuzhiyun 		debugfs_create_file("perf_state", 0444,
3271*4882a593Smuzhiyun 				    d, genpd, &perf_state_fops);
3272*4882a593Smuzhiyun }
3273*4882a593Smuzhiyun 
genpd_debug_init(void)3274*4882a593Smuzhiyun static int __init genpd_debug_init(void)
3275*4882a593Smuzhiyun {
3276*4882a593Smuzhiyun 	struct generic_pm_domain *genpd;
3277*4882a593Smuzhiyun 
3278*4882a593Smuzhiyun 	genpd_debugfs_dir = debugfs_create_dir("pm_genpd", NULL);
3279*4882a593Smuzhiyun 
3280*4882a593Smuzhiyun 	debugfs_create_file("pm_genpd_summary", S_IRUGO, genpd_debugfs_dir,
3281*4882a593Smuzhiyun 			    NULL, &summary_fops);
3282*4882a593Smuzhiyun 
3283*4882a593Smuzhiyun 	list_for_each_entry(genpd, &gpd_list, gpd_list_node)
3284*4882a593Smuzhiyun 		genpd_debug_add(genpd);
3285*4882a593Smuzhiyun 
3286*4882a593Smuzhiyun 	return 0;
3287*4882a593Smuzhiyun }
3288*4882a593Smuzhiyun late_initcall(genpd_debug_init);
3289*4882a593Smuzhiyun 
/* Tear down the whole "pm_genpd" debugfs tree on module exit. */
static void __exit genpd_debug_exit(void)
{
	debugfs_remove_recursive(genpd_debugfs_dir);
}
__exitcall(genpd_debug_exit);
3295*4882a593Smuzhiyun #endif /* CONFIG_DEBUG_FS */
3296