1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun * drivers/base/power/domain_governor.c - Governors for device PM domains.
4*4882a593Smuzhiyun *
5*4882a593Smuzhiyun * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
6*4882a593Smuzhiyun */
7*4882a593Smuzhiyun #include <linux/kernel.h>
8*4882a593Smuzhiyun #include <linux/pm_domain.h>
9*4882a593Smuzhiyun #include <linux/pm_qos.h>
10*4882a593Smuzhiyun #include <linux/hrtimer.h>
11*4882a593Smuzhiyun #include <linux/cpuidle.h>
12*4882a593Smuzhiyun #include <linux/cpumask.h>
13*4882a593Smuzhiyun #include <linux/ktime.h>
14*4882a593Smuzhiyun
15*4882a593Smuzhiyun #include <trace/hooks/pm_domain.h>
16*4882a593Smuzhiyun
dev_update_qos_constraint(struct device * dev,void * data)17*4882a593Smuzhiyun static int dev_update_qos_constraint(struct device *dev, void *data)
18*4882a593Smuzhiyun {
19*4882a593Smuzhiyun s64 *constraint_ns_p = data;
20*4882a593Smuzhiyun s64 constraint_ns;
21*4882a593Smuzhiyun
22*4882a593Smuzhiyun if (dev->power.subsys_data && dev->power.subsys_data->domain_data) {
23*4882a593Smuzhiyun /*
24*4882a593Smuzhiyun * Only take suspend-time QoS constraints of devices into
25*4882a593Smuzhiyun * account, because constraints updated after the device has
26*4882a593Smuzhiyun * been suspended are not guaranteed to be taken into account
27*4882a593Smuzhiyun * anyway. In order for them to take effect, the device has to
28*4882a593Smuzhiyun * be resumed and suspended again.
29*4882a593Smuzhiyun */
30*4882a593Smuzhiyun constraint_ns = dev_gpd_data(dev)->td.effective_constraint_ns;
31*4882a593Smuzhiyun } else {
32*4882a593Smuzhiyun /*
33*4882a593Smuzhiyun * The child is not in a domain and there's no info on its
34*4882a593Smuzhiyun * suspend/resume latencies, so assume them to be negligible and
35*4882a593Smuzhiyun * take its current PM QoS constraint (that's the only thing
36*4882a593Smuzhiyun * known at this point anyway).
37*4882a593Smuzhiyun */
38*4882a593Smuzhiyun constraint_ns = dev_pm_qos_read_value(dev, DEV_PM_QOS_RESUME_LATENCY);
39*4882a593Smuzhiyun constraint_ns *= NSEC_PER_USEC;
40*4882a593Smuzhiyun }
41*4882a593Smuzhiyun
42*4882a593Smuzhiyun if (constraint_ns < *constraint_ns_p)
43*4882a593Smuzhiyun *constraint_ns_p = constraint_ns;
44*4882a593Smuzhiyun
45*4882a593Smuzhiyun return 0;
46*4882a593Smuzhiyun }
47*4882a593Smuzhiyun
/**
 * default_suspend_ok - Default PM domain governor routine to suspend devices.
 * @dev: Device to check.
 *
 * Return: true if the device's effective resume-latency QoS constraint
 * (taking its non-domain children into account) leaves room for the
 * device's own suspend and resume latencies, false otherwise.  The
 * verdict is cached in the device's timing data and reused until a
 * constraint change invalidates it.
 */
static bool default_suspend_ok(struct device *dev)
{
	struct gpd_timing_data *td = &dev_gpd_data(dev)->td;
	unsigned long flags;
	s64 constraint_ns;

	dev_dbg(dev, "%s()\n", __func__);

	spin_lock_irqsave(&dev->power.lock, flags);

	/* Fast path: nothing changed since the last evaluation. */
	if (!td->constraint_changed) {
		bool ret = td->cached_suspend_ok;

		spin_unlock_irqrestore(&dev->power.lock, flags);
		return ret;
	}
	td->constraint_changed = false;
	td->cached_suspend_ok = false;
	/* Zero means "no suspend at all" until proven otherwise below. */
	td->effective_constraint_ns = 0;
	constraint_ns = __dev_pm_qos_resume_latency(dev);

	spin_unlock_irqrestore(&dev->power.lock, flags);

	/* A zero resume-latency constraint forbids suspending the device. */
	if (constraint_ns == 0)
		return false;

	constraint_ns *= NSEC_PER_USEC;
	/*
	 * We can walk the children without any additional locking, because
	 * they all have been suspended at this point and their
	 * effective_constraint_ns fields won't be modified in parallel with us.
	 */
	if (!dev->power.ignore_children)
		device_for_each_child(dev, &constraint_ns,
				      dev_update_qos_constraint);

	if (constraint_ns == PM_QOS_RESUME_LATENCY_NO_CONSTRAINT_NS) {
		/* "No restriction", so the device is allowed to suspend. */
		td->effective_constraint_ns = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT_NS;
		td->cached_suspend_ok = true;
	} else if (constraint_ns == 0) {
		/*
		 * This triggers if one of the children that don't belong to a
		 * domain has a zero PM QoS constraint and it's better not to
		 * suspend then. effective_constraint_ns is zero already and
		 * cached_suspend_ok is false, so bail out.
		 */
		return false;
	} else {
		/* Budget left after this device's own transition latencies. */
		constraint_ns -= td->suspend_latency_ns +
				td->resume_latency_ns;
		/*
		 * effective_constraint_ns is zero already and cached_suspend_ok
		 * is false, so if the computed value is not positive, return
		 * right away.
		 */
		if (constraint_ns <= 0)
			return false;

		td->effective_constraint_ns = constraint_ns;
		td->cached_suspend_ok = true;
	}

	/*
	 * The children have been suspended already, so we don't need to take
	 * their suspend latencies into account here.
	 */
	return td->cached_suspend_ok;
}
121*4882a593Smuzhiyun
/*
 * update_domain_next_wakeup - Recompute the domain's earliest next wakeup.
 * @genpd: PM domain to update.
 * @now: Current time; wakeups in the past are ignored.
 *
 * Only relevant for domains with GENPD_FLAG_MIN_RESIDENCY set.  Stores
 * the earliest future wakeup of any attached device or subdomain in
 * genpd->next_wakeup (KTIME_MAX when none is known).
 */
static void update_domain_next_wakeup(struct generic_pm_domain *genpd, ktime_t now)
{
	ktime_t earliest = KTIME_MAX;
	ktime_t wakeup;
	struct pm_domain_data *pdd;
	struct gpd_link *link;

	if (!(genpd->flags & GENPD_FLAG_MIN_RESIDENCY))
		return;

	/*
	 * Devices that have a predictable wakeup pattern may specify their
	 * next wakeup. Find the earliest such wakeup across all devices
	 * attached to this domain and across all sub-domains. A component's
	 * next wakeup may have become stale by the time we read it here;
	 * stale (past) values are ignored so the domain can still enter its
	 * optimal idle state.
	 */
	list_for_each_entry(pdd, &genpd->dev_list, list_node) {
		wakeup = to_gpd_data(pdd)->next_wakeup;
		if (wakeup != KTIME_MAX && !ktime_before(wakeup, now) &&
		    ktime_before(wakeup, earliest))
			earliest = wakeup;
	}

	list_for_each_entry(link, &genpd->parent_links, parent_node) {
		wakeup = link->child->next_wakeup;
		if (wakeup != KTIME_MAX && !ktime_before(wakeup, now) &&
		    ktime_before(wakeup, earliest))
			earliest = wakeup;
	}

	genpd->next_wakeup = earliest;
}
156*4882a593Smuzhiyun
next_wakeup_allows_state(struct generic_pm_domain * genpd,unsigned int state,ktime_t now)157*4882a593Smuzhiyun static bool next_wakeup_allows_state(struct generic_pm_domain *genpd,
158*4882a593Smuzhiyun unsigned int state, ktime_t now)
159*4882a593Smuzhiyun {
160*4882a593Smuzhiyun ktime_t domain_wakeup = genpd->next_wakeup;
161*4882a593Smuzhiyun s64 idle_time_ns, min_sleep_ns;
162*4882a593Smuzhiyun
163*4882a593Smuzhiyun min_sleep_ns = genpd->states[state].power_off_latency_ns +
164*4882a593Smuzhiyun genpd->states[state].residency_ns;
165*4882a593Smuzhiyun
166*4882a593Smuzhiyun idle_time_ns = ktime_to_ns(ktime_sub(domain_wakeup, now));
167*4882a593Smuzhiyun
168*4882a593Smuzhiyun return idle_time_ns >= min_sleep_ns;
169*4882a593Smuzhiyun }
170*4882a593Smuzhiyun
/*
 * __default_power_down_ok - Check if a domain may enter the given idle state.
 * @pd: PM domain to check.
 * @state: Index into the domain's states[] table.
 *
 * Verifies that every subdomain and every attached device can tolerate
 * the off+on latency of @state, and updates genpd->max_off_time_ns with
 * the maximum time the domain may spend powered off.
 *
 * Return: true if powering down into @state is acceptable.
 */
static bool __default_power_down_ok(struct dev_pm_domain *pd,
				     unsigned int state)
{
	struct generic_pm_domain *genpd = pd_to_genpd(pd);
	struct gpd_link *link;
	struct pm_domain_data *pdd;
	s64 min_off_time_ns;
	s64 off_on_time_ns;
	bool allow = true;

	/* Android vendor hook may veto this state outright. */
	trace_android_vh_allow_domain_state(genpd, state, &allow);
	if (!allow)
		return false;

	off_on_time_ns = genpd->states[state].power_off_latency_ns +
		genpd->states[state].power_on_latency_ns;

	/* -1 means "no latency constraint found yet". */
	min_off_time_ns = -1;
	/*
	 * Check if subdomains can be off for enough time.
	 *
	 * All subdomains have been powered off already at this point.
	 */
	list_for_each_entry(link, &genpd->parent_links, parent_node) {
		struct generic_pm_domain *sd = link->child;
		s64 sd_max_off_ns = sd->max_off_time_ns;

		/* Negative means the subdomain has no constraint. */
		if (sd_max_off_ns < 0)
			continue;

		/*
		 * Check if the subdomain is allowed to be off long enough for
		 * the current domain to turn off and on (that's how much time
		 * it will have to wait worst case).
		 */
		if (sd_max_off_ns <= off_on_time_ns)
			return false;

		if (min_off_time_ns > sd_max_off_ns || min_off_time_ns < 0)
			min_off_time_ns = sd_max_off_ns;
	}

	/*
	 * Check if the devices in the domain can be off enough time.
	 */
	list_for_each_entry(pdd, &genpd->dev_list, list_node) {
		struct gpd_timing_data *td;
		s64 constraint_ns;

		/*
		 * Check if the device is allowed to be off long enough for the
		 * domain to turn off and on (that's how much time it will
		 * have to wait worst case).
		 */
		td = &to_gpd_data(pdd)->td;
		constraint_ns = td->effective_constraint_ns;
		/*
		 * Zero means "no suspend at all" and this runs only when all
		 * devices in the domain are suspended, so it must be positive.
		 */
		if (constraint_ns == PM_QOS_RESUME_LATENCY_NO_CONSTRAINT_NS)
			continue;

		if (constraint_ns <= off_on_time_ns)
			return false;

		if (min_off_time_ns > constraint_ns || min_off_time_ns < 0)
			min_off_time_ns = constraint_ns;
	}

	/*
	 * If the computed minimum device off time is negative, there are no
	 * latency constraints, so the domain can spend arbitrary time in the
	 * "off" state.
	 */
	if (min_off_time_ns < 0)
		return true;

	/*
	 * The difference between the computed minimum subdomain or device off
	 * time and the time needed to turn the domain on is the maximum
	 * theoretical time this domain can spend in the "off" state.
	 */
	genpd->max_off_time_ns = min_off_time_ns -
		genpd->states[state].power_on_latency_ns;
	return true;
}
258*4882a593Smuzhiyun
/**
 * _default_power_down_ok - Default generic PM domain power off governor routine.
 * @pd: PM domain to check.
 * @now: Current time, used to evaluate next-wakeup residency.
 *
 * Picks the deepest idle state whose residency and latency constraints
 * can be met, stores it in genpd->state_idx and caches the verdict.
 *
 * This routine must be executed under the PM domain's lock.
 *
 * Return: true if the domain may be powered off.
 */
static bool _default_power_down_ok(struct dev_pm_domain *pd, ktime_t now)
{
	struct generic_pm_domain *genpd = pd_to_genpd(pd);
	int state_idx = genpd->state_count - 1;
	struct gpd_link *link;

	/*
	 * Find the next wakeup from devices that can determine their own wakeup
	 * to find when the domain would wakeup and do it for every device down
	 * the hierarchy. It is not worth while to sleep if the state's residency
	 * cannot be met.
	 */
	update_domain_next_wakeup(genpd, now);
	if ((genpd->flags & GENPD_FLAG_MIN_RESIDENCY) && (genpd->next_wakeup != KTIME_MAX)) {
		/* Let's find out the deepest domain idle state, the devices prefer */
		while (state_idx >= 0) {
			if (next_wakeup_allows_state(genpd, state_idx, now)) {
				/* Force re-evaluation of the QoS checks below. */
				genpd->max_off_time_changed = true;
				break;
			}
			state_idx--;
		}

		/* No state satisfies its residency; stay powered on. */
		if (state_idx < 0) {
			state_idx = 0;
			genpd->cached_power_down_ok = false;
			goto done;
		}
	}

	/* Fast path: nothing changed since the cached evaluation. */
	if (!genpd->max_off_time_changed) {
		genpd->state_idx = genpd->cached_power_down_state_idx;
		return genpd->cached_power_down_ok;
	}

	/*
	 * We have to invalidate the cached results for the parents, so
	 * use the observation that default_power_down_ok() is not
	 * going to be called for any parent until this instance
	 * returns.
	 */
	list_for_each_entry(link, &genpd->child_links, child_node)
		link->parent->max_off_time_changed = true;

	genpd->max_off_time_ns = -1;
	genpd->max_off_time_changed = false;
	genpd->cached_power_down_ok = true;

	/*
	 * Find a state to power down to, starting from the state
	 * determined by the next wakeup.
	 */
	while (!__default_power_down_ok(pd, state_idx)) {
		if (state_idx == 0) {
			genpd->cached_power_down_ok = false;
			break;
		}
		state_idx--;
	}

done:
	genpd->state_idx = state_idx;
	genpd->cached_power_down_state_idx = genpd->state_idx;
	return genpd->cached_power_down_ok;
}
330*4882a593Smuzhiyun
default_power_down_ok(struct dev_pm_domain * pd)331*4882a593Smuzhiyun static bool default_power_down_ok(struct dev_pm_domain *pd)
332*4882a593Smuzhiyun {
333*4882a593Smuzhiyun return _default_power_down_ok(pd, ktime_get());
334*4882a593Smuzhiyun }
335*4882a593Smuzhiyun
/* Governor callback that never permits powering the domain down. */
static bool always_on_power_down_ok(struct dev_pm_domain *domain)
{
	return false;
}
340*4882a593Smuzhiyun
341*4882a593Smuzhiyun #ifdef CONFIG_CPU_IDLE
/*
 * cpu_power_down_ok - Power-down governor for CPU PM domains.
 * @pd: PM domain to check.
 *
 * First validates the device PM QoS constraints via
 * _default_power_down_ok(), then, for CPU domains, limits the chosen
 * idle state by the earliest next hrtimer event of the domain's online
 * CPUs.  May lower genpd->state_idx to a shallower state.
 *
 * Return: true if the domain may be powered off.
 */
static bool cpu_power_down_ok(struct dev_pm_domain *pd)
{
	struct generic_pm_domain *genpd = pd_to_genpd(pd);
	struct cpuidle_device *dev;
	ktime_t domain_wakeup, next_hrtimer;
	ktime_t now = ktime_get();
	s64 idle_duration_ns;
	int cpu, i;

	/* Validate dev PM QoS constraints. */
	if (!_default_power_down_ok(pd, now))
		return false;

	/* Non-CPU domains need no further checks. */
	if (!(genpd->flags & GENPD_FLAG_CPU_DOMAIN))
		return true;

	/*
	 * Find the next wakeup for any of the online CPUs within the PM domain
	 * and its subdomains. Note, we only need the genpd->cpus, as it already
	 * contains a mask of all CPUs from subdomains.
	 */
	domain_wakeup = ktime_set(KTIME_SEC_MAX, 0);
	for_each_cpu_and(cpu, genpd->cpus, cpu_online_mask) {
		dev = per_cpu(cpuidle_devices, cpu);
		if (dev) {
			/* READ_ONCE: field is updated concurrently by the CPU. */
			next_hrtimer = READ_ONCE(dev->next_hrtimer);
			if (ktime_before(next_hrtimer, domain_wakeup))
				domain_wakeup = next_hrtimer;
		}
	}

	/* The minimum idle duration is from now - until the next wakeup. */
	idle_duration_ns = ktime_to_ns(ktime_sub(domain_wakeup, now));
	if (idle_duration_ns <= 0)
		return false;

	/*
	 * Find the deepest idle state that has its residency value satisfied
	 * and by also taking into account the power off latency for the state.
	 * Start at the state picked by the dev PM QoS constraint validation.
	 */
	i = genpd->state_idx;
	do {
		if (idle_duration_ns >= (genpd->states[i].residency_ns +
		    genpd->states[i].power_off_latency_ns)) {
			genpd->state_idx = i;
			return true;
		}
	} while (--i >= 0);

	/* No state fits into the idle window. */
	return false;
}
394*4882a593Smuzhiyun
/* Governor for CPU PM domains: also honours CPU next-hrtimer wakeups. */
struct dev_power_governor pm_domain_cpu_gov = {
	.suspend_ok = default_suspend_ok,
	.power_down_ok = cpu_power_down_ok,
};
399*4882a593Smuzhiyun #endif
400*4882a593Smuzhiyun
/* Default QoS-aware governor for generic (non-CPU) PM domains. */
struct dev_power_governor simple_qos_governor = {
	.suspend_ok = default_suspend_ok,
	.power_down_ok = default_power_down_ok,
};
405*4882a593Smuzhiyun
/**
 * pm_genpd_gov_always_on - A governor implementing an always-on policy.
 *
 * Devices may still be runtime-suspended (subject to the default QoS
 * check), but the domain itself is never allowed to power down.
 */
struct dev_power_governor pm_domain_always_on_gov = {
	.power_down_ok = always_on_power_down_ok,
	.suspend_ok = default_suspend_ok,
};
413