Lines matching refs: genpd (generic PM domain core, drivers/base/power/domain.c); each entry gives the source line number, the matching code, and the enclosing function.
30 #define GENPD_DEV_CALLBACK(genpd, type, callback, dev) \ argument
35 __routine = genpd->dev_ops.callback; \
46 void (*lock)(struct generic_pm_domain *genpd);
47 void (*lock_nested)(struct generic_pm_domain *genpd, int depth);
48 int (*lock_interruptible)(struct generic_pm_domain *genpd);
49 void (*unlock)(struct generic_pm_domain *genpd);
52 static void genpd_lock_mtx(struct generic_pm_domain *genpd) in genpd_lock_mtx() argument
54 mutex_lock(&genpd->mlock); in genpd_lock_mtx()
57 static void genpd_lock_nested_mtx(struct generic_pm_domain *genpd, in genpd_lock_nested_mtx() argument
60 mutex_lock_nested(&genpd->mlock, depth); in genpd_lock_nested_mtx()
63 static int genpd_lock_interruptible_mtx(struct generic_pm_domain *genpd) in genpd_lock_interruptible_mtx() argument
65 return mutex_lock_interruptible(&genpd->mlock); in genpd_lock_interruptible_mtx()
68 static void genpd_unlock_mtx(struct generic_pm_domain *genpd) in genpd_unlock_mtx() argument
70 return mutex_unlock(&genpd->mlock); in genpd_unlock_mtx()
80 static void genpd_lock_spin(struct generic_pm_domain *genpd) in genpd_lock_spin() argument
81 __acquires(&genpd->slock) in genpd_lock_spin()
85 spin_lock_irqsave(&genpd->slock, flags); in genpd_lock_spin()
86 genpd->lock_flags = flags; in genpd_lock_spin()
89 static void genpd_lock_nested_spin(struct generic_pm_domain *genpd, in genpd_lock_nested_spin() argument
91 __acquires(&genpd->slock) in genpd_lock_nested_spin()
95 spin_lock_irqsave_nested(&genpd->slock, flags, depth); in genpd_lock_nested_spin()
96 genpd->lock_flags = flags; in genpd_lock_nested_spin()
99 static int genpd_lock_interruptible_spin(struct generic_pm_domain *genpd) in genpd_lock_interruptible_spin() argument
100 __acquires(&genpd->slock) in genpd_lock_interruptible_spin()
104 spin_lock_irqsave(&genpd->slock, flags); in genpd_lock_interruptible_spin()
105 genpd->lock_flags = flags; in genpd_lock_interruptible_spin()
109 static void genpd_unlock_spin(struct generic_pm_domain *genpd) in genpd_unlock_spin() argument
110 __releases(&genpd->slock) in genpd_unlock_spin()
112 spin_unlock_irqrestore(&genpd->slock, genpd->lock_flags); in genpd_unlock_spin()
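The mutex- and spinlock-based helpers above are collected into two ops tables that genpd->lock_ops points at; the table definitions themselves do not literally reference "genpd", so they are missing from this listing. A reconstruction, assuming the struct genpd_lock_ops type whose members appear at lines 46-49:

static const struct genpd_lock_ops genpd_mtx_ops = {
	.lock = genpd_lock_mtx,
	.lock_nested = genpd_lock_nested_mtx,
	.lock_interruptible = genpd_lock_interruptible_mtx,
	.unlock = genpd_unlock_mtx,
};

static const struct genpd_lock_ops genpd_spin_ops = {
	.lock = genpd_lock_spin,
	.lock_nested = genpd_lock_nested_spin,
	.lock_interruptible = genpd_lock_interruptible_spin,
	.unlock = genpd_unlock_spin,
};

genpd_lock_init() (line 1939 below) picks genpd_spin_ops for GENPD_FLAG_IRQ_SAFE domains and genpd_mtx_ops otherwise.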
127 #define genpd_status_on(genpd) (genpd->status == GENPD_STATE_ON) argument
128 #define genpd_is_irq_safe(genpd) (genpd->flags & GENPD_FLAG_IRQ_SAFE) argument
129 #define genpd_is_always_on(genpd) (genpd->flags & GENPD_FLAG_ALWAYS_ON) argument
130 #define genpd_is_active_wakeup(genpd) (genpd->flags & GENPD_FLAG_ACTIVE_WAKEUP) argument
131 #define genpd_is_cpu_domain(genpd) (genpd->flags & GENPD_FLAG_CPU_DOMAIN) argument
132 #define genpd_is_rpm_always_on(genpd) (genpd->flags & GENPD_FLAG_RPM_ALWAYS_ON) argument
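These flag tests drive much of the behaviour below; a provider sets the corresponding GENPD_FLAG_* bits before calling pm_genpd_init(). A minimal sketch (the domain and its name are hypothetical):

#include <linux/pm_domain.h>

static struct generic_pm_domain my_pd = {
	.name = "my_pd",
	/* keep the domain usable as a wakeup path and let the core drive the
	 * attached devices' clocks via pm_clk */
	.flags = GENPD_FLAG_ACTIVE_WAKEUP | GENPD_FLAG_PM_CLK,
};

GENPD_FLAG_PM_CLK makes pm_genpd_init() install pm_clk_suspend()/pm_clk_resume() as the per-device stop/start callbacks (lines 1995-1996), and ALWAYS_ON/RPM_ALWAYS_ON domains must be registered in the on state (see the check at lines 2000-2001).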
135 const struct generic_pm_domain *genpd) in irq_safe_dev_in_no_sleep_domain() argument
139 ret = pm_runtime_is_irq_safe(dev) && !genpd_is_irq_safe(genpd); in irq_safe_dev_in_no_sleep_domain()
146 if (ret && !genpd_is_always_on(genpd)) in irq_safe_dev_in_no_sleep_domain()
148 genpd->name); in irq_safe_dev_in_no_sleep_domain()
185 static int genpd_stop_dev(const struct generic_pm_domain *genpd, in genpd_stop_dev() argument
188 return GENPD_DEV_CALLBACK(genpd, int, stop, dev); in genpd_stop_dev()
191 static int genpd_start_dev(const struct generic_pm_domain *genpd, in genpd_start_dev() argument
194 return GENPD_DEV_CALLBACK(genpd, int, start, dev); in genpd_start_dev()
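genpd_stop_dev()/genpd_start_dev() expand through GENPD_DEV_CALLBACK to whatever sits in genpd->dev_ops. A provider that does not use GENPD_FLAG_PM_CLK can install its own hooks; a hedged sketch with hypothetical names:

#include <linux/device.h>
#include <linux/pm_domain.h>

static int my_pd_stop(struct device *dev)
{
	/* reached from genpd_stop_dev() after the device runtime-suspends,
	 * e.g. to gate its interface clock */
	return 0;
}

static int my_pd_start(struct device *dev)
{
	/* reached from genpd_start_dev() before the device runtime-resumes */
	return 0;
}

/* Installed before pm_genpd_init():
 *	my_pd.dev_ops.stop  = my_pd_stop;
 *	my_pd.dev_ops.start = my_pd_start;
 */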
197 static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd) in genpd_sd_counter_dec() argument
201 if (!WARN_ON(atomic_read(&genpd->sd_count) == 0)) in genpd_sd_counter_dec()
202 ret = !!atomic_dec_and_test(&genpd->sd_count); in genpd_sd_counter_dec()
207 static void genpd_sd_counter_inc(struct generic_pm_domain *genpd) in genpd_sd_counter_inc() argument
209 atomic_inc(&genpd->sd_count); in genpd_sd_counter_inc()
216 static void genpd_debug_add(struct generic_pm_domain *genpd);
218 static void genpd_debug_remove(struct generic_pm_domain *genpd) in genpd_debug_remove() argument
225 d = debugfs_lookup(genpd->name, genpd_debugfs_dir); in genpd_debug_remove()
229 static void genpd_update_accounting(struct generic_pm_domain *genpd) in genpd_update_accounting() argument
234 delta = ktime_sub(now, genpd->accounting_time); in genpd_update_accounting()
241 if (genpd->status == GENPD_STATE_ON) { in genpd_update_accounting()
242 int state_idx = genpd->state_idx; in genpd_update_accounting()
244 genpd->states[state_idx].idle_time = in genpd_update_accounting()
245 ktime_add(genpd->states[state_idx].idle_time, delta); in genpd_update_accounting()
247 genpd->on_time = ktime_add(genpd->on_time, delta); in genpd_update_accounting()
250 genpd->accounting_time = now; in genpd_update_accounting()
253 static inline void genpd_debug_add(struct generic_pm_domain *genpd) {} in genpd_debug_add() argument
254 static inline void genpd_debug_remove(struct generic_pm_domain *genpd) {} in genpd_debug_remove() argument
255 static inline void genpd_update_accounting(struct generic_pm_domain *genpd) {} in genpd_update_accounting() argument
258 static int _genpd_reeval_performance_state(struct generic_pm_domain *genpd, in _genpd_reeval_performance_state() argument
266 if (state == genpd->performance_state) in _genpd_reeval_performance_state()
270 if (state > genpd->performance_state) in _genpd_reeval_performance_state()
274 list_for_each_entry(pdd, &genpd->dev_list, list_node) { in _genpd_reeval_performance_state()
295 list_for_each_entry(link, &genpd->parent_links, parent_node) { in _genpd_reeval_performance_state()
303 static int _genpd_set_performance_state(struct generic_pm_domain *genpd, in _genpd_set_performance_state() argument
310 if (state == genpd->performance_state) in _genpd_set_performance_state()
314 list_for_each_entry(link, &genpd->child_links, child_node) { in _genpd_set_performance_state()
321 ret = dev_pm_opp_xlate_performance_state(genpd->opp_table, in _genpd_set_performance_state()
345 ret = genpd->set_performance_state(genpd, state); in _genpd_set_performance_state()
349 genpd->performance_state = state; in _genpd_set_performance_state()
354 list_for_each_entry_continue_reverse(link, &genpd->child_links, in _genpd_set_performance_state()
396 struct generic_pm_domain *genpd; in dev_pm_genpd_set_performance_state() local
401 genpd = dev_to_genpd_safe(dev); in dev_pm_genpd_set_performance_state()
402 if (!genpd) in dev_pm_genpd_set_performance_state()
405 if (unlikely(!genpd->set_performance_state)) in dev_pm_genpd_set_performance_state()
412 genpd_lock(genpd); in dev_pm_genpd_set_performance_state()
418 state = _genpd_reeval_performance_state(genpd, state); in dev_pm_genpd_set_performance_state()
419 ret = _genpd_set_performance_state(genpd, state, 0); in dev_pm_genpd_set_performance_state()
423 genpd_unlock(genpd); in dev_pm_genpd_set_performance_state()
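dev_pm_genpd_set_performance_state() is the consumer-facing entry to the aggregation above: the request is recorded for the device, re-evaluated against all other requests in the domain, and propagated through the parent links; it fails if the domain does not implement ->set_performance_state. A sketch of a consumer raising and dropping a vote (the level 3 is hypothetical; in practice it usually comes from an OPP translation):

#include <linux/pm_domain.h>

static int my_begin_heavy_work(struct device *dev)
{
	/* vote for a higher domain performance level */
	return dev_pm_genpd_set_performance_state(dev, 3);
}

static void my_end_heavy_work(struct device *dev)
{
	/* 0 drops this device's vote; the domain settles to the highest
	 * remaining request */
	dev_pm_genpd_set_performance_state(dev, 0);
}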
447 struct generic_pm_domain *genpd; in dev_pm_genpd_set_next_wakeup() local
449 genpd = dev_to_genpd_safe(dev); in dev_pm_genpd_set_next_wakeup()
450 if (!genpd) in dev_pm_genpd_set_next_wakeup()
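dev_pm_genpd_set_next_wakeup() records a wakeup hint for the device's domain that governors may consult when choosing an idle state. A sketch, assuming the device is already attached to a genpd:

#include <linux/ktime.h>
#include <linux/pm_domain.h>

static void my_hint_next_wakeup(struct device *dev)
{
	/* expect to be woken roughly 10 ms from now */
	dev_pm_genpd_set_next_wakeup(dev, ktime_add_ms(ktime_get(), 10));
}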
458 static int _genpd_power_on(struct generic_pm_domain *genpd, bool timed) in _genpd_power_on() argument
460 unsigned int state_idx = genpd->state_idx; in _genpd_power_on()
466 ret = raw_notifier_call_chain_robust(&genpd->power_notifiers, in _genpd_power_on()
473 if (!genpd->power_on) in _genpd_power_on()
477 ret = genpd->power_on(genpd); in _genpd_power_on()
485 ret = genpd->power_on(genpd); in _genpd_power_on()
490 if (elapsed_ns <= genpd->states[state_idx].power_on_latency_ns) in _genpd_power_on()
493 genpd->states[state_idx].power_on_latency_ns = elapsed_ns; in _genpd_power_on()
494 genpd->max_off_time_changed = true; in _genpd_power_on()
496 genpd->name, "on", elapsed_ns); in _genpd_power_on()
499 raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_ON, NULL); in _genpd_power_on()
502 raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_OFF, in _genpd_power_on()
507 static int _genpd_power_off(struct generic_pm_domain *genpd, bool timed) in _genpd_power_off() argument
509 unsigned int state_idx = genpd->state_idx; in _genpd_power_off()
515 ret = raw_notifier_call_chain_robust(&genpd->power_notifiers, in _genpd_power_off()
522 if (!genpd->power_off) in _genpd_power_off()
526 ret = genpd->power_off(genpd); in _genpd_power_off()
534 ret = genpd->power_off(genpd); in _genpd_power_off()
539 if (elapsed_ns <= genpd->states[state_idx].power_off_latency_ns) in _genpd_power_off()
542 genpd->states[state_idx].power_off_latency_ns = elapsed_ns; in _genpd_power_off()
543 genpd->max_off_time_changed = true; in _genpd_power_off()
545 genpd->name, "off", elapsed_ns); in _genpd_power_off()
548 raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_OFF, in _genpd_power_off()
552 raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_ON, NULL); in _genpd_power_off()
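_genpd_power_on()/_genpd_power_off() wrap the provider's ->power_on/->power_off callbacks in GENPD_NOTIFY_PRE_ON/ON and GENPD_NOTIFY_PRE_OFF/OFF notifications and, when timed, feed the measured latency back into the per-state power_on/off_latency_ns used by the governors. A sketch of the provider side (register accesses omitted, names hypothetical):

#include <linux/pm_domain.h>

static int my_pd_power_on(struct generic_pm_domain *pd)
{
	/* ungate the domain in the power controller; an error here makes
	 * _genpd_power_on() roll back with a GENPD_NOTIFY_OFF notification */
	return 0;
}

static int my_pd_power_off(struct generic_pm_domain *pd)
{
	/* gate the domain; an error keeps the domain ON and is counted in
	 * states[state_idx].rejected by genpd_power_off() */
	return 0;
}

These would be assigned as my_pd.power_on/.power_off in the earlier sketch before pm_genpd_init().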
563 static void genpd_queue_power_off_work(struct generic_pm_domain *genpd) in genpd_queue_power_off_work() argument
565 queue_work(pm_wq, &genpd->power_off_work); in genpd_queue_power_off_work()
579 static int genpd_power_off(struct generic_pm_domain *genpd, bool one_dev_on, in genpd_power_off() argument
592 if (!genpd_status_on(genpd) || genpd->prepared_count > 0) in genpd_power_off()
600 if (genpd_is_always_on(genpd) || in genpd_power_off()
601 genpd_is_rpm_always_on(genpd) || in genpd_power_off()
602 atomic_read(&genpd->sd_count) > 0) in genpd_power_off()
605 list_for_each_entry(pdd, &genpd->dev_list, list_node) { in genpd_power_off()
617 irq_safe_dev_in_no_sleep_domain(pdd->dev, genpd)) in genpd_power_off()
624 if (genpd->gov && genpd->gov->power_down_ok) { in genpd_power_off()
625 if (!genpd->gov->power_down_ok(&genpd->domain)) in genpd_power_off()
630 if (!genpd->gov) in genpd_power_off()
631 genpd->state_idx = 0; in genpd_power_off()
634 if (atomic_read(&genpd->sd_count) > 0) in genpd_power_off()
637 ret = _genpd_power_off(genpd, true); in genpd_power_off()
639 genpd->states[genpd->state_idx].rejected++; in genpd_power_off()
643 genpd->status = GENPD_STATE_OFF; in genpd_power_off()
644 genpd_update_accounting(genpd); in genpd_power_off()
645 genpd->states[genpd->state_idx].usage++; in genpd_power_off()
647 list_for_each_entry(link, &genpd->child_links, child_node) { in genpd_power_off()
665 static int genpd_power_on(struct generic_pm_domain *genpd, unsigned int depth) in genpd_power_on() argument
670 if (genpd_status_on(genpd)) in genpd_power_on()
678 list_for_each_entry(link, &genpd->child_links, child_node) { in genpd_power_on()
693 ret = _genpd_power_on(genpd, true); in genpd_power_on()
697 genpd->status = GENPD_STATE_ON; in genpd_power_on()
698 genpd_update_accounting(genpd); in genpd_power_on()
704 &genpd->child_links, in genpd_power_on()
717 struct generic_pm_domain *genpd = dev_to_genpd(dev); in genpd_dev_pm_start() local
719 return genpd_start_dev(genpd, dev); in genpd_dev_pm_start()
732 struct generic_pm_domain *genpd; in genpd_dev_pm_qos_notifier() local
741 genpd = dev_to_genpd(dev); in genpd_dev_pm_qos_notifier()
743 genpd = ERR_PTR(-ENODATA); in genpd_dev_pm_qos_notifier()
748 if (!IS_ERR(genpd)) { in genpd_dev_pm_qos_notifier()
749 genpd_lock(genpd); in genpd_dev_pm_qos_notifier()
750 genpd->max_off_time_changed = true; in genpd_dev_pm_qos_notifier()
751 genpd_unlock(genpd); in genpd_dev_pm_qos_notifier()
768 struct generic_pm_domain *genpd; in genpd_power_off_work_fn() local
770 genpd = container_of(work, struct generic_pm_domain, power_off_work); in genpd_power_off_work_fn()
772 genpd_lock(genpd); in genpd_power_off_work_fn()
773 genpd_power_off(genpd, false, 0); in genpd_power_off_work_fn()
774 genpd_unlock(genpd); in genpd_power_off_work_fn()
833 struct generic_pm_domain *genpd; in genpd_runtime_suspend() local
843 genpd = dev_to_genpd(dev); in genpd_runtime_suspend()
844 if (IS_ERR(genpd)) in genpd_runtime_suspend()
853 suspend_ok = genpd->gov ? genpd->gov->suspend_ok : NULL; in genpd_runtime_suspend()
866 ret = genpd_stop_dev(genpd, dev); in genpd_runtime_suspend()
879 genpd->max_off_time_changed = true; in genpd_runtime_suspend()
888 if (irq_safe_dev_in_no_sleep_domain(dev, genpd)) in genpd_runtime_suspend()
891 genpd_lock(genpd); in genpd_runtime_suspend()
892 genpd_power_off(genpd, true, 0); in genpd_runtime_suspend()
893 genpd_unlock(genpd); in genpd_runtime_suspend()
908 struct generic_pm_domain *genpd; in genpd_runtime_resume() local
918 genpd = dev_to_genpd(dev); in genpd_runtime_resume()
919 if (IS_ERR(genpd)) in genpd_runtime_resume()
926 if (irq_safe_dev_in_no_sleep_domain(dev, genpd)) { in genpd_runtime_resume()
931 genpd_lock(genpd); in genpd_runtime_resume()
932 ret = genpd_power_on(genpd, 0); in genpd_runtime_resume()
933 genpd_unlock(genpd); in genpd_runtime_resume()
944 ret = genpd_start_dev(genpd, dev); in genpd_runtime_resume()
959 genpd->max_off_time_changed = true; in genpd_runtime_resume()
967 genpd_stop_dev(genpd, dev); in genpd_runtime_resume()
970 (pm_runtime_is_irq_safe(dev) && genpd_is_irq_safe(genpd))) { in genpd_runtime_resume()
971 genpd_lock(genpd); in genpd_runtime_resume()
972 genpd_power_off(genpd, true, 0); in genpd_runtime_resume()
973 genpd_unlock(genpd); in genpd_runtime_resume()
992 struct generic_pm_domain *genpd; in genpd_power_off_unused() local
1001 list_for_each_entry(genpd, &gpd_list, gpd_list_node) in genpd_power_off_unused()
1002 genpd_queue_power_off_work(genpd); in genpd_power_off_unused()
1025 static void genpd_sync_power_off(struct generic_pm_domain *genpd, bool use_lock, in genpd_sync_power_off() argument
1030 if (!genpd_status_on(genpd) || genpd_is_always_on(genpd)) in genpd_sync_power_off()
1033 if (genpd->suspended_count != genpd->device_count in genpd_sync_power_off()
1034 || atomic_read(&genpd->sd_count) > 0) in genpd_sync_power_off()
1038 genpd->state_idx = genpd->state_count - 1; in genpd_sync_power_off()
1039 if (_genpd_power_off(genpd, false)) in genpd_sync_power_off()
1042 genpd->status = GENPD_STATE_OFF; in genpd_sync_power_off()
1044 list_for_each_entry(link, &genpd->child_links, child_node) { in genpd_sync_power_off()
1067 static void genpd_sync_power_on(struct generic_pm_domain *genpd, bool use_lock, in genpd_sync_power_on() argument
1072 if (genpd_status_on(genpd)) in genpd_sync_power_on()
1075 list_for_each_entry(link, &genpd->child_links, child_node) { in genpd_sync_power_on()
1087 _genpd_power_on(genpd, false); in genpd_sync_power_on()
1088 genpd->status = GENPD_STATE_ON; in genpd_sync_power_on()
1108 const struct generic_pm_domain *genpd) in resume_needed() argument
1115 active_wakeup = genpd_is_active_wakeup(genpd); in resume_needed()
1130 struct generic_pm_domain *genpd; in genpd_prepare() local
1135 genpd = dev_to_genpd(dev); in genpd_prepare()
1136 if (IS_ERR(genpd)) in genpd_prepare()
1144 if (resume_needed(dev, genpd)) in genpd_prepare()
1147 genpd_lock(genpd); in genpd_prepare()
1149 if (genpd->prepared_count++ == 0) in genpd_prepare()
1150 genpd->suspended_count = 0; in genpd_prepare()
1152 genpd_unlock(genpd); in genpd_prepare()
1156 genpd_lock(genpd); in genpd_prepare()
1158 genpd->prepared_count--; in genpd_prepare()
1160 genpd_unlock(genpd); in genpd_prepare()
1178 struct generic_pm_domain *genpd; in genpd_finish_suspend() local
1181 genpd = dev_to_genpd(dev); in genpd_finish_suspend()
1182 if (IS_ERR(genpd)) in genpd_finish_suspend()
1192 if (dev->power.wakeup_path && genpd_is_active_wakeup(genpd)) in genpd_finish_suspend()
1195 if (genpd->dev_ops.stop && genpd->dev_ops.start && in genpd_finish_suspend()
1197 ret = genpd_stop_dev(genpd, dev); in genpd_finish_suspend()
1207 genpd_lock(genpd); in genpd_finish_suspend()
1208 genpd->suspended_count++; in genpd_finish_suspend()
1209 genpd_sync_power_off(genpd, true, 0); in genpd_finish_suspend()
1210 genpd_unlock(genpd); in genpd_finish_suspend()
1237 struct generic_pm_domain *genpd; in genpd_resume_noirq() local
1242 genpd = dev_to_genpd(dev); in genpd_resume_noirq()
1243 if (IS_ERR(genpd)) in genpd_resume_noirq()
1246 if (dev->power.wakeup_path && genpd_is_active_wakeup(genpd)) in genpd_resume_noirq()
1249 genpd_lock(genpd); in genpd_resume_noirq()
1250 genpd_sync_power_on(genpd, true, 0); in genpd_resume_noirq()
1251 genpd->suspended_count--; in genpd_resume_noirq()
1252 genpd_unlock(genpd); in genpd_resume_noirq()
1254 if (genpd->dev_ops.stop && genpd->dev_ops.start && in genpd_resume_noirq()
1256 ret = genpd_start_dev(genpd, dev); in genpd_resume_noirq()
1275 const struct generic_pm_domain *genpd; in genpd_freeze_noirq() local
1280 genpd = dev_to_genpd(dev); in genpd_freeze_noirq()
1281 if (IS_ERR(genpd)) in genpd_freeze_noirq()
1288 if (genpd->dev_ops.stop && genpd->dev_ops.start && in genpd_freeze_noirq()
1290 ret = genpd_stop_dev(genpd, dev); in genpd_freeze_noirq()
1304 const struct generic_pm_domain *genpd; in genpd_thaw_noirq() local
1309 genpd = dev_to_genpd(dev); in genpd_thaw_noirq()
1310 if (IS_ERR(genpd)) in genpd_thaw_noirq()
1313 if (genpd->dev_ops.stop && genpd->dev_ops.start && in genpd_thaw_noirq()
1315 ret = genpd_start_dev(genpd, dev); in genpd_thaw_noirq()
1347 struct generic_pm_domain *genpd; in genpd_restore_noirq() local
1352 genpd = dev_to_genpd(dev); in genpd_restore_noirq()
1353 if (IS_ERR(genpd)) in genpd_restore_noirq()
1360 genpd_lock(genpd); in genpd_restore_noirq()
1361 if (genpd->suspended_count++ == 0) { in genpd_restore_noirq()
1367 genpd->status = GENPD_STATE_OFF; in genpd_restore_noirq()
1370 genpd_sync_power_on(genpd, true, 0); in genpd_restore_noirq()
1371 genpd_unlock(genpd); in genpd_restore_noirq()
1373 if (genpd->dev_ops.stop && genpd->dev_ops.start && in genpd_restore_noirq()
1375 ret = genpd_start_dev(genpd, dev); in genpd_restore_noirq()
1394 struct generic_pm_domain *genpd; in genpd_complete() local
1398 genpd = dev_to_genpd(dev); in genpd_complete()
1399 if (IS_ERR(genpd)) in genpd_complete()
1404 genpd_lock(genpd); in genpd_complete()
1406 genpd->prepared_count--; in genpd_complete()
1407 if (!genpd->prepared_count) in genpd_complete()
1408 genpd_queue_power_off_work(genpd); in genpd_complete()
1410 genpd_unlock(genpd); in genpd_complete()
1415 struct generic_pm_domain *genpd; in genpd_switch_state() local
1418 genpd = dev_to_genpd_safe(dev); in genpd_switch_state()
1419 if (!genpd) in genpd_switch_state()
1422 use_lock = genpd_is_irq_safe(genpd); in genpd_switch_state()
1425 genpd_lock(genpd); in genpd_switch_state()
1428 genpd->suspended_count++; in genpd_switch_state()
1429 genpd_sync_power_off(genpd, use_lock, 0); in genpd_switch_state()
1431 genpd_sync_power_on(genpd, use_lock, 0); in genpd_switch_state()
1432 genpd->suspended_count--; in genpd_switch_state()
1436 genpd_unlock(genpd); in genpd_switch_state()
1536 static void genpd_update_cpumask(struct generic_pm_domain *genpd, in genpd_update_cpumask() argument
1541 if (!genpd_is_cpu_domain(genpd)) in genpd_update_cpumask()
1544 list_for_each_entry(link, &genpd->child_links, child_node) { in genpd_update_cpumask()
1553 cpumask_set_cpu(cpu, genpd->cpus); in genpd_update_cpumask()
1555 cpumask_clear_cpu(cpu, genpd->cpus); in genpd_update_cpumask()
1558 static void genpd_set_cpumask(struct generic_pm_domain *genpd, int cpu) in genpd_set_cpumask() argument
1561 genpd_update_cpumask(genpd, cpu, true, 0); in genpd_set_cpumask()
1564 static void genpd_clear_cpumask(struct generic_pm_domain *genpd, int cpu) in genpd_clear_cpumask() argument
1567 genpd_update_cpumask(genpd, cpu, false, 0); in genpd_clear_cpumask()
1570 static int genpd_get_cpu(struct generic_pm_domain *genpd, struct device *dev) in genpd_get_cpu() argument
1574 if (!genpd_is_cpu_domain(genpd)) in genpd_get_cpu()
1585 static int genpd_add_device(struct generic_pm_domain *genpd, struct device *dev, in genpd_add_device() argument
1593 if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev)) in genpd_add_device()
1600 gpd_data->cpu = genpd_get_cpu(genpd, base_dev); in genpd_add_device()
1602 ret = genpd->attach_dev ? genpd->attach_dev(genpd, dev) : 0; in genpd_add_device()
1606 genpd_lock(genpd); in genpd_add_device()
1608 genpd_set_cpumask(genpd, gpd_data->cpu); in genpd_add_device()
1609 dev_pm_domain_set(dev, &genpd->domain); in genpd_add_device()
1611 genpd->device_count++; in genpd_add_device()
1612 genpd->max_off_time_changed = true; in genpd_add_device()
1614 list_add_tail(&gpd_data->base.list_node, &genpd->dev_list); in genpd_add_device()
1616 genpd_unlock(genpd); in genpd_add_device()
1632 int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev) in pm_genpd_add_device() argument
1637 ret = genpd_add_device(genpd, dev, dev); in pm_genpd_add_device()
1644 static int genpd_remove_device(struct generic_pm_domain *genpd, in genpd_remove_device() argument
1658 genpd_lock(genpd); in genpd_remove_device()
1660 if (genpd->prepared_count > 0) { in genpd_remove_device()
1665 genpd->device_count--; in genpd_remove_device()
1666 genpd->max_off_time_changed = true; in genpd_remove_device()
1668 genpd_clear_cpumask(genpd, gpd_data->cpu); in genpd_remove_device()
1673 genpd_unlock(genpd); in genpd_remove_device()
1675 if (genpd->detach_dev) in genpd_remove_device()
1676 genpd->detach_dev(genpd, dev); in genpd_remove_device()
1683 genpd_unlock(genpd); in genpd_remove_device()
1695 struct generic_pm_domain *genpd = dev_to_genpd_safe(dev); in pm_genpd_remove_device() local
1697 if (!genpd) in pm_genpd_remove_device()
1700 return genpd_remove_device(genpd, dev); in pm_genpd_remove_device()
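pm_genpd_add_device()/pm_genpd_remove_device() are the non-DT attach/detach entry points: attach sets up the per-device gpd_data, calls the domain's optional ->attach_dev() and links the device into dev_list; removal is refused while a system-wide transition is being prepared (prepared_count > 0). A sketch using the hypothetical my_pd from the earlier sketches:

#include <linux/platform_device.h>
#include <linux/pm_domain.h>

static int my_attach(struct platform_device *pdev)
{
	/* my_pd: hypothetical domain already registered with pm_genpd_init() */
	return pm_genpd_add_device(&my_pd, &pdev->dev);
}

static void my_detach(struct platform_device *pdev)
{
	pm_genpd_remove_device(&pdev->dev);
}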
1721 struct generic_pm_domain *genpd; in dev_pm_genpd_add_notifier() local
1725 genpd = dev_to_genpd_safe(dev); in dev_pm_genpd_add_notifier()
1726 if (!genpd) in dev_pm_genpd_add_notifier()
1737 genpd_lock(genpd); in dev_pm_genpd_add_notifier()
1738 ret = raw_notifier_chain_register(&genpd->power_notifiers, nb); in dev_pm_genpd_add_notifier()
1739 genpd_unlock(genpd); in dev_pm_genpd_add_notifier()
1743 genpd->name); in dev_pm_genpd_add_notifier()
1767 struct generic_pm_domain *genpd; in dev_pm_genpd_remove_notifier() local
1771 genpd = dev_to_genpd_safe(dev); in dev_pm_genpd_remove_notifier()
1772 if (!genpd) in dev_pm_genpd_remove_notifier()
1783 genpd_lock(genpd); in dev_pm_genpd_remove_notifier()
1784 ret = raw_notifier_chain_unregister(&genpd->power_notifiers, in dev_pm_genpd_remove_notifier()
1786 genpd_unlock(genpd); in dev_pm_genpd_remove_notifier()
1790 genpd->name); in dev_pm_genpd_remove_notifier()
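dev_pm_genpd_add_notifier()/dev_pm_genpd_remove_notifier() subscribe a device to its domain's power_notifiers chain, i.e. the GENPD_NOTIFY_* events raised by _genpd_power_on()/_genpd_power_off() above. A sketch of a consumer saving and restoring context around domain power cycles (names hypothetical):

#include <linux/notifier.h>
#include <linux/pm_domain.h>

static int my_genpd_notifier(struct notifier_block *nb, unsigned long action,
			     void *data)
{
	switch (action) {
	case GENPD_NOTIFY_PRE_OFF:
		/* the domain is about to power off: save hardware context */
		break;
	case GENPD_NOTIFY_ON:
		/* the domain is powered again: restore context */
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block my_genpd_nb = {
	.notifier_call = my_genpd_notifier,
};

/* At probe, once the device is attached to its domain:
 *	ret = dev_pm_genpd_add_notifier(dev, &my_genpd_nb);
 * and on removal:
 *	dev_pm_genpd_remove_notifier(dev);
 */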
1799 static int genpd_add_subdomain(struct generic_pm_domain *genpd, in genpd_add_subdomain() argument
1805 if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain) in genpd_add_subdomain()
1806 || genpd == subdomain) in genpd_add_subdomain()
1814 if (!genpd_is_irq_safe(genpd) && genpd_is_irq_safe(subdomain)) { in genpd_add_subdomain()
1816 genpd->name, subdomain->name); in genpd_add_subdomain()
1825 genpd_lock_nested(genpd, SINGLE_DEPTH_NESTING); in genpd_add_subdomain()
1827 if (!genpd_status_on(genpd) && genpd_status_on(subdomain)) { in genpd_add_subdomain()
1832 list_for_each_entry(itr, &genpd->parent_links, parent_node) { in genpd_add_subdomain()
1833 if (itr->child == subdomain && itr->parent == genpd) { in genpd_add_subdomain()
1839 link->parent = genpd; in genpd_add_subdomain()
1840 list_add_tail(&link->parent_node, &genpd->parent_links); in genpd_add_subdomain()
1844 genpd_sd_counter_inc(genpd); in genpd_add_subdomain()
1847 genpd_unlock(genpd); in genpd_add_subdomain()
1859 int pm_genpd_add_subdomain(struct generic_pm_domain *genpd, in pm_genpd_add_subdomain() argument
1865 ret = genpd_add_subdomain(genpd, subdomain); in pm_genpd_add_subdomain()
1877 int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd, in pm_genpd_remove_subdomain() argument
1883 if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain)) in pm_genpd_remove_subdomain()
1887 genpd_lock_nested(genpd, SINGLE_DEPTH_NESTING); in pm_genpd_remove_subdomain()
1891 genpd->name, subdomain->name); in pm_genpd_remove_subdomain()
1896 list_for_each_entry_safe(link, l, &genpd->parent_links, parent_node) { in pm_genpd_remove_subdomain()
1904 genpd_sd_counter_dec(genpd); in pm_genpd_remove_subdomain()
1911 genpd_unlock(genpd); in pm_genpd_remove_subdomain()
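pm_genpd_add_subdomain()/pm_genpd_remove_subdomain() wire one domain beneath another; the resulting gpd_link keeps the parent's sd_count raised while the child is powered, which is what the sd_count checks in genpd_power_off() above rely on. A sketch with hypothetical domains (note the IRQ-safety compatibility check at line 1814):

#include <linux/pm_domain.h>

/* soc_pd (parent) and gpu_pd (child) are hypothetical domains that have
 * already been registered with pm_genpd_init(). */
static int my_wire_domains(void)
{
	return pm_genpd_add_subdomain(&soc_pd, &gpu_pd);
}

static void my_unwire_domains(void)
{
	pm_genpd_remove_subdomain(&soc_pd, &gpu_pd);
}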
1924 static int genpd_set_default_power_state(struct generic_pm_domain *genpd) in genpd_set_default_power_state() argument
1932 genpd->states = state; in genpd_set_default_power_state()
1933 genpd->state_count = 1; in genpd_set_default_power_state()
1934 genpd->free_states = genpd_free_default_power_state; in genpd_set_default_power_state()
1939 static void genpd_lock_init(struct generic_pm_domain *genpd) in genpd_lock_init() argument
1941 if (genpd->flags & GENPD_FLAG_IRQ_SAFE) { in genpd_lock_init()
1942 spin_lock_init(&genpd->slock); in genpd_lock_init()
1943 genpd->lock_ops = &genpd_spin_ops; in genpd_lock_init()
1945 mutex_init(&genpd->mlock); in genpd_lock_init()
1946 genpd->lock_ops = &genpd_mtx_ops; in genpd_lock_init()
1958 int pm_genpd_init(struct generic_pm_domain *genpd, in pm_genpd_init() argument
1963 if (IS_ERR_OR_NULL(genpd)) in pm_genpd_init()
1966 INIT_LIST_HEAD(&genpd->parent_links); in pm_genpd_init()
1967 INIT_LIST_HEAD(&genpd->child_links); in pm_genpd_init()
1968 INIT_LIST_HEAD(&genpd->dev_list); in pm_genpd_init()
1969 RAW_INIT_NOTIFIER_HEAD(&genpd->power_notifiers); in pm_genpd_init()
1970 genpd_lock_init(genpd); in pm_genpd_init()
1971 genpd->gov = gov; in pm_genpd_init()
1972 INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn); in pm_genpd_init()
1973 atomic_set(&genpd->sd_count, 0); in pm_genpd_init()
1974 genpd->status = is_off ? GENPD_STATE_OFF : GENPD_STATE_ON; in pm_genpd_init()
1975 genpd->device_count = 0; in pm_genpd_init()
1976 genpd->max_off_time_ns = -1; in pm_genpd_init()
1977 genpd->max_off_time_changed = true; in pm_genpd_init()
1978 genpd->next_wakeup = KTIME_MAX; in pm_genpd_init()
1979 genpd->provider = NULL; in pm_genpd_init()
1980 genpd->has_provider = false; in pm_genpd_init()
1981 genpd->accounting_time = ktime_get(); in pm_genpd_init()
1982 genpd->domain.ops.runtime_suspend = genpd_runtime_suspend; in pm_genpd_init()
1983 genpd->domain.ops.runtime_resume = genpd_runtime_resume; in pm_genpd_init()
1984 genpd->domain.ops.prepare = genpd_prepare; in pm_genpd_init()
1985 genpd->domain.ops.suspend_noirq = genpd_suspend_noirq; in pm_genpd_init()
1986 genpd->domain.ops.resume_noirq = genpd_resume_noirq; in pm_genpd_init()
1987 genpd->domain.ops.freeze_noirq = genpd_freeze_noirq; in pm_genpd_init()
1988 genpd->domain.ops.thaw_noirq = genpd_thaw_noirq; in pm_genpd_init()
1989 genpd->domain.ops.poweroff_noirq = genpd_poweroff_noirq; in pm_genpd_init()
1990 genpd->domain.ops.restore_noirq = genpd_restore_noirq; in pm_genpd_init()
1991 genpd->domain.ops.complete = genpd_complete; in pm_genpd_init()
1992 genpd->domain.start = genpd_dev_pm_start; in pm_genpd_init()
1994 if (genpd->flags & GENPD_FLAG_PM_CLK) { in pm_genpd_init()
1995 genpd->dev_ops.stop = pm_clk_suspend; in pm_genpd_init()
1996 genpd->dev_ops.start = pm_clk_resume; in pm_genpd_init()
2000 if ((genpd_is_always_on(genpd) || genpd_is_rpm_always_on(genpd)) && in pm_genpd_init()
2001 !genpd_status_on(genpd)) in pm_genpd_init()
2004 if (genpd_is_cpu_domain(genpd) && in pm_genpd_init()
2005 !zalloc_cpumask_var(&genpd->cpus, GFP_KERNEL)) in pm_genpd_init()
2009 if (genpd->state_count == 0) { in pm_genpd_init()
2010 ret = genpd_set_default_power_state(genpd); in pm_genpd_init()
2012 if (genpd_is_cpu_domain(genpd)) in pm_genpd_init()
2013 free_cpumask_var(genpd->cpus); in pm_genpd_init()
2016 } else if (!gov && genpd->state_count > 1) { in pm_genpd_init()
2017 pr_warn("%s: no governor for states\n", genpd->name); in pm_genpd_init()
2020 device_initialize(&genpd->dev); in pm_genpd_init()
2021 dev_set_name(&genpd->dev, "%s", genpd->name); in pm_genpd_init()
2024 list_add(&genpd->gpd_list_node, &gpd_list); in pm_genpd_init()
2025 genpd_debug_add(genpd); in pm_genpd_init()
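pm_genpd_init() stitches the pieces above together: the lock ops are chosen from the flags, the genpd dev_pm_ops are installed, a default single idle state is allocated when none was supplied, and the domain is added to gpd_list. A sketch that reuses the hypothetical my_pd and callbacks from the earlier sketches:

#include <linux/pm_domain.h>

static int my_pd_setup(void)
{
	/* my_pd, my_pd_power_on() and my_pd_power_off() are the hypothetical
	 * pieces sketched earlier */
	my_pd.power_on = my_pd_power_on;
	my_pd.power_off = my_pd_power_off;

	/* register the domain powered off; simple_qos_governor decides when
	 * it may be powered down at runtime */
	return pm_genpd_init(&my_pd, &simple_qos_governor, true);
}

pm_genpd_remove() (line 2086) undoes this, but only once no provider, no devices and no subdomains reference the domain.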
2032 static int genpd_remove(struct generic_pm_domain *genpd) in genpd_remove() argument
2036 if (IS_ERR_OR_NULL(genpd)) in genpd_remove()
2039 genpd_lock(genpd); in genpd_remove()
2041 if (genpd->has_provider) { in genpd_remove()
2042 genpd_unlock(genpd); in genpd_remove()
2043 pr_err("Provider present, unable to remove %s\n", genpd->name); in genpd_remove()
2047 if (!list_empty(&genpd->parent_links) || genpd->device_count) { in genpd_remove()
2048 genpd_unlock(genpd); in genpd_remove()
2049 pr_err("%s: unable to remove %s\n", __func__, genpd->name); in genpd_remove()
2053 list_for_each_entry_safe(link, l, &genpd->child_links, child_node) { in genpd_remove()
2059 list_del(&genpd->gpd_list_node); in genpd_remove()
2060 genpd_unlock(genpd); in genpd_remove()
2061 genpd_debug_remove(genpd); in genpd_remove()
2062 cancel_work_sync(&genpd->power_off_work); in genpd_remove()
2063 if (genpd_is_cpu_domain(genpd)) in genpd_remove()
2064 free_cpumask_var(genpd->cpus); in genpd_remove()
2065 if (genpd->free_states) in genpd_remove()
2066 genpd->free_states(genpd->states, genpd->state_count); in genpd_remove()
2068 pr_debug("%s: removed %s\n", __func__, genpd->name); in genpd_remove()
2086 int pm_genpd_remove(struct generic_pm_domain *genpd) in pm_genpd_remove() argument
2091 ret = genpd_remove(genpd); in pm_genpd_remove()
2211 static bool genpd_present(const struct generic_pm_domain *genpd) in genpd_present() argument
2216 if (gpd == genpd) in genpd_present()
2227 struct generic_pm_domain *genpd) in of_genpd_add_provider_simple() argument
2231 if (!np || !genpd) in of_genpd_add_provider_simple()
2236 if (!genpd_present(genpd)) in of_genpd_add_provider_simple()
2239 genpd->dev.of_node = np; in of_genpd_add_provider_simple()
2242 if (genpd->set_performance_state) { in of_genpd_add_provider_simple()
2243 ret = dev_pm_opp_of_add_table(&genpd->dev); in of_genpd_add_provider_simple()
2246 dev_err(&genpd->dev, "Failed to add OPP table: %d\n", in of_genpd_add_provider_simple()
2255 genpd->opp_table = dev_pm_opp_get_opp_table(&genpd->dev); in of_genpd_add_provider_simple()
2256 WARN_ON(IS_ERR(genpd->opp_table)); in of_genpd_add_provider_simple()
2259 ret = genpd_add_provider(np, genpd_xlate_simple, genpd); in of_genpd_add_provider_simple()
2261 if (genpd->set_performance_state) { in of_genpd_add_provider_simple()
2262 dev_pm_opp_put_opp_table(genpd->opp_table); in of_genpd_add_provider_simple()
2263 dev_pm_opp_of_remove_table(&genpd->dev); in of_genpd_add_provider_simple()
2269 genpd->provider = &np->fwnode; in of_genpd_add_provider_simple()
2270 genpd->has_provider = true; in of_genpd_add_provider_simple()
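of_genpd_add_provider_simple() exposes a single, already-initialized domain to DT consumers; when the domain implements ->set_performance_state it also adds and links the domain's OPP table as shown above. A sketch from a hypothetical provider's probe:

#include <linux/platform_device.h>
#include <linux/pm_domain.h>

static int my_pd_register_provider(struct platform_device *pdev)
{
	/* my_pd: the hypothetical domain registered with pm_genpd_init() */
	return of_genpd_add_provider_simple(pdev->dev.of_node, &my_pd);
}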
2287 struct generic_pm_domain *genpd; in of_genpd_add_provider_onecell() local
2300 genpd = data->domains[i]; in of_genpd_add_provider_onecell()
2302 if (!genpd) in of_genpd_add_provider_onecell()
2304 if (!genpd_present(genpd)) in of_genpd_add_provider_onecell()
2307 genpd->dev.of_node = np; in of_genpd_add_provider_onecell()
2310 if (genpd->set_performance_state) { in of_genpd_add_provider_onecell()
2311 ret = dev_pm_opp_of_add_table_indexed(&genpd->dev, i); in of_genpd_add_provider_onecell()
2314 dev_err(&genpd->dev, "Failed to add OPP table for index %d: %d\n", in of_genpd_add_provider_onecell()
2323 genpd->opp_table = dev_pm_opp_get_opp_table_indexed(&genpd->dev, i); in of_genpd_add_provider_onecell()
2324 WARN_ON(IS_ERR(genpd->opp_table)); in of_genpd_add_provider_onecell()
2327 genpd->provider = &np->fwnode; in of_genpd_add_provider_onecell()
2328 genpd->has_provider = true; in of_genpd_add_provider_onecell()
2341 genpd = data->domains[i]; in of_genpd_add_provider_onecell()
2343 if (!genpd) in of_genpd_add_provider_onecell()
2346 genpd->provider = NULL; in of_genpd_add_provider_onecell()
2347 genpd->has_provider = false; in of_genpd_add_provider_onecell()
2349 if (genpd->set_performance_state) { in of_genpd_add_provider_onecell()
2350 dev_pm_opp_put_opp_table(genpd->opp_table); in of_genpd_add_provider_onecell()
2351 dev_pm_opp_of_remove_table(&genpd->dev); in of_genpd_add_provider_onecell()
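of_genpd_add_provider_onecell() registers several domains behind one provider node; each non-NULL entry must already be on gpd_list, and on failure everything set up so far is rolled back as shown above. A sketch with hypothetical domains:

#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>

/* my_pd_a and my_pd_b are hypothetical domains, each already registered
 * with pm_genpd_init(). */
static struct generic_pm_domain *my_domains[] = {
	&my_pd_a,
	&my_pd_b,
};

static struct genpd_onecell_data my_onecell_data = {
	.domains = my_domains,
	.num_domains = ARRAY_SIZE(my_domains),
};

static int my_register_onecell(struct platform_device *pdev)
{
	return of_genpd_add_provider_onecell(pdev->dev.of_node,
					     &my_onecell_data);
}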
2417 struct generic_pm_domain *genpd = ERR_PTR(-ENOENT); in genpd_get_from_provider() local
2428 genpd = provider->xlate(genpdspec, provider->data); in genpd_get_from_provider()
2429 if (!IS_ERR(genpd)) in genpd_get_from_provider()
2435 return genpd; in genpd_get_from_provider()
2448 struct generic_pm_domain *genpd; in of_genpd_add_device() local
2453 genpd = genpd_get_from_provider(genpdspec); in of_genpd_add_device()
2454 if (IS_ERR(genpd)) { in of_genpd_add_device()
2455 ret = PTR_ERR(genpd); in of_genpd_add_device()
2459 ret = genpd_add_device(genpd, dev, dev); in of_genpd_add_device()
2559 struct generic_pm_domain *gpd, *tmp, *genpd = ERR_PTR(-ENOENT); in of_genpd_remove_last() local
2569 genpd = ret ? ERR_PTR(ret) : gpd; in of_genpd_remove_last()
2575 return genpd; in of_genpd_remove_last()
2944 struct generic_pm_domain *genpd = NULL; in pm_genpd_opp_to_performance_state() local
2947 genpd = container_of(genpd_dev, struct generic_pm_domain, dev); in pm_genpd_opp_to_performance_state()
2949 if (unlikely(!genpd->opp_to_performance_state)) in pm_genpd_opp_to_performance_state()
2952 genpd_lock(genpd); in pm_genpd_opp_to_performance_state()
2953 state = genpd->opp_to_performance_state(genpd, opp); in pm_genpd_opp_to_performance_state()
2954 genpd_unlock(genpd); in pm_genpd_opp_to_performance_state()
2999 struct generic_pm_domain *genpd) in genpd_summary_one() argument
3011 ret = genpd_lock_interruptible(genpd); in genpd_summary_one()
3015 if (WARN_ON(genpd->status >= ARRAY_SIZE(status_lookup))) in genpd_summary_one()
3017 if (!genpd_status_on(genpd)) in genpd_summary_one()
3019 status_lookup[genpd->status], genpd->state_idx); in genpd_summary_one()
3022 status_lookup[genpd->status]); in genpd_summary_one()
3023 seq_printf(s, "%-30s %-15s ", genpd->name, state); in genpd_summary_one()
3030 list_for_each_entry(link, &genpd->parent_links, parent_node) { in genpd_summary_one()
3032 if (!list_is_last(&link->parent_node, &genpd->parent_links)) in genpd_summary_one()
3036 list_for_each_entry(pm_data, &genpd->dev_list, list_node) { in genpd_summary_one()
3038 genpd_is_irq_safe(genpd) ? in genpd_summary_one()
3050 genpd_unlock(genpd); in genpd_summary_one()
3057 struct generic_pm_domain *genpd; in summary_show() local
3068 list_for_each_entry(genpd, &gpd_list, gpd_list_node) { in summary_show()
3069 ret = genpd_summary_one(s, genpd); in summary_show()
3085 struct generic_pm_domain *genpd = s->private; in status_show() local
3088 ret = genpd_lock_interruptible(genpd); in status_show()
3092 if (WARN_ON_ONCE(genpd->status >= ARRAY_SIZE(status_lookup))) in status_show()
3095 if (genpd->status == GENPD_STATE_OFF) in status_show()
3096 seq_printf(s, "%s-%u\n", status_lookup[genpd->status], in status_show()
3097 genpd->state_idx); in status_show()
3099 seq_printf(s, "%s\n", status_lookup[genpd->status]); in status_show()
3101 genpd_unlock(genpd); in status_show()
3107 struct generic_pm_domain *genpd = s->private; in sub_domains_show() local
3111 ret = genpd_lock_interruptible(genpd); in sub_domains_show()
3115 list_for_each_entry(link, &genpd->parent_links, parent_node) in sub_domains_show()
3118 genpd_unlock(genpd); in sub_domains_show()
3124 struct generic_pm_domain *genpd = s->private; in idle_states_show() local
3128 ret = genpd_lock_interruptible(genpd); in idle_states_show()
3134 for (i = 0; i < genpd->state_count; i++) { in idle_states_show()
3138 if ((genpd->status == GENPD_STATE_OFF) && in idle_states_show()
3139 (genpd->state_idx == i)) in idle_states_show()
3140 delta = ktime_sub(ktime_get(), genpd->accounting_time); in idle_states_show()
3143 ktime_add(genpd->states[i].idle_time, delta)); in idle_states_show()
3145 genpd->states[i].usage, genpd->states[i].rejected); in idle_states_show()
3148 genpd_unlock(genpd); in idle_states_show()
3154 struct generic_pm_domain *genpd = s->private; in active_time_show() local
3158 ret = genpd_lock_interruptible(genpd); in active_time_show()
3162 if (genpd->status == GENPD_STATE_ON) in active_time_show()
3163 delta = ktime_sub(ktime_get(), genpd->accounting_time); in active_time_show()
3166 ktime_add(genpd->on_time, delta))); in active_time_show()
3168 genpd_unlock(genpd); in active_time_show()
3174 struct generic_pm_domain *genpd = s->private; in total_idle_time_show() local
3179 ret = genpd_lock_interruptible(genpd); in total_idle_time_show()
3183 for (i = 0; i < genpd->state_count; i++) { in total_idle_time_show()
3185 if ((genpd->status == GENPD_STATE_OFF) && in total_idle_time_show()
3186 (genpd->state_idx == i)) in total_idle_time_show()
3187 delta = ktime_sub(ktime_get(), genpd->accounting_time); in total_idle_time_show()
3189 total = ktime_add(total, genpd->states[i].idle_time); in total_idle_time_show()
3195 genpd_unlock(genpd); in total_idle_time_show()
3202 struct generic_pm_domain *genpd = s->private; in devices_show() local
3207 ret = genpd_lock_interruptible(genpd); in devices_show()
3211 list_for_each_entry(pm_data, &genpd->dev_list, list_node) { in devices_show()
3213 genpd_is_irq_safe(genpd) ? in devices_show()
3222 genpd_unlock(genpd); in devices_show()
3228 struct generic_pm_domain *genpd = s->private; in perf_state_show() local
3230 if (genpd_lock_interruptible(genpd)) in perf_state_show()
3233 seq_printf(s, "%u\n", genpd->performance_state); in perf_state_show()
3235 genpd_unlock(genpd); in perf_state_show()
3248 static void genpd_debug_add(struct generic_pm_domain *genpd) in genpd_debug_add() argument
3255 d = debugfs_create_dir(genpd->name, genpd_debugfs_dir); in genpd_debug_add()
3258 d, genpd, &status_fops); in genpd_debug_add()
3260 d, genpd, &sub_domains_fops); in genpd_debug_add()
3262 d, genpd, &idle_states_fops); in genpd_debug_add()
3264 d, genpd, &active_time_fops); in genpd_debug_add()
3266 d, genpd, &total_idle_time_fops); in genpd_debug_add()
3268 d, genpd, &devices_fops); in genpd_debug_add()
3269 if (genpd->set_performance_state) in genpd_debug_add()
3271 d, genpd, &perf_state_fops); in genpd_debug_add()
3276 struct generic_pm_domain *genpd; in genpd_debug_init() local
3283 list_for_each_entry(genpd, &gpd_list, gpd_list_node) in genpd_debug_init()
3284 genpd_debug_add(genpd); in genpd_debug_init()