/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2019 Intel Corporation
 */

#ifndef __INTEL_RUNTIME_PM_H__
#define __INTEL_RUNTIME_PM_H__

#include <linux/types.h>

#include "display/intel_display.h"

#include "intel_wakeref.h"

#include "i915_utils.h"

struct device;
struct drm_i915_private;
struct drm_printer;

enum i915_drm_suspend_mode {
	I915_DRM_SUSPEND_IDLE,
	I915_DRM_SUSPEND_MEM,
	I915_DRM_SUSPEND_HIBERNATE,
};

/*
 * This struct helps tracking the state needed for runtime PM, which puts the
 * device in PCI D3 state. Notice that when this happens, nothing on the
 * graphics device works, even register access, so we don't get interrupts
 * nor anything else.
 *
 * Every piece of our code that needs to actually touch the hardware needs to
 * either call intel_runtime_pm_get or call intel_display_power_get with the
 * appropriate power domain.
 *
 * Our driver uses the autosuspend delay feature, which means we'll only really
 * suspend if we stay with zero refcount for a certain amount of time. The
 * default value is currently very conservative (see intel_runtime_pm_enable),
 * but it can be changed with the standard runtime PM files from sysfs.
 *
 * The irqs_enabled variable becomes false exactly after we disable the IRQs
 * and goes back to true exactly before we reenable them. We use this variable
 * to check if someone is trying to enable/disable IRQs while they're supposed
 * to be disabled. This shouldn't happen, and we'll print some error messages
 * when it does.
 *
 * For more, read Documentation/power/runtime_pm.rst.
 */
struct intel_runtime_pm {
	atomic_t wakeref_count;
	struct device *kdev; /* points to i915->drm.pdev->dev */
	bool available;
	bool suspended;
	bool irqs_enabled;

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
	/*
	 * To aid detection of wakeref leaks and general misuse, we
	 * track all wakeref holders. With manual markup (i.e. returning
	 * a cookie to each rpm_get caller which they then supply to their
	 * paired rpm_put) we can remove the corresponding pairs and keep
	 * the array trimmed to active wakerefs.
	 */
	struct intel_runtime_pm_debug {
		spinlock_t lock;

		depot_stack_handle_t last_acquire;
		depot_stack_handle_t last_release;

		depot_stack_handle_t *owners;
		unsigned long count;
	} debug;
#endif
};

#define BITS_PER_WAKEREF	\
	BITS_PER_TYPE(struct_member(struct intel_runtime_pm, wakeref_count))
#define INTEL_RPM_WAKELOCK_SHIFT	(BITS_PER_WAKEREF / 2)
#define INTEL_RPM_WAKELOCK_BIAS		(1 << INTEL_RPM_WAKELOCK_SHIFT)
#define INTEL_RPM_RAW_WAKEREF_MASK	(INTEL_RPM_WAKELOCK_BIAS - 1)
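
/*
 * Worked example (illustrative, assuming a 32-bit atomic_t): the single
 * wakeref_count atomic packs two counters. Raw wakerefs live in the low
 * 16 bits (INTEL_RPM_RAW_WAKEREF_MASK == 0xffff), while wakelock refs
 * are counted in units of INTEL_RPM_WAKELOCK_BIAS (0x10000) in the high
 * 16 bits. A wakeref_count of 0x00020003 therefore decodes to 2 wakelock
 * refs and 3 raw wakerefs via the two helpers below.
 */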

static inline int
intel_rpm_raw_wakeref_count(int wakeref_count)
{
	return wakeref_count & INTEL_RPM_RAW_WAKEREF_MASK;
}

static inline int
intel_rpm_wakelock_count(int wakeref_count)
{
	return wakeref_count >> INTEL_RPM_WAKELOCK_SHIFT;
}

static inline void
assert_rpm_device_not_suspended(struct intel_runtime_pm *rpm)
{
	WARN_ONCE(rpm->suspended,
		  "Device suspended during HW access\n");
}

static inline void
__assert_rpm_raw_wakeref_held(struct intel_runtime_pm *rpm, int wakeref_count)
{
	assert_rpm_device_not_suspended(rpm);
	WARN_ONCE(!intel_rpm_raw_wakeref_count(wakeref_count),
		  "RPM raw-wakeref not held\n");
}

static inline void
__assert_rpm_wakelock_held(struct intel_runtime_pm *rpm, int wakeref_count)
{
	__assert_rpm_raw_wakeref_held(rpm, wakeref_count);
	WARN_ONCE(!intel_rpm_wakelock_count(wakeref_count),
		  "RPM wakelock ref not held during HW access\n");
}

static inline void
assert_rpm_raw_wakeref_held(struct intel_runtime_pm *rpm)
{
	__assert_rpm_raw_wakeref_held(rpm, atomic_read(&rpm->wakeref_count));
}

static inline void
assert_rpm_wakelock_held(struct intel_runtime_pm *rpm)
{
	__assert_rpm_wakelock_held(rpm, atomic_read(&rpm->wakeref_count));
}
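
/*
 * Illustrative sketch (hypothetical helper, not part of the original
 * header): code paths that touch the hardware are expected to assert
 * that a wakelock ref is held, so a missing intel_runtime_pm_get()
 * shows up as a one-time warning rather than a silent dead HW access.
 */
static inline void example_assert_before_hw_access(struct intel_runtime_pm *rpm)
{
	assert_rpm_wakelock_held(rpm);
	/* ... MMIO access is only valid past this point ... */
}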

/**
 * disable_rpm_wakeref_asserts - disable the RPM assert checks
 * @rpm: the intel_runtime_pm structure
 *
 * This function disables the asserts that check whether we hold an RPM
 * wakelock reference, while keeping the device-not-suspended checks still
 * enabled. It's meant to be used only in special circumstances where our
 * rule about the wakelock refcount wrt. the device power state doesn't hold.
 * According to this rule, at any point where we access the HW or want to keep
 * the HW in an active state we must hold an RPM wakelock reference acquired
 * via one of the intel_runtime_pm_get() helpers. Currently there are a few
 * special spots where this rule doesn't hold: the IRQ and suspend/resume
 * handlers, the forcewake release timer, and the GPU RPS and hangcheck works.
 * All other users should avoid using this function.
 *
 * Any calls to this function must have a symmetric call to
 * enable_rpm_wakeref_asserts().
 */
static inline void
disable_rpm_wakeref_asserts(struct intel_runtime_pm *rpm)
{
	atomic_add(INTEL_RPM_WAKELOCK_BIAS + 1,
		   &rpm->wakeref_count);
}

/**
 * enable_rpm_wakeref_asserts - re-enable the RPM assert checks
 * @rpm: the intel_runtime_pm structure
 *
 * This function re-enables the RPM assert checks after disabling them with
 * disable_rpm_wakeref_asserts. It's meant to be used only in special
 * circumstances; otherwise its use should be avoided.
 *
 * Any calls to this function must have a symmetric call to
 * disable_rpm_wakeref_asserts().
 */
static inline void
enable_rpm_wakeref_asserts(struct intel_runtime_pm *rpm)
{
	atomic_sub(INTEL_RPM_WAKELOCK_BIAS + 1,
		   &rpm->wakeref_count);
}
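
/*
 * Illustrative sketch (hypothetical helper, not part of the original
 * header): the disable/enable calls must bracket the special-case code
 * symmetrically, much like a lock/unlock pair. Note that the BIAS + 1
 * added above fakes one wakelock ref and one raw wakeref, so both
 * assert flavours pass while the asserts are disabled.
 */
static inline void example_special_case_path(struct intel_runtime_pm *rpm)
{
	disable_rpm_wakeref_asserts(rpm);
	/* ... code that may legitimately run without a wakelock ref ... */
	enable_rpm_wakeref_asserts(rpm);
}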

void intel_runtime_pm_init_early(struct intel_runtime_pm *rpm);
void intel_runtime_pm_enable(struct intel_runtime_pm *rpm);
void intel_runtime_pm_disable(struct intel_runtime_pm *rpm);
void intel_runtime_pm_driver_release(struct intel_runtime_pm *rpm);

intel_wakeref_t intel_runtime_pm_get(struct intel_runtime_pm *rpm);
intel_wakeref_t intel_runtime_pm_get_if_in_use(struct intel_runtime_pm *rpm);
intel_wakeref_t intel_runtime_pm_get_if_active(struct intel_runtime_pm *rpm);
intel_wakeref_t intel_runtime_pm_get_noresume(struct intel_runtime_pm *rpm);
intel_wakeref_t intel_runtime_pm_get_raw(struct intel_runtime_pm *rpm);

#define with_intel_runtime_pm(rpm, wf) \
	for ((wf) = intel_runtime_pm_get(rpm); (wf); \
	     intel_runtime_pm_put((rpm), (wf)), (wf) = 0)

#define with_intel_runtime_pm_if_in_use(rpm, wf) \
	for ((wf) = intel_runtime_pm_get_if_in_use(rpm); (wf); \
	     intel_runtime_pm_put((rpm), (wf)), (wf) = 0)

#define with_intel_runtime_pm_if_active(rpm, wf) \
	for ((wf) = intel_runtime_pm_get_if_active(rpm); (wf); \
	     intel_runtime_pm_put((rpm), (wf)), (wf) = 0)

void intel_runtime_pm_put_unchecked(struct intel_runtime_pm *rpm);
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
void intel_runtime_pm_put(struct intel_runtime_pm *rpm, intel_wakeref_t wref);
#else
static inline void
intel_runtime_pm_put(struct intel_runtime_pm *rpm, intel_wakeref_t wref)
{
	intel_runtime_pm_put_unchecked(rpm);
}
#endif
void intel_runtime_pm_put_raw(struct intel_runtime_pm *rpm, intel_wakeref_t wref);
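
/*
 * Illustrative sketch (hypothetical helper, not part of the original
 * header): the explicit get/put pair and the with_intel_runtime_pm()
 * style helpers above are equivalent; the macros simply scope the
 * wakeref to the loop body and zero the cookie on exit.
 */
static inline void example_runtime_pm_usage(struct intel_runtime_pm *rpm)
{
	intel_wakeref_t wakeref;

	/* Explicit form: the cookie from get must be fed back to put. */
	wakeref = intel_runtime_pm_get(rpm);
	/* ... hardware access ... */
	intel_runtime_pm_put(rpm, wakeref);

	/* Scoped form: the body is skipped if the device is not in use. */
	with_intel_runtime_pm_if_in_use(rpm, wakeref) {
		/* ... hardware access ... */
	}
}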

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
void print_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm,
				    struct drm_printer *p);
#else
static inline void print_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm,
						  struct drm_printer *p)
{
}
#endif

#endif /* __INTEL_RUNTIME_PM_H__ */