/*
 *
 * (C) COPYRIGHT 2010-2016 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU licence.
 *
 * A copy of the licence is included with the program, and can also be obtained
 * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
 * Boston, MA 02110-1301, USA.
 *
 */

/**
 * @file mali_kbase_pm.c
 * Base kernel power management APIs
 */

#include <mali_kbase.h>
#include <mali_midg_regmap.h>
#include <mali_kbase_vinstr.h>

#include <mali_kbase_pm.h>

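/* Power up the GPU: thin wrapper that delegates to the hardware access PM
 * backend. */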
int kbase_pm_powerup(struct kbase_device *kbdev, unsigned int flags)
{
	return kbase_hwaccess_pm_powerup(kbdev, flags);
}

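/* Halt power management of the GPU: thin wrapper that delegates to the
 * hardware access PM backend. */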
void kbase_pm_halt(struct kbase_device *kbdev)
{
	kbase_hwaccess_pm_halt(kbdev);
}

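/* Take a PM active reference with no special handling for an in-progress
 * suspend (KBASE_PM_SUSPEND_HANDLER_NOT_POSSIBLE). */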
void kbase_pm_context_active(struct kbase_device *kbdev)
{
	(void)kbase_pm_context_active_handle_suspend(kbdev, KBASE_PM_SUSPEND_HANDLER_NOT_POSSIBLE);
}

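/* Take a PM active reference, applying suspend_handler if a suspend is in
 * progress. Returns 0 when the active count was incremented (powering on the
 * GPU if this is the first reference), or 1 when the request is refused
 * because the device is suspending. */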
int kbase_pm_context_active_handle_suspend(struct kbase_device *kbdev, enum kbase_pm_suspend_handler suspend_handler)
{
	struct kbasep_js_device_data *js_devdata = &kbdev->js_data;
	int c;
	int old_count;

	KBASE_DEBUG_ASSERT(kbdev != NULL);

	/* Trace timeline information about how long it took to handle the decision
	 * to power up. The event might occasionally be missed because the count is
	 * read outside of the mutex, but this is necessary to get the trace timing
	 * correct. */
	old_count = kbdev->pm.active_count;
	if (old_count == 0)
		kbase_timeline_pm_send_event(kbdev, KBASE_TIMELINE_PM_EVENT_GPU_ACTIVE);

	mutex_lock(&js_devdata->runpool_mutex);
	mutex_lock(&kbdev->pm.lock);
	if (kbase_pm_is_suspending(kbdev)) {
		switch (suspend_handler) {
		case KBASE_PM_SUSPEND_HANDLER_DONT_REACTIVATE:
			if (kbdev->pm.active_count != 0)
				break;
			/* FALLTHROUGH */
		case KBASE_PM_SUSPEND_HANDLER_DONT_INCREASE:
			mutex_unlock(&kbdev->pm.lock);
			mutex_unlock(&js_devdata->runpool_mutex);
			if (old_count == 0)
				kbase_timeline_pm_handle_event(kbdev, KBASE_TIMELINE_PM_EVENT_GPU_ACTIVE);
			return 1;

		case KBASE_PM_SUSPEND_HANDLER_NOT_POSSIBLE:
			/* FALLTHROUGH */
		default:
			KBASE_DEBUG_ASSERT_MSG(false, "unreachable");
			break;
		}
	}
	c = ++kbdev->pm.active_count;
	KBASE_TIMELINE_CONTEXT_ACTIVE(kbdev, c);
	KBASE_TRACE_ADD_REFCOUNT(kbdev, PM_CONTEXT_ACTIVE, NULL, NULL, 0u, c);

	/* Trace the event being handled */
	if (old_count == 0)
		kbase_timeline_pm_handle_event(kbdev, KBASE_TIMELINE_PM_EVENT_GPU_ACTIVE);

	if (c == 1)
		/* First context active: Power on the GPU and any cores requested by
		 * the policy */
		kbase_hwaccess_pm_gpu_active(kbdev);

	mutex_unlock(&kbdev->pm.lock);
	mutex_unlock(&js_devdata->runpool_mutex);

	return 0;
}

KBASE_EXPORT_TEST_API(kbase_pm_context_active);

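/* Drop a PM active reference. When the count reaches zero, the GPU is marked
 * idle and anyone waiting for a zero active count (e.g. suspend) is woken. */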
void kbase_pm_context_idle(struct kbase_device *kbdev)
{
	struct kbasep_js_device_data *js_devdata = &kbdev->js_data;
	int c;
	int old_count;

	KBASE_DEBUG_ASSERT(kbdev != NULL);

	/* Trace timeline information about how long it took to handle the decision
	 * to power down. The event might occasionally be missed because the count
	 * is read outside of the mutex, but this is necessary to get the trace
	 * timing correct. */
	old_count = kbdev->pm.active_count;
	if (old_count == 0)
		kbase_timeline_pm_send_event(kbdev, KBASE_TIMELINE_PM_EVENT_GPU_IDLE);

	mutex_lock(&js_devdata->runpool_mutex);
	mutex_lock(&kbdev->pm.lock);

	c = --kbdev->pm.active_count;
	KBASE_TIMELINE_CONTEXT_ACTIVE(kbdev, c);
	KBASE_TRACE_ADD_REFCOUNT(kbdev, PM_CONTEXT_IDLE, NULL, NULL, 0u, c);

	KBASE_DEBUG_ASSERT(c >= 0);

	/* Trace the event being handled */
	if (old_count == 0)
		kbase_timeline_pm_handle_event(kbdev, KBASE_TIMELINE_PM_EVENT_GPU_IDLE);

	if (c == 0) {
		/* Last context has gone idle */
		kbase_hwaccess_pm_gpu_idle(kbdev);

		/* Wake up anyone waiting for this to become 0 (e.g. suspend). The
		 * waiters must synchronize with us by locking the pm.lock after
		 * waiting */
		wake_up(&kbdev->pm.zero_active_count_wait);
	}

	mutex_unlock(&kbdev->pm.lock);
	mutex_unlock(&js_devdata->runpool_mutex);
}

KBASE_EXPORT_TEST_API(kbase_pm_context_idle);

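/* System suspend: suspend vinstr and the job scheduler, wait for the PM active
 * count to reach zero, then suspend the hardware access PM backend. */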
void kbase_pm_suspend(struct kbase_device *kbdev)
{
	KBASE_DEBUG_ASSERT(kbdev);

	/* Suspend vinstr.
	 * This call will block until vinstr is suspended. */
	kbase_vinstr_suspend(kbdev->vinstr_ctx);

	mutex_lock(&kbdev->pm.lock);
	KBASE_DEBUG_ASSERT(!kbase_pm_is_suspending(kbdev));
	kbdev->pm.suspending = true;
	mutex_unlock(&kbdev->pm.lock);

	/* From now on, the active count will drop towards zero. Sometimes, it'll
	 * go up briefly before going down again. However, once it reaches zero it
	 * will stay there - guaranteeing that we've idled all pm references */

	/* Suspend job scheduler and associated components, so that it releases all
	 * the PM active count references */
	kbasep_js_suspend(kbdev);

	/* Wait for the active count to reach zero. This is not the same as
	 * waiting for a power down, since not all policies power down when this
	 * reaches zero. */
	wait_event(kbdev->pm.zero_active_count_wait, kbdev->pm.active_count == 0);

	/* NOTE: We synchronize with anything that was just finishing a
	 * kbase_pm_context_idle() call by locking the pm.lock below */

	kbase_hwaccess_pm_suspend(kbdev);
}

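/* System resume: resume the hardware access PM backend and the job scheduler,
 * then resume vinstr. The GPU/cores are only left powered if the policy wants
 * them on. */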
void kbase_pm_resume(struct kbase_device *kbdev)
{
	/* MUST happen before any pm_context_active calls occur */
	kbase_hwaccess_pm_resume(kbdev);

	/* Initial active call, to power on the GPU/cores if needed */
	kbase_pm_context_active(kbdev);

	/* Resume any blocked atoms (which may cause contexts to be scheduled in
	 * and dependent atoms to run) */
	kbase_resume_suspended_soft_jobs(kbdev);

	/* Resume the Job Scheduler and associated components, and start running
	 * atoms */
	kbasep_js_resume(kbdev);

	/* Matching idle call, to power off the GPU/cores if we didn't actually
	 * need it and the policy doesn't want it on */
	kbase_pm_context_idle(kbdev);

	/* Resume vinstr operation */
	kbase_vinstr_resume(kbdev->vinstr_ctx);
}