// SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
/*
 *
 * (C) COPYRIGHT 2010-2022 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU license.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 */

/**
 * DOC: Base kernel power management APIs
 */

#include <mali_kbase.h>
#include <gpu/mali_kbase_gpu_regmap.h>
#include <mali_kbase_vinstr.h>
#include <mali_kbase_kinstr_prfcnt.h>
#include <hwcnt/mali_kbase_hwcnt_context.h>

#include <mali_kbase_pm.h>
#include <backend/gpu/mali_kbase_pm_internal.h>

#ifdef CONFIG_MALI_ARBITER_SUPPORT
#include <arbiter/mali_kbase_arbiter_pm.h>
#endif /* CONFIG_MALI_ARBITER_SUPPORT */

#include <backend/gpu/mali_kbase_clk_rate_trace_mgr.h>

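/* Thin wrapper around the PM backend: the power-up sequence itself (and the
 * interpretation of @flags) is delegated to kbase_hwaccess_pm_powerup(),
 * whose result is returned unchanged.
 */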
int kbase_pm_powerup(struct kbase_device *kbdev, unsigned int flags)
{
	return kbase_hwaccess_pm_powerup(kbdev, flags);
}

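/* Thin wrapper that asks the PM backend to halt the GPU via
 * kbase_hwaccess_pm_halt().
 */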
void kbase_pm_halt(struct kbase_device *kbdev)
{
	kbase_hwaccess_pm_halt(kbdev);
}

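/* Take a PM active reference unconditionally. This variant passes
 * KBASE_PM_SUSPEND_HANDLER_NOT_POSSIBLE, i.e. it asserts that a suspend
 * cannot be in progress here, so the return value of the handle_suspend
 * variant is deliberately ignored.
 */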
void kbase_pm_context_active(struct kbase_device *kbdev)
{
	(void)kbase_pm_context_active_handle_suspend(kbdev,
		KBASE_PM_SUSPEND_HANDLER_NOT_POSSIBLE);
}

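/* Take a PM active reference, honouring an in-progress system suspend
 * according to @suspend_handler. Returns 0 when the reference was taken
 * (powering the GPU on if this was the first active context), or 1 when the
 * caller must back off because a suspend is in progress (or, with arbiter
 * support, the arbiter refused the activation).
 */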
int kbase_pm_context_active_handle_suspend(struct kbase_device *kbdev,
	enum kbase_pm_suspend_handler suspend_handler)
{
	int c;

	KBASE_DEBUG_ASSERT(kbdev != NULL);
	dev_dbg(kbdev->dev, "%s - reason = %d, pid = %d\n", __func__,
		suspend_handler, current->pid);
	kbase_pm_lock(kbdev);

#ifdef CONFIG_MALI_ARBITER_SUPPORT
	if (kbase_arbiter_pm_ctx_active_handle_suspend(kbdev,
			suspend_handler)) {
		kbase_pm_unlock(kbdev);
		return 1;
	}
#endif /* CONFIG_MALI_ARBITER_SUPPORT */

	if (kbase_pm_is_suspending(kbdev)) {
		switch (suspend_handler) {
		case KBASE_PM_SUSPEND_HANDLER_DONT_REACTIVATE:
			if (kbdev->pm.active_count != 0)
				break;
			fallthrough;
		case KBASE_PM_SUSPEND_HANDLER_DONT_INCREASE:
			kbase_pm_unlock(kbdev);
			return 1;

		case KBASE_PM_SUSPEND_HANDLER_NOT_POSSIBLE:
			fallthrough;
		default:
			KBASE_DEBUG_ASSERT_MSG(false, "unreachable");
			break;
		}
	}
	c = ++kbdev->pm.active_count;
	KBASE_KTRACE_ADD(kbdev, PM_CONTEXT_ACTIVE, NULL, c);

	if (c == 1) {
		/* First context active: Power on the GPU and
		 * any cores requested by the policy
		 */
		kbase_hwaccess_pm_gpu_active(kbdev);
#ifdef CONFIG_MALI_ARBITER_SUPPORT
		kbase_arbiter_pm_vm_event(kbdev, KBASE_VM_REF_EVENT);
#endif /* CONFIG_MALI_ARBITER_SUPPORT */
		kbase_clk_rate_trace_manager_gpu_active(kbdev);
	}

	kbase_pm_unlock(kbdev);
	dev_dbg(kbdev->dev, "%s %d\n", __func__, kbdev->pm.active_count);

	return 0;
}

KBASE_EXPORT_TEST_API(kbase_pm_context_active);

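/* Drop a PM active reference previously taken with
 * kbase_pm_context_active*(). When the count reaches zero the GPU is marked
 * idle and anyone waiting on zero_active_count_wait (e.g. a pending suspend)
 * is woken.
 */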
void kbase_pm_context_idle(struct kbase_device *kbdev)
{
	int c;

	KBASE_DEBUG_ASSERT(kbdev != NULL);

	kbase_pm_lock(kbdev);

	c = --kbdev->pm.active_count;
	KBASE_KTRACE_ADD(kbdev, PM_CONTEXT_IDLE, NULL, c);

	KBASE_DEBUG_ASSERT(c >= 0);

	if (c == 0) {
		/* Last context has gone idle */
		kbase_hwaccess_pm_gpu_idle(kbdev);
		kbase_clk_rate_trace_manager_gpu_idle(kbdev);

		/* Wake up anyone waiting for this to become 0 (e.g. suspend).
		 * The waiters must synchronize with us by locking the pm.lock
		 * after waiting.
		 */
		wake_up(&kbdev->pm.zero_active_count_wait);
	}

	kbase_pm_unlock(kbdev);
	dev_dbg(kbdev->dev, "%s %d (pid = %d)\n", __func__,
		kbdev->pm.active_count, current->pid);
}

KBASE_EXPORT_TEST_API(kbase_pm_context_idle);

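/* Perform the driver-level suspend sequence: quiesce the HW counter
 * machinery, mark the device as suspending, stop the scheduler so it
 * releases its PM active references, wait for the active count to reach
 * zero, then suspend the PM backend. Returns 0 on success, or -1 if the CSF
 * scheduler or the PM backend could not be suspended (in which case the
 * suspending flag is cleared again).
 */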
int kbase_pm_driver_suspend(struct kbase_device *kbdev)
{
	KBASE_DEBUG_ASSERT(kbdev);

	/* Suspend HW counter intermediaries. This blocks until workers and timers
	 * are no longer running.
	 */
	kbase_vinstr_suspend(kbdev->vinstr_ctx);
	kbase_kinstr_prfcnt_suspend(kbdev->kinstr_prfcnt_ctx);

	/* Disable GPU hardware counters.
	 * This call will block until counters are disabled.
	 */
	kbase_hwcnt_context_disable(kbdev->hwcnt_gpu_ctx);

	mutex_lock(&kbdev->pm.lock);
	if (WARN_ON(kbase_pm_is_suspending(kbdev))) {
		mutex_unlock(&kbdev->pm.lock);
		return 0;
	}
	kbdev->pm.suspending = true;
	mutex_unlock(&kbdev->pm.lock);

#ifdef CONFIG_MALI_ARBITER_SUPPORT
	if (kbdev->arb.arb_if) {
		int i;
		unsigned long flags;

		spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
		kbdev->js_data.runpool_irq.submit_allowed = 0;
		kbase_disjoint_state_up(kbdev);
		for (i = 0; i < kbdev->gpu_props.num_job_slots; i++)
			kbase_job_slot_softstop(kbdev, i, NULL);
		spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
	}
#endif /* CONFIG_MALI_ARBITER_SUPPORT */

	/* From now on, the active count will drop towards zero. Sometimes,
	 * it'll go up briefly before going down again. However, once
	 * it reaches zero it will stay there - guaranteeing that we've idled
	 * all pm references.
	 */

#if !MALI_USE_CSF
	/* Suspend job scheduler and associated components, so that it releases
	 * all the PM active count references.
	 */
	kbasep_js_suspend(kbdev);
#else
	if (kbase_csf_scheduler_pm_suspend(kbdev)) {
		mutex_lock(&kbdev->pm.lock);
		kbdev->pm.suspending = false;
		mutex_unlock(&kbdev->pm.lock);
		return -1;
	}
#endif

	/* Wait for the active count to reach zero. This is not the same as
	 * waiting for a power down, since not all policies power down when this
	 * reaches zero.
	 */
	dev_dbg(kbdev->dev, ">wait_event - waiting for active_count == 0 (pid = %d)\n",
		current->pid);
	wait_event(kbdev->pm.zero_active_count_wait,
		kbdev->pm.active_count == 0);
	dev_dbg(kbdev->dev, ">wait_event - waiting done\n");

	/* NOTE: We synchronize with anything that was just finishing a
	 * kbase_pm_context_idle() call by locking the pm.lock below.
	 */
	if (kbase_hwaccess_pm_suspend(kbdev)) {
		mutex_lock(&kbdev->pm.lock);
		kbdev->pm.suspending = false;
		mutex_unlock(&kbdev->pm.lock);
		return -1;
	}

#ifdef CONFIG_MALI_ARBITER_SUPPORT
	if (kbdev->arb.arb_if) {
		mutex_lock(&kbdev->pm.arb_vm_state->vm_state_lock);
		kbase_arbiter_pm_vm_stopped(kbdev);
		mutex_unlock(&kbdev->pm.arb_vm_state->vm_state_lock);
	}
#endif /* CONFIG_MALI_ARBITER_SUPPORT */

	return 0;
}

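/* Perform the driver-level resume sequence: bring the PM backend back up,
 * take a temporary PM active reference so the GPU/cores power on, restart
 * the scheduler components (job-manager builds), drop the reference again,
 * then re-enable GPU hardware counters and the counter intermediaries.
 * @arb_gpu_start selects the arbiter-aware suspend handler when arbiter
 * support is compiled in.
 */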
void kbase_pm_driver_resume(struct kbase_device *kbdev, bool arb_gpu_start)
{
	unsigned long flags;

	/* MUST happen before any pm_context_active calls occur */
	kbase_hwaccess_pm_resume(kbdev);

	/* Initial active call, to power on the GPU/cores if needed */
#ifdef CONFIG_MALI_ARBITER_SUPPORT
	if (kbase_pm_context_active_handle_suspend(kbdev,
			(arb_gpu_start ?
				KBASE_PM_SUSPEND_HANDLER_VM_GPU_GRANTED :
				KBASE_PM_SUSPEND_HANDLER_NOT_POSSIBLE)))
		return;
#else
	kbase_pm_context_active(kbdev);
#endif

#if !MALI_USE_CSF
	/* Resume any blocked atoms (which may cause contexts to be scheduled in
	 * and dependent atoms to run)
	 */
	kbase_resume_suspended_soft_jobs(kbdev);

	/* Resume the Job Scheduler and associated components, and start running
	 * atoms
	 */
	kbasep_js_resume(kbdev);
#endif

	/* Matching idle call, to power off the GPU/cores if we didn't actually
	 * need it and the policy doesn't want it on
	 */
	kbase_pm_context_idle(kbdev);

	/* Re-enable GPU hardware counters */
#if MALI_USE_CSF
	kbase_csf_scheduler_spin_lock(kbdev, &flags);
	kbase_hwcnt_context_enable(kbdev->hwcnt_gpu_ctx);
	kbase_csf_scheduler_spin_unlock(kbdev, flags);
#else
	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
	kbase_hwcnt_context_enable(kbdev->hwcnt_gpu_ctx);
	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
#endif

	/* Resume HW counter intermediaries. */
	kbase_vinstr_resume(kbdev->vinstr_ctx);
	kbase_kinstr_prfcnt_resume(kbdev->kinstr_prfcnt_ctx);
}

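/* System suspend entry point. With arbiter support and an arbiter interface
 * present, the suspend is routed through the arbiter as a
 * KBASE_VM_OS_SUSPEND_EVENT; otherwise kbase_pm_driver_suspend() is called
 * directly and its result returned.
 */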
int kbase_pm_suspend(struct kbase_device *kbdev)
{
	int result = 0;
#ifdef CONFIG_MALI_ARBITER_SUPPORT
	if (kbdev->arb.arb_if)
		kbase_arbiter_pm_vm_event(kbdev, KBASE_VM_OS_SUSPEND_EVENT);
	else
		result = kbase_pm_driver_suspend(kbdev);
#else
	result = kbase_pm_driver_suspend(kbdev);
#endif /* CONFIG_MALI_ARBITER_SUPPORT */

	return result;
}

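/* System resume entry point, mirroring kbase_pm_suspend(): routed through
 * the arbiter as a KBASE_VM_OS_RESUME_EVENT when an arbiter interface is
 * present, otherwise kbase_pm_driver_resume() is called with
 * arb_gpu_start = false.
 */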
void kbase_pm_resume(struct kbase_device *kbdev)
{
#ifdef CONFIG_MALI_ARBITER_SUPPORT
	if (kbdev->arb.arb_if)
		kbase_arbiter_pm_vm_event(kbdev, KBASE_VM_OS_RESUME_EVENT);
	else
		kbase_pm_driver_resume(kbdev, false);
#else
	kbase_pm_driver_resume(kbdev, false);
#endif /* CONFIG_MALI_ARBITER_SUPPORT */
}