// SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
/*
 *
 * (C) COPYRIGHT 2010-2022 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU license.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 */

/*
 * GPU backend implementation of base kernel power management APIs
 */

#include <mali_kbase.h>
#include <gpu/mali_kbase_gpu_regmap.h>
#include <mali_kbase_config_defaults.h>

#include <mali_kbase_pm.h>
#if !MALI_USE_CSF
#include <mali_kbase_hwaccess_jm.h>
#include <backend/gpu/mali_kbase_js_internal.h>
#include <backend/gpu/mali_kbase_jm_internal.h>
#else
#include <linux/pm_runtime.h>
#include <mali_kbase_reset_gpu.h>
#endif /* !MALI_USE_CSF */
#include <hwcnt/mali_kbase_hwcnt_context.h>
#include <backend/gpu/mali_kbase_pm_internal.h>
#include <backend/gpu/mali_kbase_devfreq.h>
#include <mali_kbase_dummy_job_wa.h>
#include <backend/gpu/mali_kbase_irq_internal.h>

static void kbase_pm_gpu_poweroff_wait_wq(struct work_struct *data);
static void kbase_pm_hwcnt_disable_worker(struct work_struct *data);
static void kbase_pm_gpu_clock_control_worker(struct work_struct *data);

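/*
 * kbase_pm_runtime_init: populate the backend PM callbacks from the platform
 * configuration (POWER_MANAGEMENT_CALLBACKS). If a runtime init callback is
 * provided it is invoked; otherwise all callbacks are cleared and 0 is
 * returned.
 */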
int kbase_pm_runtime_init(struct kbase_device *kbdev)
{
	struct kbase_pm_callback_conf *callbacks;

	callbacks = (struct kbase_pm_callback_conf *)POWER_MANAGEMENT_CALLBACKS;
	if (callbacks) {
		kbdev->pm.backend.callback_power_on =
					callbacks->power_on_callback;
		kbdev->pm.backend.callback_power_off =
					callbacks->power_off_callback;
		kbdev->pm.backend.callback_power_suspend =
					callbacks->power_suspend_callback;
		kbdev->pm.backend.callback_power_resume =
					callbacks->power_resume_callback;
		kbdev->pm.callback_power_runtime_init =
					callbacks->power_runtime_init_callback;
		kbdev->pm.callback_power_runtime_term =
					callbacks->power_runtime_term_callback;
		kbdev->pm.backend.callback_power_runtime_on =
					callbacks->power_runtime_on_callback;
		kbdev->pm.backend.callback_power_runtime_off =
					callbacks->power_runtime_off_callback;
		kbdev->pm.backend.callback_power_runtime_idle =
					callbacks->power_runtime_idle_callback;
		kbdev->pm.backend.callback_soft_reset =
					callbacks->soft_reset_callback;
		kbdev->pm.backend.callback_power_runtime_gpu_idle =
					callbacks->power_runtime_gpu_idle_callback;
		kbdev->pm.backend.callback_power_runtime_gpu_active =
					callbacks->power_runtime_gpu_active_callback;

		if (callbacks->power_runtime_init_callback)
			return callbacks->power_runtime_init_callback(kbdev);
		else
			return 0;
	}

	kbdev->pm.backend.callback_power_on = NULL;
	kbdev->pm.backend.callback_power_off = NULL;
	kbdev->pm.backend.callback_power_suspend = NULL;
	kbdev->pm.backend.callback_power_resume = NULL;
	kbdev->pm.callback_power_runtime_init = NULL;
	kbdev->pm.callback_power_runtime_term = NULL;
	kbdev->pm.backend.callback_power_runtime_on = NULL;
	kbdev->pm.backend.callback_power_runtime_off = NULL;
	kbdev->pm.backend.callback_power_runtime_idle = NULL;
	kbdev->pm.backend.callback_soft_reset = NULL;
	kbdev->pm.backend.callback_power_runtime_gpu_idle = NULL;
	kbdev->pm.backend.callback_power_runtime_gpu_active = NULL;

	return 0;
}

void kbase_pm_runtime_term(struct kbase_device *kbdev)
{
	if (kbdev->pm.callback_power_runtime_term)
		kbdev->pm.callback_power_runtime_term(kbdev);
}

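/*
 * Power on the GPU via the platform power-on callback so that its registers
 * can be accessed, and mark the GPU as powered. The matching
 * kbase_pm_register_access_disable() below reverses this.
 */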
void kbase_pm_register_access_enable(struct kbase_device *kbdev)
{
	struct kbase_pm_callback_conf *callbacks;

	callbacks = (struct kbase_pm_callback_conf *)POWER_MANAGEMENT_CALLBACKS;

	if (callbacks)
		callbacks->power_on_callback(kbdev);

#ifdef CONFIG_MALI_ARBITER_SUPPORT
	if (WARN_ON(kbase_pm_is_gpu_lost(kbdev)))
		dev_err(kbdev->dev, "Attempting to power on while GPU lost\n");
#endif

	kbdev->pm.backend.gpu_powered = true;
}

void kbase_pm_register_access_disable(struct kbase_device *kbdev)
{
	struct kbase_pm_callback_conf *callbacks;

	callbacks = (struct kbase_pm_callback_conf *)POWER_MANAGEMENT_CALLBACKS;

	kbdev->pm.backend.gpu_powered = false;

	if (callbacks)
		callbacks->power_off_callback(kbdev);
}

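/*
 * kbase_hwaccess_pm_init: one-time initialisation of the backend PM state:
 * locks, wait queues, the power-off wait workqueue, core availability,
 * policy and state machine, hardware counter bookkeeping, and the
 * GPU2017-1336 workaround configuration.
 */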
int kbase_hwaccess_pm_init(struct kbase_device *kbdev)
{
	int ret = 0;

	KBASE_DEBUG_ASSERT(kbdev != NULL);

	mutex_init(&kbdev->pm.lock);

	kbdev->pm.backend.gpu_poweroff_wait_wq = alloc_workqueue("kbase_pm_poweroff_wait",
			WQ_HIGHPRI | WQ_UNBOUND, 1);
	if (!kbdev->pm.backend.gpu_poweroff_wait_wq)
		return -ENOMEM;

	INIT_WORK(&kbdev->pm.backend.gpu_poweroff_wait_work,
			kbase_pm_gpu_poweroff_wait_wq);

	kbdev->pm.backend.ca_cores_enabled = ~0ull;
	kbdev->pm.backend.gpu_powered = false;
	kbdev->pm.backend.gpu_ready = false;
	kbdev->pm.suspending = false;
#ifdef CONFIG_MALI_ARBITER_SUPPORT
	kbase_pm_set_gpu_lost(kbdev, false);
#endif
#ifdef CONFIG_MALI_BIFROST_DEBUG
	kbdev->pm.backend.driver_ready_for_irqs = false;
#endif /* CONFIG_MALI_BIFROST_DEBUG */
	init_waitqueue_head(&kbdev->pm.backend.gpu_in_desired_state_wait);

#if !MALI_USE_CSF
	/* Initialise the metrics subsystem */
	ret = kbasep_pm_metrics_init(kbdev);
	if (ret)
		return ret;
#else
	mutex_init(&kbdev->pm.backend.policy_change_lock);
	kbdev->pm.backend.policy_change_clamp_state_to_off = false;
	/* Due to the dependency on kbase_ipa_control, the metrics subsystem
	 * can't be initialized here.
	 */
	CSTD_UNUSED(ret);
#endif

	init_waitqueue_head(&kbdev->pm.backend.reset_done_wait);
	kbdev->pm.backend.reset_done = false;

	init_waitqueue_head(&kbdev->pm.zero_active_count_wait);
	init_waitqueue_head(&kbdev->pm.resume_wait);
	kbdev->pm.active_count = 0;

	spin_lock_init(&kbdev->pm.backend.gpu_cycle_counter_requests_lock);

	init_waitqueue_head(&kbdev->pm.backend.poweroff_wait);

	if (kbase_pm_ca_init(kbdev) != 0)
		goto workq_fail;

	kbase_pm_policy_init(kbdev);

	if (kbase_pm_state_machine_init(kbdev) != 0)
		goto pm_state_machine_fail;

	kbdev->pm.backend.hwcnt_desired = false;
	kbdev->pm.backend.hwcnt_disabled = true;
	INIT_WORK(&kbdev->pm.backend.hwcnt_disable_work,
		kbase_pm_hwcnt_disable_worker);
	kbase_hwcnt_context_disable(kbdev->hwcnt_gpu_ctx);

#if MALI_USE_CSF && defined(KBASE_PM_RUNTIME)
	kbdev->pm.backend.gpu_sleep_supported =
		kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_GPU_SLEEP) &&
		!kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_TURSEHW_1997) &&
		kbdev->pm.backend.callback_power_runtime_gpu_active &&
		kbdev->pm.backend.callback_power_runtime_gpu_idle;
#endif

	if (IS_ENABLED(CONFIG_MALI_HW_ERRATA_1485982_NOT_AFFECTED)) {
		kbdev->pm.backend.l2_always_on = false;
		kbdev->pm.backend.gpu_clock_slow_down_wa = false;

		return 0;
	}

	/* WA1: L2 always_on for GPUs affected by GPU2017-1336 */
	if (!IS_ENABLED(CONFIG_MALI_HW_ERRATA_1485982_USE_CLOCK_ALTERNATIVE)) {
		kbdev->pm.backend.gpu_clock_slow_down_wa = false;
		if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_GPU2017_1336))
			kbdev->pm.backend.l2_always_on = true;
		else
			kbdev->pm.backend.l2_always_on = false;

		return 0;
	}

	/* WA3: Clock slow-down for GPUs affected by GPU2017-1336 */
	kbdev->pm.backend.l2_always_on = false;
	if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_GPU2017_1336)) {
		kbdev->pm.backend.gpu_clock_slow_down_wa = true;
		kbdev->pm.backend.gpu_clock_suspend_freq = 0;
		kbdev->pm.backend.gpu_clock_slow_down_desired = true;
		kbdev->pm.backend.gpu_clock_slowed_down = false;
		INIT_WORK(&kbdev->pm.backend.gpu_clock_control_work,
			kbase_pm_gpu_clock_control_worker);
	} else
		kbdev->pm.backend.gpu_clock_slow_down_wa = false;

	return 0;

pm_state_machine_fail:
	kbase_pm_policy_term(kbdev);
	kbase_pm_ca_term(kbdev);
workq_fail:
#if !MALI_USE_CSF
	kbasep_pm_metrics_term(kbdev);
#endif
	return -EINVAL;
}

void kbase_pm_do_poweron(struct kbase_device *kbdev, bool is_resume)
{
	lockdep_assert_held(&kbdev->pm.lock);

	/* Turn clocks and interrupts on - no-op if we haven't done a previous
	 * kbase_pm_clock_off()
	 */
	kbase_pm_clock_on(kbdev, is_resume);

	if (!is_resume) {
		unsigned long flags;

		/* Force update of L2 state - if we have abandoned a power off
		 * then this may be required to power the L2 back on.
		 */
		spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
		kbase_pm_update_state(kbdev);
		spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
	}

	/* Update core status as required by the policy */
	kbase_pm_update_cores_state(kbdev);

	/* NOTE: We don't wait to reach the desired state, since running atoms
	 * will wait for that state to be reached anyway
	 */
}

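/*
 * pm_handle_power_off: final stage of a deferred power off, run from the
 * power-off wait worker with the PM lock held. It bails out if power on has
 * been requested again or a GPU wakeup override is active, hands the GPU to
 * the runtime idle callback when one is registered, and otherwise turns the
 * clock off, flushing pending page/bus fault work first if necessary.
 */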
static void pm_handle_power_off(struct kbase_device *kbdev)
{
	struct kbase_pm_backend_data *backend = &kbdev->pm.backend;
#if MALI_USE_CSF
	enum kbase_mcu_state mcu_state;
#endif
	unsigned long flags;

	lockdep_assert_held(&kbdev->pm.lock);

	if (backend->poweron_required)
		return;

	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
#if MALI_USE_CSF && defined(KBASE_PM_RUNTIME)
	if (kbdev->pm.backend.gpu_wakeup_override) {
		spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
		return;
	}
#endif
	WARN_ON(backend->shaders_state !=
			KBASE_SHADERS_OFF_CORESTACK_OFF ||
		backend->l2_state != KBASE_L2_OFF);
#if MALI_USE_CSF
	mcu_state = backend->mcu_state;
	WARN_ON(!kbase_pm_is_mcu_inactive(kbdev, mcu_state));
#endif
	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);

#if MALI_USE_CSF && defined(KBASE_PM_RUNTIME)
	if (backend->callback_power_runtime_gpu_idle) {
		WARN_ON(backend->gpu_idled);
		backend->callback_power_runtime_gpu_idle(kbdev);
		backend->gpu_idled = true;
		return;
	}
#endif

	/* Disable interrupts and turn the clock off */
	if (!kbase_pm_clock_off(kbdev)) {
		/*
		 * Page/bus faults are pending, must drop locks to
		 * process. Interrupts are disabled so no more faults
		 * should be generated at this point.
		 */
		kbase_pm_unlock(kbdev);
		kbase_flush_mmu_wqs(kbdev);
		kbase_pm_lock(kbdev);

#ifdef CONFIG_MALI_ARBITER_SUPPORT
		/* poweron_required may have changed while the pm lock
		 * was released.
		 */
		if (kbase_pm_is_gpu_lost(kbdev))
			backend->poweron_required = false;
#endif

		/* Turn off the clock now that the faults have been handled.
		 * We dropped the locks, so poweron_required may have changed -
		 * power back on if this is the case (effectively only
		 * re-enabling of the interrupts would be done in this
		 * case, as the clocks to the GPU were not withdrawn yet).
		 */
		if (backend->poweron_required)
			kbase_pm_clock_on(kbdev, false);
		else
			WARN_ON(!kbase_pm_clock_off(kbdev));
	}
}

static void kbase_pm_gpu_poweroff_wait_wq(struct work_struct *data)
{
	struct kbase_device *kbdev = container_of(data, struct kbase_device,
			pm.backend.gpu_poweroff_wait_work);
	struct kbase_pm_device_data *pm = &kbdev->pm;
	struct kbase_pm_backend_data *backend = &pm->backend;
	unsigned long flags;

	KBASE_KTRACE_ADD(kbdev, PM_POWEROFF_WAIT_WQ, NULL, 0);

#if !MALI_USE_CSF
	/* Wait for power transitions to complete. We do this with no locks held
	 * so that we don't deadlock with any pending workqueues.
	 */
	kbase_pm_wait_for_desired_state(kbdev);
#endif

	kbase_pm_lock(kbdev);

	pm_handle_power_off(kbdev);

	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
	backend->poweroff_wait_in_progress = false;
	if (backend->poweron_required) {
		backend->poweron_required = false;
		kbdev->pm.backend.l2_desired = true;
#if MALI_USE_CSF
		kbdev->pm.backend.mcu_desired = true;
#endif
		kbase_pm_update_state(kbdev);
		kbase_pm_update_cores_state_nolock(kbdev);
#if !MALI_USE_CSF
		kbase_backend_slot_update(kbdev);
#endif /* !MALI_USE_CSF */
	}
	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);

	kbase_pm_unlock(kbdev);

	wake_up(&kbdev->pm.backend.poweroff_wait);
}

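/*
 * kbase_pm_l2_clock_slow: part of the GPU2017-1336 clock slow-down
 * workaround. When the workaround is enabled, the GPU clock is dropped to
 * the configured suspend frequency before the L2 is powered down, either
 * through devfreq (CONFIG_MALI_BIFROST_DEVFREQ) or by setting the clock rate
 * directly (CONFIG_MALI_BIFROST_DVFS). kbase_pm_l2_clock_normalize() below
 * restores the previous frequency when the L2 powers back up.
 */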
static void kbase_pm_l2_clock_slow(struct kbase_device *kbdev)
{
#if defined(CONFIG_MALI_BIFROST_DVFS)
	struct clk *clk = kbdev->clocks[0];
#endif

	if (!kbdev->pm.backend.gpu_clock_slow_down_wa)
		return;

	/* No suspend clock is specified */
	if (WARN_ON_ONCE(!kbdev->pm.backend.gpu_clock_suspend_freq))
		return;

#if defined(CONFIG_MALI_BIFROST_DEVFREQ)

	/* Suspend devfreq */
	devfreq_suspend_device(kbdev->devfreq);

	/* Keep the current freq to restore it upon resume */
	kbdev->previous_frequency = kbdev->current_nominal_freq;

	/* Slow down the GPU clock to the suspend clock */
	kbase_devfreq_force_freq(kbdev,
			kbdev->pm.backend.gpu_clock_suspend_freq);

#elif defined(CONFIG_MALI_BIFROST_DVFS) /* CONFIG_MALI_BIFROST_DEVFREQ */

	if (WARN_ON_ONCE(!clk))
		return;

	/* Stop the metrics gathering framework */
	kbase_pm_metrics_stop(kbdev);

	/* Keep the current freq to restore it upon resume */
	kbdev->previous_frequency = clk_get_rate(clk);

	/* Slow down the GPU clock to the suspend clock */
	if (WARN_ON_ONCE(clk_set_rate(clk,
				kbdev->pm.backend.gpu_clock_suspend_freq)))
		dev_err(kbdev->dev, "Failed to set suspend freq\n");

#endif /* CONFIG_MALI_BIFROST_DVFS */
}

static void kbase_pm_l2_clock_normalize(struct kbase_device *kbdev)
{
#if defined(CONFIG_MALI_BIFROST_DVFS)
	struct clk *clk = kbdev->clocks[0];
#endif

	if (!kbdev->pm.backend.gpu_clock_slow_down_wa)
		return;

#if defined(CONFIG_MALI_BIFROST_DEVFREQ)

	/* Restore the GPU clock to the previous one */
	kbase_devfreq_force_freq(kbdev, kbdev->previous_frequency);

	/* Resume devfreq */
	devfreq_resume_device(kbdev->devfreq);

#elif defined(CONFIG_MALI_BIFROST_DVFS) /* CONFIG_MALI_BIFROST_DEVFREQ */

	if (WARN_ON_ONCE(!clk))
		return;

	/* Restore the GPU clock */
	if (WARN_ON_ONCE(clk_set_rate(clk, kbdev->previous_frequency)))
		dev_err(kbdev->dev, "Failed to restore freq (%lu)\n",
			kbdev->previous_frequency);

	/* Restart the metrics gathering framework */
	kbase_pm_metrics_start(kbdev);

#endif /* CONFIG_MALI_BIFROST_DVFS */
}

static void kbase_pm_gpu_clock_control_worker(struct work_struct *data)
{
	struct kbase_device *kbdev = container_of(data, struct kbase_device,
			pm.backend.gpu_clock_control_work);
	struct kbase_pm_device_data *pm = &kbdev->pm;
	struct kbase_pm_backend_data *backend = &pm->backend;
	unsigned long flags;
	bool slow_down = false, normalize = false;

	/* Determine if GPU clock control is required */
	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
	if (!backend->gpu_clock_slowed_down &&
			backend->gpu_clock_slow_down_desired) {
		slow_down = true;
		backend->gpu_clock_slowed_down = true;
	} else if (backend->gpu_clock_slowed_down &&
			!backend->gpu_clock_slow_down_desired) {
		normalize = true;
		backend->gpu_clock_slowed_down = false;
	}
	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);

	/* Control the GPU clock according to the request of the L2 state
	 * machine. The GPU clock needs to be lowered for safe L2 power down
	 * and restored to the previous speed at L2 power up.
	 */
	if (slow_down)
		kbase_pm_l2_clock_slow(kbdev);
	else if (normalize)
		kbase_pm_l2_clock_normalize(kbdev);

	/* Tell the L2 state machine to transition to the next state */
	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
	kbase_pm_update_state(kbdev);
	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
}

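/*
 * kbase_pm_hwcnt_disable_worker: disables the hardware counter context on
 * behalf of the PM state machine. The desired/disabled flags are re-checked
 * under hwaccess_lock after the disable completes: if the PM state moved on
 * in the meantime the disable is undone by re-enabling the context,
 * otherwise hwcnt_disabled is committed and the state machine is kicked.
 */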
static void kbase_pm_hwcnt_disable_worker(struct work_struct *data)
{
	struct kbase_device *kbdev = container_of(data, struct kbase_device,
			pm.backend.hwcnt_disable_work);
	struct kbase_pm_device_data *pm = &kbdev->pm;
	struct kbase_pm_backend_data *backend = &pm->backend;
	unsigned long flags;

	bool do_disable;

	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
	do_disable = !backend->hwcnt_desired && !backend->hwcnt_disabled;
	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);

	if (!do_disable)
		return;

	kbase_hwcnt_context_disable(kbdev->hwcnt_gpu_ctx);

	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
	do_disable = !backend->hwcnt_desired && !backend->hwcnt_disabled;

	if (do_disable) {
		/* PM state did not change while we were doing the disable,
		 * so commit the work we just performed and continue the state
		 * machine.
		 */
		backend->hwcnt_disabled = true;
		kbase_pm_update_state(kbdev);
#if !MALI_USE_CSF
		kbase_backend_slot_update(kbdev);
#endif /* !MALI_USE_CSF */
	} else {
		/* PM state was updated while we were doing the disable,
		 * so we need to undo the disable we just performed.
		 */
#if MALI_USE_CSF
		unsigned long lock_flags;

		kbase_csf_scheduler_spin_lock(kbdev, &lock_flags);
#endif
		kbase_hwcnt_context_enable(kbdev->hwcnt_gpu_ctx);
#if MALI_USE_CSF
		kbase_csf_scheduler_spin_unlock(kbdev, lock_flags);
#endif
	}

	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
}

#if MALI_USE_CSF && defined(KBASE_PM_RUNTIME)
/**
 * kbase_pm_do_poweroff_sync - Do the synchronous power down of the GPU
 *
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 *
 * This function is called at the time of system suspend or device unload
 * to power down the GPU synchronously. This is needed because the power down
 * of the GPU would usually happen from the runtime suspend callback (if the
 * gpu_active and gpu_idle callbacks are used), and runtime suspend is
 * disabled when system suspend takes place.
 * The function first waits for the @gpu_poweroff_wait_work to complete, which
 * could have been enqueued after the last PM reference was released.
 *
 * Return: 0 on success, negative value otherwise.
 */
static int kbase_pm_do_poweroff_sync(struct kbase_device *kbdev)
{
	struct kbase_pm_backend_data *backend = &kbdev->pm.backend;
	unsigned long flags;
	int ret = 0;

	WARN_ON(kbdev->pm.active_count);

	kbase_pm_wait_for_poweroff_work_complete(kbdev);

	kbase_pm_lock(kbdev);
	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
	WARN_ON(backend->poweroff_wait_in_progress);
	WARN_ON(backend->gpu_sleep_mode_active);
	if (backend->gpu_powered) {
		backend->mcu_desired = false;
		backend->l2_desired = false;
		kbase_pm_update_state(kbdev);
		spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);

		ret = kbase_pm_wait_for_desired_state(kbdev);
		if (ret) {
			dev_warn(
				kbdev->dev,
				"Wait for pm state change failed on synchronous power off");
			ret = -EBUSY;
			goto out;
		}

		/* Due to the power policy, the GPU could have been kept active
		 * throughout, so we need to invoke the idle callback before
		 * the power down.
		 */
		if (backend->callback_power_runtime_gpu_idle &&
		    !backend->gpu_idled) {
			backend->callback_power_runtime_gpu_idle(kbdev);
			backend->gpu_idled = true;
		}

		if (!kbase_pm_clock_off(kbdev)) {
			dev_warn(
				kbdev->dev,
				"Failed to turn off GPU clocks on synchronous power off, MMU faults pending");
			ret = -EBUSY;
		}
	} else {
		spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
	}

out:
	kbase_pm_unlock(kbdev);
	return ret;
}
#endif

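/*
 * kbase_pm_do_poweroff: request an asynchronous power off. It clears the
 * desired core/L2 (and MCU) state and marks poweroff_wait_in_progress; the
 * L2 state machine then drives the actual power down and eventually queues
 * kbase_pm_gpu_poweroff_wait_wq. Callers wait on poweroff_wait for
 * completion.
 */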
void kbase_pm_do_poweroff(struct kbase_device *kbdev)
{
	unsigned long flags;

	lockdep_assert_held(&kbdev->pm.lock);

	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);

	if (!kbdev->pm.backend.gpu_powered)
		goto unlock_hwaccess;

	if (kbdev->pm.backend.poweroff_wait_in_progress)
		goto unlock_hwaccess;

#if MALI_USE_CSF
	kbdev->pm.backend.mcu_desired = false;
#else
	/* Force all cores off */
	kbdev->pm.backend.shaders_desired = false;
#endif
	kbdev->pm.backend.l2_desired = false;

	kbdev->pm.backend.poweroff_wait_in_progress = true;
	kbdev->pm.backend.invoke_poweroff_wait_wq_when_l2_off = true;

	/* l2_desired being false should cause the state machine to
	 * start powering off the L2. When it actually is powered off,
	 * the interrupt handler will call kbase_pm_l2_update_state()
	 * again, which will trigger the kbase_pm_gpu_poweroff_wait_wq.
	 * Callers of this function will need to wait on poweroff_wait.
	 */
	kbase_pm_update_state(kbdev);

unlock_hwaccess:
	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
}

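/* Helper for the wait below: returns true once the deferred power off is no
 * longer in progress, sampled under hwaccess_lock.
 */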
static bool is_poweroff_in_progress(struct kbase_device *kbdev)
{
	bool ret;
	unsigned long flags;

	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
	ret = (kbdev->pm.backend.poweroff_wait_in_progress == false);
	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);

	return ret;
}

void kbase_pm_wait_for_poweroff_work_complete(struct kbase_device *kbdev)
{
	wait_event_killable(kbdev->pm.backend.poweroff_wait,
			is_poweroff_in_progress(kbdev));
}
KBASE_EXPORT_TEST_API(kbase_pm_wait_for_poweroff_work_complete);

/**
 * is_gpu_powered_down - Check whether GPU is powered down
 *
 * @kbdev: kbase device
 *
 * Return: true if GPU is powered down, false otherwise
 */
static bool is_gpu_powered_down(struct kbase_device *kbdev)
{
	bool ret;
	unsigned long flags;

	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
	ret = !kbdev->pm.backend.gpu_powered;
	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);

	return ret;
}

void kbase_pm_wait_for_gpu_power_down(struct kbase_device *kbdev)
{
	wait_event_killable(kbdev->pm.backend.poweroff_wait,
			is_gpu_powered_down(kbdev));
}
KBASE_EXPORT_TEST_API(kbase_pm_wait_for_gpu_power_down);

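/*
 * kbase_hwaccess_pm_powerup: bring the GPU up, typically once during device
 * initialisation: initialise the hardware, set the debug core masks from the
 * shader_present register, take an initial PM/RPM reference, enable
 * interrupts, mark the PM state machine as ready and then power on the cores
 * (and, for CSF, the L2 needed for firmware boot).
 */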
int kbase_hwaccess_pm_powerup(struct kbase_device *kbdev,
		unsigned int flags)
{
	unsigned long irq_flags;
	int ret;

	KBASE_DEBUG_ASSERT(kbdev != NULL);

	kbase_pm_lock(kbdev);

	/* A suspend won't happen during startup/insmod */
	KBASE_DEBUG_ASSERT(!kbase_pm_is_suspending(kbdev));

	/* Power up the GPU, don't enable IRQs as we are not ready to receive
	 * them
	 */
	ret = kbase_pm_init_hw(kbdev, flags);
	if (ret) {
		kbase_pm_unlock(kbdev);
		return ret;
	}
#if MALI_USE_CSF
	kbdev->pm.debug_core_mask =
		kbdev->gpu_props.props.raw_props.shader_present;
	spin_lock_irqsave(&kbdev->hwaccess_lock, irq_flags);
	/* Set the initial value for 'shaders_avail'. It will later be
	 * modified only from the MCU state machine, when the shader core
	 * allocation enable mask request has completed. So its value
	 * indicates the mask of cores that are currently being used by FW for
	 * the allocation of endpoints requested by CSGs.
	 */
	kbdev->pm.backend.shaders_avail = kbase_pm_ca_get_core_mask(kbdev);
	spin_unlock_irqrestore(&kbdev->hwaccess_lock, irq_flags);
#else
	kbdev->pm.debug_core_mask_all = kbdev->pm.debug_core_mask[0] =
			kbdev->pm.debug_core_mask[1] =
			kbdev->pm.debug_core_mask[2] =
			kbdev->gpu_props.props.raw_props.shader_present;
#endif

	/* Pretend the GPU is active to prevent a power policy turning the GPU
	 * cores off
	 */
	kbdev->pm.active_count = 1;
#if MALI_USE_CSF && KBASE_PM_RUNTIME
	if (kbdev->pm.backend.callback_power_runtime_gpu_active) {
		/* Take the RPM reference count to match with the internal
		 * PM reference count
		 */
		kbdev->pm.backend.callback_power_runtime_gpu_active(kbdev);
		WARN_ON(kbdev->pm.backend.gpu_idled);
	}
#endif

	spin_lock_irqsave(&kbdev->pm.backend.gpu_cycle_counter_requests_lock,
								irq_flags);
	/* Ensure cycle counter is off */
	kbdev->pm.backend.gpu_cycle_counter_requests = 0;
	spin_unlock_irqrestore(
			&kbdev->pm.backend.gpu_cycle_counter_requests_lock,
								irq_flags);

	/* We are ready to receive IRQs now as the power policy is set up, so
	 * enable them now.
	 */
#ifdef CONFIG_MALI_BIFROST_DEBUG
	kbdev->pm.backend.driver_ready_for_irqs = true;
#endif
	kbase_pm_enable_interrupts(kbdev);

	WARN_ON(!kbdev->pm.backend.gpu_powered);
	/* GPU has been powered up (by kbase_pm_init_hw) and interrupts have
	 * been enabled, so the GPU is ready for use and the PM state machine
	 * can be exercised from this point onwards.
	 */
	kbdev->pm.backend.gpu_ready = true;

	/* Turn on the GPU and any cores needed by the policy */
#if MALI_USE_CSF
	/* Turn on the L2 caches, needed for firmware boot */
	spin_lock_irqsave(&kbdev->hwaccess_lock, irq_flags);
	kbdev->pm.backend.l2_desired = true;
	spin_unlock_irqrestore(&kbdev->hwaccess_lock, irq_flags);
#endif
	kbase_pm_do_poweron(kbdev, false);
	kbase_pm_unlock(kbdev);

	return 0;
}

void kbase_hwaccess_pm_halt(struct kbase_device *kbdev)
{
	KBASE_DEBUG_ASSERT(kbdev != NULL);

#if MALI_USE_CSF && defined(KBASE_PM_RUNTIME)
	WARN_ON(kbase_pm_do_poweroff_sync(kbdev));
#else
	mutex_lock(&kbdev->pm.lock);
	kbase_pm_do_poweroff(kbdev);
	mutex_unlock(&kbdev->pm.lock);

	kbase_pm_wait_for_poweroff_work_complete(kbdev);
#endif
}

KBASE_EXPORT_TEST_API(kbase_hwaccess_pm_halt);

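/*
 * kbase_hwaccess_pm_term: tear-down counterpart of kbase_hwaccess_pm_init().
 * Cancels the hwcnt disable work (re-enabling the counter context if it was
 * left disabled), terminates the state machine, policy and core availability
 * code, and destroys the power-off wait workqueue.
 */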
void kbase_hwaccess_pm_term(struct kbase_device *kbdev)
{
	KBASE_DEBUG_ASSERT(kbdev != NULL);
	KBASE_DEBUG_ASSERT(kbdev->pm.active_count == 0);
	KBASE_DEBUG_ASSERT(kbdev->pm.backend.gpu_cycle_counter_requests == 0);

	cancel_work_sync(&kbdev->pm.backend.hwcnt_disable_work);

	if (kbdev->pm.backend.hwcnt_disabled) {
		unsigned long flags;
#if MALI_USE_CSF
		kbase_csf_scheduler_spin_lock(kbdev, &flags);
		kbase_hwcnt_context_enable(kbdev->hwcnt_gpu_ctx);
		kbase_csf_scheduler_spin_unlock(kbdev, flags);
#else
		spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
		kbase_hwcnt_context_enable(kbdev->hwcnt_gpu_ctx);
		spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
#endif
	}

	/* Free any resources the policy allocated */
	kbase_pm_state_machine_term(kbdev);
	kbase_pm_policy_term(kbdev);
	kbase_pm_ca_term(kbdev);

#if !MALI_USE_CSF
	/* Shut down the metrics subsystem */
	kbasep_pm_metrics_term(kbdev);
#else
	if (WARN_ON(mutex_is_locked(&kbdev->pm.backend.policy_change_lock))) {
		mutex_lock(&kbdev->pm.backend.policy_change_lock);
		mutex_unlock(&kbdev->pm.backend.policy_change_lock);
	}
	mutex_destroy(&kbdev->pm.backend.policy_change_lock);
#endif

	destroy_workqueue(kbdev->pm.backend.gpu_poweroff_wait_wq);
}

void kbase_pm_power_changed(struct kbase_device *kbdev)
{
	unsigned long flags;

	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
	kbase_pm_update_state(kbdev);

#if !MALI_USE_CSF
	kbase_backend_slot_update(kbdev);
#endif /* !MALI_USE_CSF */

	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
}

#if MALI_USE_CSF
void kbase_pm_set_debug_core_mask(struct kbase_device *kbdev, u64 new_core_mask)
{
	lockdep_assert_held(&kbdev->hwaccess_lock);
	lockdep_assert_held(&kbdev->pm.lock);

	kbdev->pm.debug_core_mask = new_core_mask;
	kbase_pm_update_dynamic_cores_onoff(kbdev);
}
KBASE_EXPORT_TEST_API(kbase_pm_set_debug_core_mask);
#else
void kbase_pm_set_debug_core_mask(struct kbase_device *kbdev,
		u64 new_core_mask_js0, u64 new_core_mask_js1,
		u64 new_core_mask_js2)
{
	lockdep_assert_held(&kbdev->hwaccess_lock);
	lockdep_assert_held(&kbdev->pm.lock);

	if (kbase_dummy_job_wa_enabled(kbdev)) {
		dev_warn_once(kbdev->dev, "Change of core mask not supported for slot 0 as dummy job WA is enabled");
		new_core_mask_js0 = kbdev->pm.debug_core_mask[0];
	}

	kbdev->pm.debug_core_mask[0] = new_core_mask_js0;
	kbdev->pm.debug_core_mask[1] = new_core_mask_js1;
	kbdev->pm.debug_core_mask[2] = new_core_mask_js2;
	kbdev->pm.debug_core_mask_all = new_core_mask_js0 | new_core_mask_js1 |
			new_core_mask_js2;

	kbase_pm_update_dynamic_cores_onoff(kbdev);
}
#endif /* MALI_USE_CSF */

void kbase_hwaccess_pm_gpu_active(struct kbase_device *kbdev)
{
	kbase_pm_update_active(kbdev);
}

void kbase_hwaccess_pm_gpu_idle(struct kbase_device *kbdev)
{
	kbase_pm_update_active(kbdev);
}

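/*
 * System suspend/resume entry points. Suspend powers the GPU off (either
 * synchronously for CSF with runtime PM, or via the deferred path plus a
 * wait for the power-off work) and then invokes the platform suspend
 * callback; resume powers it back on and wakes anyone waiting on resume_wait.
 */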
int kbase_hwaccess_pm_suspend(struct kbase_device *kbdev)
{
	int ret = 0;

#if MALI_USE_CSF && defined(KBASE_PM_RUNTIME)
	ret = kbase_pm_do_poweroff_sync(kbdev);
	if (ret)
		return ret;
#else
	/* Force power off the GPU and all cores (regardless of policy), only
	 * after the PM active count reaches zero (otherwise, we risk turning it
	 * off prematurely)
	 */
	kbase_pm_lock(kbdev);

	kbase_pm_do_poweroff(kbdev);

#if !MALI_USE_CSF
	kbase_backend_timer_suspend(kbdev);
#endif /* !MALI_USE_CSF */

	kbase_pm_unlock(kbdev);

	kbase_pm_wait_for_poweroff_work_complete(kbdev);
#endif

	WARN_ON(kbdev->pm.backend.gpu_powered);
	WARN_ON(atomic_read(&kbdev->faults_pending));

	if (kbdev->pm.backend.callback_power_suspend)
		kbdev->pm.backend.callback_power_suspend(kbdev);

	return ret;
}

void kbase_hwaccess_pm_resume(struct kbase_device *kbdev)
{
	kbase_pm_lock(kbdev);

	kbdev->pm.suspending = false;
#ifdef CONFIG_MALI_ARBITER_SUPPORT
	if (kbase_pm_is_gpu_lost(kbdev)) {
		dev_dbg(kbdev->dev, "%s: GPU lost in progress\n", __func__);
		kbase_pm_unlock(kbdev);
		return;
	}
#endif
	kbase_pm_do_poweron(kbdev, true);

#if !MALI_USE_CSF
	kbase_backend_timer_resume(kbdev);
#endif /* !MALI_USE_CSF */

	wake_up_all(&kbdev->pm.resume_wait);
	kbase_pm_unlock(kbdev);
}

#ifdef CONFIG_MALI_ARBITER_SUPPORT
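/*
 * kbase_pm_handle_gpu_lost: arbiter-only path, called when the hypervisor
 * removes the GPU from this VM. It marks the GPU as lost, cancels any pending
 * GPU reset, clears the jobs that were running on the GPU and fails any
 * in-flight hardware counter dump, since no further interrupts or register
 * accesses are possible.
 */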
void kbase_pm_handle_gpu_lost(struct kbase_device *kbdev)
{
	unsigned long flags;
	ktime_t end_timestamp = ktime_get_raw();
	struct kbase_arbiter_vm_state *arb_vm_state = kbdev->pm.arb_vm_state;

	if (!kbdev->arb.arb_if)
		return;

	mutex_lock(&kbdev->pm.lock);
	mutex_lock(&arb_vm_state->vm_state_lock);
	if (kbdev->pm.backend.gpu_powered &&
			!kbase_pm_is_gpu_lost(kbdev)) {
		kbase_pm_set_gpu_lost(kbdev, true);

		/* GPU is no longer mapped to the VM, so no interrupts will
		 * be received and the Mali registers have been replaced by
		 * dummy RAM.
		 */
		WARN(!kbase_is_gpu_removed(kbdev),
			"GPU is still available after GPU lost event\n");

		/* A full GPU reset will have been done by the hypervisor, so
		 * cancel ours.
		 */
		atomic_set(&kbdev->hwaccess.backend.reset_gpu,
				KBASE_RESET_GPU_NOT_PENDING);
		hrtimer_cancel(&kbdev->hwaccess.backend.reset_timer);
		kbase_synchronize_irqs(kbdev);

		/* Clear all jobs running on the GPU */
		spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
		kbdev->protected_mode = false;
		kbase_backend_reset(kbdev, &end_timestamp);
		kbase_pm_metrics_update(kbdev, NULL);
		kbase_pm_update_state(kbdev);
		spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);

		/* Cancel any pending HWC dumps */
		spin_lock_irqsave(&kbdev->hwcnt.lock, flags);
		if (kbdev->hwcnt.backend.state == KBASE_INSTR_STATE_DUMPING ||
				kbdev->hwcnt.backend.state == KBASE_INSTR_STATE_FAULT) {
			kbdev->hwcnt.backend.state = KBASE_INSTR_STATE_FAULT;
			kbdev->hwcnt.backend.triggered = 1;
			wake_up(&kbdev->hwcnt.backend.wait);
		}
		spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
	}
	mutex_unlock(&arb_vm_state->vm_state_lock);
	mutex_unlock(&kbdev->pm.lock);
}

#endif /* CONFIG_MALI_ARBITER_SUPPORT */

#if MALI_USE_CSF && defined(KBASE_PM_RUNTIME)
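/*
 * kbase_pm_force_mcu_wakeup_after_sleep: set the wakeup override so that the
 * PM state machine powers the L2 back up and brings the MCU out of sleep,
 * then wait for the desired state to be reached.
 */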
int kbase_pm_force_mcu_wakeup_after_sleep(struct kbase_device *kbdev)
{
	unsigned long flags;

	lockdep_assert_held(&kbdev->pm.lock);

	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
	/* Set the override flag to force the power up of L2 cache */
	kbdev->pm.backend.gpu_wakeup_override = true;
	kbase_pm_update_state(kbdev);
	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);

	return kbase_pm_wait_for_desired_state(kbdev);
}

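/*
 * pm_handle_mcu_sleep_on_runtime_suspend: runtime-suspend handling for the
 * case where the MCU was put to sleep rather than halted. The MCU is woken
 * up (which powers the L2 back up), the CSGs are suspended by the Scheduler,
 * and then the L2 is powered down again. Returns -EBUSY if the GPU became
 * active again or a doorbell mirror interrupt arrived in the meantime.
 */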
static int pm_handle_mcu_sleep_on_runtime_suspend(struct kbase_device *kbdev)
{
	unsigned long flags;
	int ret;

	lockdep_assert_held(&kbdev->csf.scheduler.lock);
	lockdep_assert_held(&kbdev->pm.lock);

#ifdef CONFIG_MALI_BIFROST_DEBUG
	/* In case there is no active CSG on a slot, powering up the L2 could
	 * be skipped and we could proceed directly to suspending the GPU.
	 * ToDo: firmware has to be reloaded after wake-up as no halt command
	 * has been sent when GPU was put to sleep mode.
	 */
	if (!kbase_csf_scheduler_get_nr_active_csgs(kbdev))
		dev_info(
			kbdev->dev,
			"No active CSGs. Can skip the power up of L2 and go for suspension directly");
#endif

	ret = kbase_pm_force_mcu_wakeup_after_sleep(kbdev);
	if (ret) {
		spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
		dev_warn(
			kbdev->dev,
			"Waiting for MCU to wake up failed on runtime suspend");
		kbdev->pm.backend.gpu_wakeup_override = false;
		spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
		return ret;
	}

	/* Check if a Doorbell mirror interrupt occurred meanwhile */
	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
	if (kbdev->pm.backend.gpu_sleep_mode_active &&
	    kbdev->pm.backend.exit_gpu_sleep_mode) {
		dev_dbg(kbdev->dev, "DB mirror interrupt occurred during runtime suspend after L2 power up");
		kbdev->pm.backend.gpu_wakeup_override = false;
		spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
		return -EBUSY;
	}
	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
	/* Need to release the kbdev->pm.lock to avoid a lock ordering issue
	 * with kctx->reg.lock, which is taken if the sync wait condition is
	 * evaluated after the CSG suspend operation.
	 */
	kbase_pm_unlock(kbdev);
	ret = kbase_csf_scheduler_handle_runtime_suspend(kbdev);
	kbase_pm_lock(kbdev);

	/* Power down L2 cache */
	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
	kbdev->pm.backend.gpu_wakeup_override = false;
	kbase_pm_update_state(kbdev);
	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);

	/* After re-acquiring the kbdev->pm.lock, check if the device
	 * became active (or active then idle) meanwhile.
	 */
	if (kbdev->pm.active_count ||
	    kbdev->pm.backend.poweroff_wait_in_progress) {
		dev_dbg(kbdev->dev,
			"Device became active on runtime suspend after suspending Scheduler");
		ret = -EBUSY;
	}

	if (ret)
		return ret;

	ret = kbase_pm_wait_for_desired_state(kbdev);
	if (ret)
		dev_warn(kbdev->dev, "Wait for power down failed on runtime suspend");

	return ret;
}

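/*
 * kbase_pm_handle_runtime_suspend: top-level runtime suspend handler for the
 * CSF GPU sleep path. It prevents GPU reset, re-checks that the device has
 * not become active again, handles the MCU-in-sleep case and finally turns
 * the GPU clocks off. On any failure the runtime suspend is aborted with
 * -EBUSY and the last-busy timestamp is refreshed so it will be retried.
 */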
int kbase_pm_handle_runtime_suspend(struct kbase_device *kbdev)
{
	enum kbase_mcu_state mcu_state;
	bool exit_early = false;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
	/* This check is needed for the case where Kbase had invoked the
	 * @power_off_callback directly.
	 */
	if (!kbdev->pm.backend.gpu_powered) {
		dev_dbg(kbdev->dev, "GPU already powered down on runtime suspend");
		exit_early = true;
	}
	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);

	if (exit_early)
		goto out;

	ret = kbase_reset_gpu_try_prevent(kbdev);
	if (ret == -ENOMEM) {
		dev_dbg(kbdev->dev, "Quit runtime suspend as GPU is in bad state");
		/* Finish the runtime suspend, no point in trying again as the
		 * GPU is in an irrecoverable bad state.
		 */
		goto out;
	} else if (ret) {
		dev_dbg(kbdev->dev, "Quit runtime suspend for failing to prevent gpu reset");
		ret = -EBUSY;
		goto out;
	}

	kbase_csf_scheduler_lock(kbdev);
	kbase_pm_lock(kbdev);

	/*
	 * This is to handle the case where the GPU device becomes active and
	 * idle very quickly whilst the runtime suspend callback is executing.
	 * This is useful for the following scenario :-
	 * - GPU goes idle and pm_callback_runtime_gpu_idle() is called.
	 * - Auto-suspend timer expires and kbase_device_runtime_suspend()
	 *   is called.
	 * - GPU becomes active and pm_callback_runtime_gpu_active() calls
	 *   pm_runtime_get().
	 * - Shortly after that GPU becomes idle again.
	 * - kbase_pm_handle_runtime_suspend() gets called.
	 * - pm_callback_runtime_gpu_idle() is called.
	 *
	 * We do not want to power down the GPU immediately after it goes idle.
	 * So if we notice that the GPU had become active when the runtime
	 * suspend had already kicked in, we abort the runtime suspend.
	 * By aborting the runtime suspend, we defer the power down of the GPU.
	 *
	 * This check also helps prevent warnings regarding L2 and MCU states
	 * inside the pm_handle_power_off() function. The warning stems from
	 * the fact that pm.lock is released before invoking the Scheduler
	 * function to suspend the CSGs.
	 */
	if (kbdev->pm.active_count ||
	    kbdev->pm.backend.poweroff_wait_in_progress) {
		dev_dbg(kbdev->dev, "Device became active on runtime suspend");
		ret = -EBUSY;
		goto unlock;
	}

	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
	if (kbdev->pm.backend.gpu_sleep_mode_active &&
	    kbdev->pm.backend.exit_gpu_sleep_mode) {
		dev_dbg(kbdev->dev, "DB mirror interrupt occurred during runtime suspend before L2 power up");
		ret = -EBUSY;
		spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
		goto unlock;
	}

	mcu_state = kbdev->pm.backend.mcu_state;
	WARN_ON(!kbase_pm_is_mcu_inactive(kbdev, mcu_state));
	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);

	if (mcu_state == KBASE_MCU_IN_SLEEP) {
		ret = pm_handle_mcu_sleep_on_runtime_suspend(kbdev);
		if (ret)
			goto unlock;
	}

	/* Disable interrupts and turn off the GPU clocks */
	if (!kbase_pm_clock_off(kbdev)) {
		dev_warn(kbdev->dev, "Failed to turn off GPU clocks on runtime suspend, MMU faults pending");

		WARN_ON(!kbdev->poweroff_pending);
		/* The previous call to kbase_pm_clock_off() would have
		 * disabled the interrupts and also synchronized with the
		 * interrupt handlers, so more fault work items can't be
		 * enqueued.
		 *
		 * We can't wait for the completion of the MMU fault work items
		 * as there is a possibility of a deadlock, since the fault
		 * work items would do the group termination which requires
		 * the Scheduler lock.
		 */
		ret = -EBUSY;
		goto unlock;
	}

	wake_up(&kbdev->pm.backend.poweroff_wait);
	WARN_ON(kbdev->pm.backend.gpu_powered);
	dev_dbg(kbdev->dev, "GPU power down complete");

unlock:
	kbase_pm_unlock(kbdev);
	kbase_csf_scheduler_unlock(kbdev);
	kbase_reset_gpu_allow(kbdev);
out:
	if (ret) {
		ret = -EBUSY;
		pm_runtime_mark_last_busy(kbdev->dev);
	}

	return ret;
}
#endif