// SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
/*
 *
 * (C) COPYRIGHT 2010-2022 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU license.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 */

/*
 * Power policy API implementations
 */

#include <mali_kbase.h>
#include <gpu/mali_kbase_gpu_regmap.h>
#include <mali_kbase_pm.h>
#include <backend/gpu/mali_kbase_pm_internal.h>
#include <mali_kbase_reset_gpu.h>

#if MALI_USE_CSF && defined(CONFIG_MALI_BIFROST_DEBUG)
#include <csf/mali_kbase_csf_firmware.h>
#endif

#include <linux/of.h>

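/* Table of selectable power policies; see kbase_pm_policy_init() below, which
 * treats the first entry as the default unless it is overridden by the
 * "power_policy" device tree property (or by fw_debug on CSF debug builds).
 */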
static const struct kbase_pm_policy *const all_policy_list[] = {
#if IS_ENABLED(CONFIG_MALI_BIFROST_NO_MALI)
	&kbase_pm_always_on_policy_ops,
	&kbase_pm_coarse_demand_policy_ops,
#else /* CONFIG_MALI_BIFROST_NO_MALI */
	&kbase_pm_coarse_demand_policy_ops,
	&kbase_pm_always_on_policy_ops,
#endif /* CONFIG_MALI_BIFROST_NO_MALI */
};

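/* Pick and initialise the boot-time power policy: the optional "power_policy"
 * device tree property (e.g. power_policy = "always_on"; - an illustrative
 * value matching a policy name) selects a policy by name, otherwise the first
 * entry of all_policy_list is used; fw_debug forces always_on on CSF debug
 * builds.
 */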
void kbase_pm_policy_init(struct kbase_device *kbdev)
{
	const struct kbase_pm_policy *default_policy = all_policy_list[0];
	struct device_node *np = kbdev->dev->of_node;
	const char *power_policy_name;
	unsigned long flags;
	int i;

	if (of_property_read_string(np, "power_policy", &power_policy_name) == 0) {
		for (i = 0; i < ARRAY_SIZE(all_policy_list); i++)
			if (sysfs_streq(all_policy_list[i]->name, power_policy_name)) {
				default_policy = all_policy_list[i];
				break;
			}
	}

#if MALI_USE_CSF && defined(CONFIG_MALI_BIFROST_DEBUG)
	/* Use always_on policy if module param fw_debug=1 is
	 * passed, to aid firmware debugging.
	 */
	if (fw_debug)
		default_policy = &kbase_pm_always_on_policy_ops;
#endif

	default_policy->init(kbdev);

#if MALI_USE_CSF
	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
	kbdev->pm.backend.pm_current_policy = default_policy;
	kbdev->pm.backend.csf_pm_sched_flags = default_policy->pm_sched_flags;
	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
#else
	CSTD_UNUSED(flags);
	kbdev->pm.backend.pm_current_policy = default_policy;
#endif
}

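/* Terminate the currently installed policy; counterpart of
 * kbase_pm_policy_init().
 */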
void kbase_pm_policy_term(struct kbase_device *kbdev)
{
	kbdev->pm.backend.pm_current_policy->term(kbdev);
}

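/* Re-evaluate whether the GPU needs power: query the current policy through
 * get_core_active() and either begin powering on (cancelling a pending
 * power-off wait if necessary) or request a power-off. Caller must hold
 * pm.lock.
 */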
void kbase_pm_update_active(struct kbase_device *kbdev)
{
	struct kbase_pm_device_data *pm = &kbdev->pm;
	struct kbase_pm_backend_data *backend = &pm->backend;
	unsigned long flags;
	bool active;

	lockdep_assert_held(&pm->lock);

	/* pm_current_policy will never be NULL while pm.lock is held */
	KBASE_DEBUG_ASSERT(backend->pm_current_policy);

	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);

	active = backend->pm_current_policy->get_core_active(kbdev);
	WARN((kbase_pm_is_active(kbdev) && !active),
		"GPU is active but policy '%s' is indicating that it can be powered off",
		kbdev->pm.backend.pm_current_policy->name);

	if (active) {
		/* Power on the GPU and any cores requested by the policy */
		if (!pm->backend.invoke_poweroff_wait_wq_when_l2_off &&
				pm->backend.poweroff_wait_in_progress) {
			KBASE_DEBUG_ASSERT(kbdev->pm.backend.gpu_powered);
			pm->backend.poweron_required = true;
			spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
		} else {
			/* Cancel the invocation of
			 * kbase_pm_gpu_poweroff_wait_wq() from the L2 state
			 * machine. This is safe - if
			 * invoke_poweroff_wait_wq_when_l2_off is true, then
			 * the poweroff work hasn't even been queued yet,
			 * meaning we can go straight to powering on.
			 */
			pm->backend.invoke_poweroff_wait_wq_when_l2_off = false;
			pm->backend.poweroff_wait_in_progress = false;
			pm->backend.l2_desired = true;
#if MALI_USE_CSF
			pm->backend.mcu_desired = true;
#endif

			spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
			kbase_pm_do_poweron(kbdev, false);
		}
	} else {
		/* It is an error for the power policy to power off the GPU
		 * when there are contexts active
		 */
		KBASE_DEBUG_ASSERT(pm->active_count == 0);

		pm->backend.poweron_required = false;

		/* Request power off */
		if (pm->backend.gpu_powered) {
			spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);

			/* Power off the GPU immediately */
			kbase_pm_do_poweroff(kbdev);
		} else {
			spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
		}
	}
}

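/* Propagate a change in the set of dynamically usable shader cores. On CSF
 * GPUs this steps the MCU state machine so the firmware is told about the new
 * core mask; otherwise (job manager GPUs) the policy's shaders_needed() is
 * re-queried and the PM state machine is stepped when shaders and L2 are both
 * wanted.
 */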
void kbase_pm_update_dynamic_cores_onoff(struct kbase_device *kbdev)
{
	bool shaders_desired;

	lockdep_assert_held(&kbdev->hwaccess_lock);
	lockdep_assert_held(&kbdev->pm.lock);

	if (kbdev->pm.backend.pm_current_policy == NULL)
		return;
	if (kbdev->pm.backend.poweroff_wait_in_progress)
		return;

#if MALI_USE_CSF
	CSTD_UNUSED(shaders_desired);
	/* Invoke the MCU state machine to send a request to FW for updating
	 * the mask of shader cores that can be used for allocation of
	 * endpoints requested by CSGs.
	 */
	if (kbase_pm_is_mcu_desired(kbdev))
		kbase_pm_update_state(kbdev);
#else
	/* During a protected mode transition, don't let shader core requests
	 * from outside affect the transition; return directly.
	 */
	if (kbdev->pm.backend.protected_transition_override)
		return;

	shaders_desired = kbdev->pm.backend.pm_current_policy->shaders_needed(kbdev);

	if (shaders_desired && kbase_pm_is_l2_desired(kbdev))
		kbase_pm_update_state(kbdev);
#endif
}

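/* Recompute the desired shader core state from the current policy (forced off
 * while a protected mode transition override is active) and step the PM state
 * machine if the desired state changed. Caller must hold hwaccess_lock.
 */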
void kbase_pm_update_cores_state_nolock(struct kbase_device *kbdev)
{
	bool shaders_desired = false;

	lockdep_assert_held(&kbdev->hwaccess_lock);

	if (kbdev->pm.backend.pm_current_policy == NULL)
		return;
	if (kbdev->pm.backend.poweroff_wait_in_progress)
		return;

#if !MALI_USE_CSF
	if (kbdev->pm.backend.protected_transition_override)
		/* We are trying to change in/out of protected mode - force all
		 * cores off so that the L2 powers down
		 */
		shaders_desired = false;
	else
		shaders_desired = kbdev->pm.backend.pm_current_policy->shaders_needed(kbdev);
#endif

	if (kbdev->pm.backend.shaders_desired != shaders_desired) {
		KBASE_KTRACE_ADD(kbdev, PM_CORES_CHANGE_DESIRED, NULL, kbdev->pm.backend.shaders_desired);

		kbdev->pm.backend.shaders_desired = shaders_desired;
		kbase_pm_update_state(kbdev);
	}
}

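/* Locked wrapper around kbase_pm_update_cores_state_nolock() for callers that
 * do not already hold hwaccess_lock.
 */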
void kbase_pm_update_cores_state(struct kbase_device *kbdev)
{
	unsigned long flags;

	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);

	kbase_pm_update_cores_state_nolock(kbdev);

	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
}

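/* Report the available policies: optionally return a pointer to the list and
 * always return the number of entries in it.
 */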
int kbase_pm_list_policies(struct kbase_device *kbdev,
	const struct kbase_pm_policy * const **list)
{
	if (list)
		*list = all_policy_list;

	return ARRAY_SIZE(all_policy_list);
}

KBASE_EXPORT_TEST_API(kbase_pm_list_policies);

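/* Return the policy currently in use for this device. */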
const struct kbase_pm_policy *kbase_pm_get_policy(struct kbase_device *kbdev)
{
	KBASE_DEBUG_ASSERT(kbdev != NULL);

	return kbdev->pm.backend.pm_current_policy;
}

KBASE_EXPORT_TEST_API(kbase_pm_get_policy);

#if MALI_USE_CSF
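/* Wait for the L2 cache to reach the OFF state (which implies the MCU is also
 * off), bounded by the CSF PM timeout. Returns 0 on success, -ETIMEDOUT on
 * timeout, or a negative value if the wait was interrupted.
 */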
static int policy_change_wait_for_L2_off(struct kbase_device *kbdev)
{
	long remaining;
	long timeout = kbase_csf_timeout_in_jiffies(kbase_get_timeout_ms(kbdev, CSF_PM_TIMEOUT));
	int err = 0;

	/* Wait for L2 becoming off, by which the MCU is also implicitly off
	 * since the L2 state machine would only start its power-down
	 * sequence when the MCU is in off state. The L2 off is required
	 * as the tiler may need to be power cycled for MCU reconfiguration
	 * for host control of shader cores.
	 */
#if KERNEL_VERSION(4, 13, 1) <= LINUX_VERSION_CODE
	remaining = wait_event_killable_timeout(
		kbdev->pm.backend.gpu_in_desired_state_wait,
		kbdev->pm.backend.l2_state == KBASE_L2_OFF, timeout);
#else
	remaining = wait_event_timeout(
		kbdev->pm.backend.gpu_in_desired_state_wait,
		kbdev->pm.backend.l2_state == KBASE_L2_OFF, timeout);
#endif

	if (!remaining) {
		err = -ETIMEDOUT;
	} else if (remaining < 0) {
		dev_info(kbdev->dev,
			 "Wait for L2_off got interrupted");
		err = (int)remaining;
	}

	dev_dbg(kbdev->dev, "%s: err=%d mcu_state=%d, L2_state=%d\n", __func__,
		err, kbdev->pm.backend.mcu_state, kbdev->pm.backend.l2_state);

	return err;
}
#endif

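/* Switch the device over to new_policy. On CSF GPUs this may first suspend
 * the scheduler and wait for the L2 to power off, falling back to a GPU reset
 * if that fails; the old policy is then terminated and the new one initialised
 * while pm_current_policy is temporarily NULL so that IRQ handlers leave it
 * alone.
 */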
void kbase_pm_set_policy(struct kbase_device *kbdev,
				const struct kbase_pm_policy *new_policy)
{
	const struct kbase_pm_policy *old_policy;
	unsigned long flags;
#if MALI_USE_CSF
	unsigned int new_policy_csf_pm_sched_flags;
	bool sched_suspend;
	bool reset_gpu = false;
	bool reset_op_prevented = true;
	struct kbase_csf_scheduler *scheduler = NULL;
#endif

	KBASE_DEBUG_ASSERT(kbdev != NULL);
	KBASE_DEBUG_ASSERT(new_policy != NULL);

	KBASE_KTRACE_ADD(kbdev, PM_SET_POLICY, NULL, new_policy->id);

#if MALI_USE_CSF
	scheduler = &kbdev->csf.scheduler;
	KBASE_DEBUG_ASSERT(scheduler != NULL);

	/* Serialize calls on kbase_pm_set_policy() */
	mutex_lock(&kbdev->pm.backend.policy_change_lock);

	if (kbase_reset_gpu_prevent_and_wait(kbdev)) {
		dev_warn(kbdev->dev, "Set PM policy failing to prevent gpu reset");
		reset_op_prevented = false;
	}

	/* In case of CSF, the scheduler may be invoked to suspend. In that
	 * case, there is a risk that the L2 may be turned on by the time we
	 * check it here. So we hold the scheduler lock to avoid other operations
	 * interfering with the policy change and vice versa.
	 */
	mutex_lock(&scheduler->lock);
	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
	/* policy_change_clamp_state_to_off, when needed, is set and then
	 * cleared within this function, so its effect is limited to the
	 * duration of the policy change transition.
	 */
	WARN_ON(kbdev->pm.backend.policy_change_clamp_state_to_off);
	new_policy_csf_pm_sched_flags = new_policy->pm_sched_flags;

	/* The scheduler PM suspend operation is required when the change
	 * involves the always_on policy, as reflected by the
	 * CSF_DYNAMIC_PM_CORE_KEEP_ON flag bit.
	 */
	sched_suspend = reset_op_prevented &&
			(CSF_DYNAMIC_PM_CORE_KEEP_ON &
			 (new_policy_csf_pm_sched_flags | kbdev->pm.backend.csf_pm_sched_flags));

	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);

	if (sched_suspend) {
		/* Update the flag to reflect whether the suspend actually happened */
		sched_suspend = !kbase_csf_scheduler_pm_suspend_no_lock(kbdev);
		/* Set the reset recovery flag if the required suspend failed */
		reset_gpu = !sched_suspend;
	}

	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);

	kbdev->pm.backend.policy_change_clamp_state_to_off = sched_suspend;
	kbase_pm_update_state(kbdev);
	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);

	if (sched_suspend)
		reset_gpu = policy_change_wait_for_L2_off(kbdev);
#endif

	/* During a policy change we pretend the GPU is active */
	/* A suspend won't happen here, because we're in a syscall from a
	 * userspace thread
	 */
	kbase_pm_context_active(kbdev);

	kbase_pm_lock(kbdev);

	/* Remove the policy to prevent IRQ handlers from working on it */
	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
	old_policy = kbdev->pm.backend.pm_current_policy;
	kbdev->pm.backend.pm_current_policy = NULL;
	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);

	KBASE_KTRACE_ADD(kbdev, PM_CURRENT_POLICY_TERM, NULL, old_policy->id);
	if (old_policy->term)
		old_policy->term(kbdev);

	memset(&kbdev->pm.backend.pm_policy_data, 0,
	       sizeof(union kbase_pm_policy_data));

	KBASE_KTRACE_ADD(kbdev, PM_CURRENT_POLICY_INIT, NULL, new_policy->id);
	if (new_policy->init)
		new_policy->init(kbdev);

	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
	kbdev->pm.backend.pm_current_policy = new_policy;
#if MALI_USE_CSF
	kbdev->pm.backend.csf_pm_sched_flags = new_policy_csf_pm_sched_flags;
	/* New policy in place, release the clamping on mcu/L2 off state */
	kbdev->pm.backend.policy_change_clamp_state_to_off = false;
	kbase_pm_update_state(kbdev);
#endif
	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);

	/* If any core power state changes were previously attempted, but
	 * couldn't be made because the policy was changing (current_policy was
	 * NULL), then re-try them here.
	 */
	kbase_pm_update_active(kbdev);
	kbase_pm_update_cores_state(kbdev);

	kbase_pm_unlock(kbdev);

	/* Now the policy change is finished, we release our fake context active
	 * reference
	 */
	kbase_pm_context_idle(kbdev);

#if MALI_USE_CSF
	/* Undo the scheduler suspension done earlier for the policy change */
	if (sched_suspend)
		kbase_csf_scheduler_pm_resume_no_lock(kbdev);
	mutex_unlock(&scheduler->lock);

	if (reset_op_prevented)
		kbase_reset_gpu_allow(kbdev);

	if (reset_gpu) {
		dev_warn(kbdev->dev, "Resorting to GPU reset for policy change\n");
		if (kbase_prepare_to_reset_gpu(kbdev, RESET_FLAGS_NONE))
			kbase_reset_gpu(kbdev);
		kbase_reset_gpu_wait(kbdev);
	}

	mutex_unlock(&kbdev->pm.backend.policy_change_lock);
#endif
}

KBASE_EXPORT_TEST_API(kbase_pm_set_policy);