xref: /OK3568_Linux_fs/kernel/drivers/gpu/arm/bifrost/backend/gpu/mali_kbase_pm_metrics.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1 // SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
2 /*
3  *
4  * (C) COPYRIGHT 2011-2022 ARM Limited. All rights reserved.
5  *
6  * This program is free software and is provided to you under the terms of the
7  * GNU General Public License version 2 as published by the Free Software
8  * Foundation, and any use by you of this program is subject to the terms
9  * of such GNU license.
10  *
11  * This program is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14  * GNU General Public License for more details.
15  *
16  * You should have received a copy of the GNU General Public License
17  * along with this program; if not, you can access it online at
18  * http://www.gnu.org/licenses/gpl-2.0.html.
19  *
20  */
21 
22 /*
23  * Metrics for power management
24  */
25 
26 #include <mali_kbase.h>
27 #include <mali_kbase_config_defaults.h>
28 #include <mali_kbase_pm.h>
29 #include <backend/gpu/mali_kbase_pm_internal.h>
30 
31 #if MALI_USE_CSF
32 #include "backend/gpu/mali_kbase_clk_rate_trace_mgr.h"
33 #include <csf/ipa_control/mali_kbase_csf_ipa_control.h>
34 #else
35 #include <backend/gpu/mali_kbase_jm_rb.h>
36 #endif /* !MALI_USE_CSF */
37 
38 #include <backend/gpu/mali_kbase_pm_defs.h>
39 #include <mali_linux_trace.h>
40 
41 #if defined(CONFIG_MALI_BIFROST_DEVFREQ) || defined(CONFIG_MALI_BIFROST_DVFS) || !MALI_USE_CSF
42 /* Shift used for kbasep_pm_metrics_data.time_busy/idle - units of (1 << 8) ns
43  * This gives a maximum period between samples of 2^(32+8)/100 ns = slightly
44  * under 11s. Exceeding this will cause overflow
45  */
46 #define KBASE_PM_TIME_SHIFT			8
47 #endif
48 
49 #if MALI_USE_CSF
50 /* To get the GPU_ACTIVE value in nano seconds unit */
51 #define GPU_ACTIVE_SCALING_FACTOR ((u64)1E9)
52 #endif
53 
54 /*
55  * Possible state transitions
56  * ON        -> ON | OFF | STOPPED
57  * STOPPED   -> ON | OFF
58  * OFF       -> ON
59  *
60  *
61  * ┌─e─┐┌────────────f─────────────┐
62  * │   v│                          v
63  * └───ON ──a──> STOPPED ──b──> OFF
64  *     ^^            │             │
65  *     │└──────c─────┘             │
66  *     │                           │
67  *     └─────────────d─────────────┘
68  *
69  * Transition effects:
70  * a. None
71  * b. Timer expires without restart
72  * c. Timer is not stopped, timer period is unaffected
73  * d. Timer must be restarted
74  * e. Callback is executed and the timer is restarted
75  * f. Timer is cancelled, or the callback is waited on if currently executing. This is called during
76  *    tear-down and should not be subject to a race from an OFF->ON transition
77  */
/* Lifecycle states for the DVFS metrics timer (transitions shown above) */
enum dvfs_metric_timer_state { TIMER_OFF, TIMER_STOPPED, TIMER_ON };
79 
80 #ifdef CONFIG_MALI_BIFROST_DVFS
/**
 * dvfs_callback - hrtimer callback driving periodic DVFS sampling
 * @timer: The hrtimer embedded in &struct kbasep_pm_metrics_state
 *
 * Runs once per DVFS period: evaluates GPU utilisation via
 * kbase_pm_get_dvfs_action() and re-arms the timer, unless a stop was
 * requested since the last expiry.
 *
 * Return: HRTIMER_RESTART if the timer state is still TIMER_ON,
 *         HRTIMER_NORESTART otherwise.
 */
static enum hrtimer_restart dvfs_callback(struct hrtimer *timer)
{
	struct kbasep_pm_metrics_state *metrics;

	if (WARN_ON(!timer))
		return HRTIMER_NORESTART;

	metrics = container_of(timer, struct kbasep_pm_metrics_state, timer);

	/* Transition (b) to fully off if timer was stopped, don't restart the timer in this case */
	if (atomic_cmpxchg(&metrics->timer_state, TIMER_STOPPED, TIMER_OFF) != TIMER_ON)
		return HRTIMER_NORESTART;

	kbase_pm_get_dvfs_action(metrics->kbdev);

	/* Set the new expiration time and restart (transition e) */
	hrtimer_forward_now(timer, HR_TIMER_DELAY_MSEC(metrics->kbdev->pm.dvfs_period));
	return HRTIMER_RESTART;
}
100 #endif /* CONFIG_MALI_BIFROST_DVFS */
101 
/**
 * kbasep_pm_metrics_init - Initialise power-management metrics state
 * @kbdev: Device pointer
 *
 * Resets the busy/idle accumulators and the sample-period start time.
 * On CSF GPUs this also registers an IPA control client that samples the
 * GPU_ACTIVE counter; on JM GPUs it clears the per-slot activity flags
 * instead. When DVFS is configured, the periodic sampling timer is set
 * up and started.
 *
 * Return: 0 on success, -1 if the IPA control client could not be
 *         registered (CSF builds only).
 */
int kbasep_pm_metrics_init(struct kbase_device *kbdev)
{
#if MALI_USE_CSF
	struct kbase_ipa_control_perf_counter perf_counter;
	int err;

	/* One counter group */
	const size_t NUM_PERF_COUNTERS = 1;

	KBASE_DEBUG_ASSERT(kbdev != NULL);
	kbdev->pm.backend.metrics.kbdev = kbdev;
	kbdev->pm.backend.metrics.time_period_start = ktime_get_raw();
	kbdev->pm.backend.metrics.values.time_busy = 0;
	kbdev->pm.backend.metrics.values.time_idle = 0;
	kbdev->pm.backend.metrics.values.time_in_protm = 0;

	/* Scale counter increments to nanoseconds */
	perf_counter.scaling_factor = GPU_ACTIVE_SCALING_FACTOR;

	/* Normalize values by GPU frequency */
	perf_counter.gpu_norm = true;

	/* We need the GPU_ACTIVE counter, which is in the CSHW group */
	perf_counter.type = KBASE_IPA_CORE_TYPE_CSHW;

	/* We need the GPU_ACTIVE counter */
	perf_counter.idx = GPU_ACTIVE_CNT_IDX;

	err = kbase_ipa_control_register(
		kbdev, &perf_counter, NUM_PERF_COUNTERS,
		&kbdev->pm.backend.metrics.ipa_control_client);
	if (err) {
		dev_err(kbdev->dev,
			"Failed to register IPA with kbase_ipa_control: err=%d",
			err);
		return -1;
	}
#else
	KBASE_DEBUG_ASSERT(kbdev != NULL);
	kbdev->pm.backend.metrics.kbdev = kbdev;
	kbdev->pm.backend.metrics.time_period_start = ktime_get_raw();

	kbdev->pm.backend.metrics.gpu_active = false;
	kbdev->pm.backend.metrics.active_cl_ctx[0] = 0;
	kbdev->pm.backend.metrics.active_cl_ctx[1] = 0;
	kbdev->pm.backend.metrics.active_gl_ctx[0] = 0;
	kbdev->pm.backend.metrics.active_gl_ctx[1] = 0;
	kbdev->pm.backend.metrics.active_gl_ctx[2] = 0;

	kbdev->pm.backend.metrics.values.time_busy = 0;
	kbdev->pm.backend.metrics.values.time_idle = 0;
	kbdev->pm.backend.metrics.values.busy_cl[0] = 0;
	kbdev->pm.backend.metrics.values.busy_cl[1] = 0;
	kbdev->pm.backend.metrics.values.busy_gl = 0;

#endif
	spin_lock_init(&kbdev->pm.backend.metrics.lock);

#ifdef CONFIG_MALI_BIFROST_DVFS
	hrtimer_init(&kbdev->pm.backend.metrics.timer, CLOCK_MONOTONIC,
							HRTIMER_MODE_REL);
	kbdev->pm.backend.metrics.timer.function = dvfs_callback;
	/* Mark initialized before starting, so start/stop become valid */
	kbdev->pm.backend.metrics.initialized = true;
	atomic_set(&kbdev->pm.backend.metrics.timer_state, TIMER_OFF);
	kbase_pm_metrics_start(kbdev);
#endif /* CONFIG_MALI_BIFROST_DVFS */

#if MALI_USE_CSF
	/* The sanity check on the GPU_ACTIVE performance counter
	 * is skipped for Juno platforms that have timing problems.
	 */
	kbdev->pm.backend.metrics.skip_gpu_active_sanity_check =
		of_machine_is_compatible("arm,juno");
#endif

	return 0;
}
KBASE_EXPORT_TEST_API(kbasep_pm_metrics_init);
179 
/**
 * kbasep_pm_metrics_term - Tear down power-management metrics state
 * @kbdev: Device pointer
 *
 * Counterpart of kbasep_pm_metrics_init(). With DVFS enabled, the
 * sampling timer is forced off and cancelled (blocking if the callback
 * is currently running — transition f). On CSF GPUs the IPA control
 * client is then unregistered.
 */
void kbasep_pm_metrics_term(struct kbase_device *kbdev)
{
#ifdef CONFIG_MALI_BIFROST_DVFS
	KBASE_DEBUG_ASSERT(kbdev != NULL);

	/* Cancel the timer, and block if the callback is currently executing (transition f) */
	kbdev->pm.backend.metrics.initialized = false;
	atomic_set(&kbdev->pm.backend.metrics.timer_state, TIMER_OFF);
	hrtimer_cancel(&kbdev->pm.backend.metrics.timer);
#endif /* CONFIG_MALI_BIFROST_DVFS */

#if MALI_USE_CSF
	kbase_ipa_control_unregister(
		kbdev, kbdev->pm.backend.metrics.ipa_control_client);
#endif
}

KBASE_EXPORT_TEST_API(kbasep_pm_metrics_term);
198 
199 /* caller needs to hold kbdev->pm.backend.metrics.lock before calling this
200  * function
201  */
202 #if MALI_USE_CSF
203 #if defined(CONFIG_MALI_BIFROST_DEVFREQ) || defined(CONFIG_MALI_BIFROST_DVFS)
/**
 * kbase_pm_get_dvfs_utilisation_calc - Accumulate busy/idle time (CSF)
 * @kbdev: Device pointer
 *
 * Queries IPA_CONTROL for the GPU_ACTIVE increment and protected-mode
 * time since the last sample, converts both to units of 256 ns
 * (KBASE_PM_TIME_SHIFT) and folds them into the running time_busy /
 * time_idle / time_in_protm accumulators. Finally advances
 * time_period_start to the current timestamp.
 *
 * Caller must hold kbdev->pm.backend.metrics.lock.
 */
static void kbase_pm_get_dvfs_utilisation_calc(struct kbase_device *kbdev)
{
	int err;
	u64 gpu_active_counter;
	u64 protected_time;
	ktime_t now;

	lockdep_assert_held(&kbdev->pm.backend.metrics.lock);

	/* Query IPA_CONTROL for the latest GPU-active and protected-time
	 * info.
	 */
	err = kbase_ipa_control_query(
		kbdev, kbdev->pm.backend.metrics.ipa_control_client,
		&gpu_active_counter, 1, &protected_time);

	/* Read the timestamp after reading the GPU_ACTIVE counter value.
	 * This ensures the time gap between the 2 reads is consistent for
	 * a meaningful comparison between the increment of GPU_ACTIVE and
	 * elapsed time. The lock taken inside kbase_ipa_control_query()
	 * function can cause lot of variation.
	 */
	now = ktime_get_raw();

	if (err) {
		dev_err(kbdev->dev,
			"Failed to query the increment of GPU_ACTIVE counter: err=%d",
			err);
	} else {
		u64 diff_ns;
		s64 diff_ns_signed;
		u32 ns_time;
		ktime_t diff = ktime_sub(
			now, kbdev->pm.backend.metrics.time_period_start);

		diff_ns_signed = ktime_to_ns(diff);

		/* Bail out without updating time_period_start if time appears
		 * to have gone backwards; the next call will retry.
		 */
		if (diff_ns_signed < 0)
			return;

		diff_ns = (u64)diff_ns_signed;

#if !IS_ENABLED(CONFIG_MALI_BIFROST_NO_MALI)
		/* The GPU_ACTIVE counter shouldn't clock-up more time than has
		 * actually elapsed - but still some margin needs to be given
		 * when doing the comparison. There could be some drift between
		 * the CPU and GPU clock.
		 *
		 * Can do the check only in a real driver build, as an arbitrary
		 * value for GPU_ACTIVE can be fed into dummy model in no_mali
		 * configuration which may not correspond to the real elapsed
		 * time.
		 */
		if (!kbdev->pm.backend.metrics.skip_gpu_active_sanity_check) {
			/* The margin is scaled to allow for the worst-case
			 * scenario where the samples are maximally separated,
			 * plus a small offset for sampling errors.
			 */
			u64 const MARGIN_NS =
				IPA_CONTROL_TIMER_DEFAULT_VALUE_MS * NSEC_PER_MSEC * 3 / 2;

			if (gpu_active_counter > (diff_ns + MARGIN_NS)) {
				dev_info(
					kbdev->dev,
					"GPU activity takes longer than time interval: %llu ns > %llu ns",
					(unsigned long long)gpu_active_counter,
					(unsigned long long)diff_ns);
			}
		}
#endif
		/* Calculate time difference in units of 256ns */
		ns_time = (u32)(diff_ns >> KBASE_PM_TIME_SHIFT);

		/* Add protected_time to gpu_active_counter so that time in
		 * protected mode is included in the apparent GPU active time,
		 * then convert it from units of 1ns to units of 256ns, to
		 * match what JM GPUs use. The assumption is made here that the
		 * GPU is 100% busy while in protected mode, so we should add
		 * this since the GPU can't (and thus won't) update these
		 * counters while it's actually in protected mode.
		 *
		 * Perform the add after dividing each value down, to reduce
		 * the chances of overflows.
		 */
		protected_time >>= KBASE_PM_TIME_SHIFT;
		gpu_active_counter >>= KBASE_PM_TIME_SHIFT;
		gpu_active_counter += protected_time;

		/* Ensure the following equations don't go wrong if ns_time is
		 * slightly larger than gpu_active_counter somehow
		 */
		gpu_active_counter = MIN(gpu_active_counter, ns_time);

		kbdev->pm.backend.metrics.values.time_busy +=
			gpu_active_counter;

		kbdev->pm.backend.metrics.values.time_idle +=
			ns_time - gpu_active_counter;

		/* Also make time in protected mode available explicitly,
		 * so users of this data have this info, too.
		 */
		kbdev->pm.backend.metrics.values.time_in_protm +=
			protected_time;
	}

	kbdev->pm.backend.metrics.time_period_start = now;
}
312 #endif /* defined(CONFIG_MALI_BIFROST_DEVFREQ) || defined(CONFIG_MALI_BIFROST_DVFS) */
313 #else
/**
 * kbase_pm_get_dvfs_utilisation_calc - Accumulate busy/idle time (JM)
 * @kbdev: Device pointer
 * @now:   Timestamp marking the end of the sample period
 *
 * Attributes the elapsed time since time_period_start to either the busy
 * or idle accumulator (in units of 256 ns), splitting busy time further
 * into GL and per-device CL shares according to the active_*_ctx flags,
 * then advances time_period_start to @now.
 *
 * Caller must hold kbdev->pm.backend.metrics.lock.
 */
static void kbase_pm_get_dvfs_utilisation_calc(struct kbase_device *kbdev,
					       ktime_t now)
{
	ktime_t diff;

	lockdep_assert_held(&kbdev->pm.backend.metrics.lock);

	diff = ktime_sub(now, kbdev->pm.backend.metrics.time_period_start);
	/* Ignore samples where time appears to have gone backwards */
	if (ktime_to_ns(diff) < 0)
		return;

	if (kbdev->pm.backend.metrics.gpu_active) {
		u32 ns_time = (u32) (ktime_to_ns(diff) >> KBASE_PM_TIME_SHIFT);

		kbdev->pm.backend.metrics.values.time_busy += ns_time;
		if (kbdev->pm.backend.metrics.active_cl_ctx[0])
			kbdev->pm.backend.metrics.values.busy_cl[0] += ns_time;
		if (kbdev->pm.backend.metrics.active_cl_ctx[1])
			kbdev->pm.backend.metrics.values.busy_cl[1] += ns_time;
		if (kbdev->pm.backend.metrics.active_gl_ctx[0])
			kbdev->pm.backend.metrics.values.busy_gl += ns_time;
		if (kbdev->pm.backend.metrics.active_gl_ctx[1])
			kbdev->pm.backend.metrics.values.busy_gl += ns_time;
		if (kbdev->pm.backend.metrics.active_gl_ctx[2])
			kbdev->pm.backend.metrics.values.busy_gl += ns_time;
	} else {
		kbdev->pm.backend.metrics.values.time_idle +=
			(u32)(ktime_to_ns(diff) >> KBASE_PM_TIME_SHIFT);
	}

	kbdev->pm.backend.metrics.time_period_start = now;
}
346 #endif  /* MALI_USE_CSF */
347 
348 #if defined(CONFIG_MALI_BIFROST_DEVFREQ) || defined(CONFIG_MALI_BIFROST_DVFS)
/**
 * kbase_pm_get_dvfs_metrics - Snapshot metric deltas since the last call
 * @kbdev: Device pointer
 * @last:  Caller-owned copy of the metrics as of the previous call;
 *         updated to the current totals on return
 * @diff:  Filled with the difference between current totals and @last
 *
 * Brings the accumulators up to date under the metrics lock, then
 * computes per-field deltas. Unsigned subtraction keeps the deltas
 * correct across counter wrap-around.
 */
void kbase_pm_get_dvfs_metrics(struct kbase_device *kbdev,
			       struct kbasep_pm_metrics *last,
			       struct kbasep_pm_metrics *diff)
{
	struct kbasep_pm_metrics *cur = &kbdev->pm.backend.metrics.values;
	unsigned long flags;

	spin_lock_irqsave(&kbdev->pm.backend.metrics.lock, flags);
#if MALI_USE_CSF
	kbase_pm_get_dvfs_utilisation_calc(kbdev);
#else
	kbase_pm_get_dvfs_utilisation_calc(kbdev, ktime_get_raw());
#endif

	memset(diff, 0, sizeof(*diff));
	diff->time_busy = cur->time_busy - last->time_busy;
	diff->time_idle = cur->time_idle - last->time_idle;

#if MALI_USE_CSF
	diff->time_in_protm = cur->time_in_protm - last->time_in_protm;
#else
	diff->busy_cl[0] = cur->busy_cl[0] - last->busy_cl[0];
	diff->busy_cl[1] = cur->busy_cl[1] - last->busy_cl[1];
	diff->busy_gl = cur->busy_gl - last->busy_gl;
#endif

	*last = *cur;

	spin_unlock_irqrestore(&kbdev->pm.backend.metrics.lock, flags);
}
KBASE_EXPORT_TEST_API(kbase_pm_get_dvfs_metrics);
380 #endif
381 
382 #ifdef CONFIG_MALI_BIFROST_DVFS
/**
 * kbase_pm_get_dvfs_action - Compute utilisation and notify the platform
 * @kbdev: Device pointer
 *
 * Fetches the metric deltas accumulated since the previous invocation
 * and converts them into percentage utilisation figures, which are then
 * reported to the platform DVFS handler. On JM GPUs the busy share is
 * additionally broken down into GL and per-device CL percentages.
 */
void kbase_pm_get_dvfs_action(struct kbase_device *kbdev)
{
	int utilisation;
	struct kbasep_pm_metrics *diff;
#if !MALI_USE_CSF
	int busy;
	int util_gl_share;
	int util_cl_share[2];
#endif

	KBASE_DEBUG_ASSERT(kbdev != NULL);

	diff = &kbdev->pm.backend.metrics.dvfs_diff;

	kbase_pm_get_dvfs_metrics(kbdev, &kbdev->pm.backend.metrics.dvfs_last,
				  diff);

	/* max(..., 1u) guards against a division by zero when no time has
	 * elapsed since the last sample.
	 */
	utilisation = (100 * diff->time_busy) /
			max(diff->time_busy + diff->time_idle, 1u);

#if !MALI_USE_CSF
	busy = max(diff->busy_gl + diff->busy_cl[0] + diff->busy_cl[1], 1u);

	util_gl_share = (100 * diff->busy_gl) / busy;
	util_cl_share[0] = (100 * diff->busy_cl[0]) / busy;
	util_cl_share[1] = (100 * diff->busy_cl[1]) / busy;

	kbase_platform_dvfs_event(kbdev, utilisation, util_gl_share,
				  util_cl_share);
#else
	/* Note that, at present, we don't pass protected-mode time to the
	 * platform here. It's unlikely to be useful, however, as the platform
	 * probably just cares whether the GPU is busy or not; time in
	 * protected mode is already added to busy-time at this point, though,
	 * so we should be good.
	 */
	kbase_platform_dvfs_event(kbdev, utilisation);
#endif
}
422 
/**
 * kbase_pm_metrics_is_active - Check whether the DVFS timer is running
 * @kbdev: Device pointer
 *
 * Return: true if the metrics timer is in the TIMER_ON state.
 */
bool kbase_pm_metrics_is_active(struct kbase_device *kbdev)
{
	KBASE_DEBUG_ASSERT(kbdev != NULL);

	return atomic_read(&kbdev->pm.backend.metrics.timer_state) == TIMER_ON;
}
KBASE_EXPORT_TEST_API(kbase_pm_metrics_is_active);
430 
kbase_pm_metrics_start(struct kbase_device * kbdev)431 void kbase_pm_metrics_start(struct kbase_device *kbdev)
432 {
433 	struct kbasep_pm_metrics_state *metrics = &kbdev->pm.backend.metrics;
434 
435 	if (unlikely(!metrics->initialized))
436 		return;
437 
438 	/* Transition to ON, from a stopped state (transition c) */
439 	if (atomic_xchg(&metrics->timer_state, TIMER_ON) == TIMER_OFF)
440 		/* Start the timer only if it's been fully stopped (transition d)*/
441 		hrtimer_start(&metrics->timer, HR_TIMER_DELAY_MSEC(kbdev->pm.dvfs_period),
442 			      HRTIMER_MODE_REL);
443 }
444 
kbase_pm_metrics_stop(struct kbase_device * kbdev)445 void kbase_pm_metrics_stop(struct kbase_device *kbdev)
446 {
447 	if (unlikely(!kbdev->pm.backend.metrics.initialized))
448 		return;
449 
450 	/* Timer is Stopped if its currently on (transition a) */
451 	atomic_cmpxchg(&kbdev->pm.backend.metrics.timer_state, TIMER_ON, TIMER_STOPPED);
452 }
453 
454 
455 #endif /* CONFIG_MALI_BIFROST_DVFS */
456 
457 #if !MALI_USE_CSF
/**
 * kbase_pm_metrics_active_calc - Update PM active counts based on currently
 *                                running atoms
 * @kbdev: Device pointer
 *
 * Inspects the head (or, if the head just completed, the next) atom of
 * each job slot's ringbuffer and sets gpu_active plus the per-slot
 * active_gl_ctx / per-device active_cl_ctx flags accordingly.
 *
 * The caller must hold kbdev->pm.backend.metrics.lock
 */
static void kbase_pm_metrics_active_calc(struct kbase_device *kbdev)
{
	unsigned int js;

	lockdep_assert_held(&kbdev->pm.backend.metrics.lock);

	/* Start from an all-idle view; flags are re-derived below */
	kbdev->pm.backend.metrics.active_gl_ctx[0] = 0;
	kbdev->pm.backend.metrics.active_gl_ctx[1] = 0;
	kbdev->pm.backend.metrics.active_gl_ctx[2] = 0;
	kbdev->pm.backend.metrics.active_cl_ctx[0] = 0;
	kbdev->pm.backend.metrics.active_cl_ctx[1] = 0;
	kbdev->pm.backend.metrics.gpu_active = false;

	for (js = 0; js < BASE_JM_MAX_NR_SLOTS; js++) {
		struct kbase_jd_atom *katom = kbase_gpu_inspect(kbdev, js, 0);

		/* Head atom may have just completed, so if it isn't running
		 * then try the next atom
		 */
		if (katom && katom->gpu_rb_state != KBASE_ATOM_GPU_RB_SUBMITTED)
			katom = kbase_gpu_inspect(kbdev, js, 1);

		if (katom && katom->gpu_rb_state ==
				KBASE_ATOM_GPU_RB_SUBMITTED) {
			if (katom->core_req & BASE_JD_REQ_ONLY_COMPUTE) {
				/* Compute-only atoms count against their
				 * coherent group's CL slot (0 when no
				 * specific group was requested)
				 */
				int device_nr = (katom->core_req &
					BASE_JD_REQ_SPECIFIC_COHERENT_GROUP)
						? katom->device_nr : 0;
				if (!WARN_ON(device_nr >= 2))
					kbdev->pm.backend.metrics.active_cl_ctx[device_nr] = 1;
			} else {
				kbdev->pm.backend.metrics.active_gl_ctx[js] = 1;
				trace_sysgraph(SGR_ACTIVE, 0, js);
			}
			kbdev->pm.backend.metrics.gpu_active = true;
		} else {
			trace_sysgraph(SGR_INACTIVE, 0, js);
		}
	}
}
505 
/**
 * kbase_pm_metrics_update - Refresh metrics on a job-slot state change
 * @kbdev:     Device pointer
 * @timestamp: End time of the sample period, or NULL to use the current
 *             raw monotonic time
 *
 * Called when a job is submitted to or removed from a GPU slot. Closes
 * the current busy/idle sample period at @timestamp and then
 * re-evaluates which slots/contexts are active for the next period.
 */
void kbase_pm_metrics_update(struct kbase_device *kbdev, ktime_t *timestamp)
{
	unsigned long flags;
	ktime_t now;

	lockdep_assert_held(&kbdev->hwaccess_lock);

	spin_lock_irqsave(&kbdev->pm.backend.metrics.lock, flags);

	if (!timestamp) {
		now = ktime_get_raw();
		timestamp = &now;
	}

	/* Track how much of time has been spent busy or idle. For JM GPUs,
	 * this also evaluates how long CL and/or GL jobs have been busy for.
	 */
	kbase_pm_get_dvfs_utilisation_calc(kbdev, *timestamp);

	kbase_pm_metrics_active_calc(kbdev);
	spin_unlock_irqrestore(&kbdev->pm.backend.metrics.lock, flags);
}
529 #endif /* !MALI_USE_CSF */
530