/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
 *
 * (C) COPYRIGHT 2010-2022 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU license.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 */

/*
 * Power management API definitions used internally by GPU backend
 */

#ifndef _KBASE_BACKEND_PM_INTERNAL_H_
#define _KBASE_BACKEND_PM_INTERNAL_H_

#include <mali_kbase_hwaccess_pm.h>

#include "backend/gpu/mali_kbase_pm_ca.h"
#include "mali_kbase_pm_policy.h"


/**
 * kbase_pm_dev_idle - The GPU is idle.
 *
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 *
 * The OS may choose to turn off idle devices
 */
void kbase_pm_dev_idle(struct kbase_device *kbdev);

/**
 * kbase_pm_dev_activate - The GPU is active.
 *
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 *
 * The OS should avoid opportunistically turning off the GPU while it is active
 */
void kbase_pm_dev_activate(struct kbase_device *kbdev);

/**
 * kbase_pm_get_present_cores - Get details of the cores that are present in
 *                              the device.
 *
 * @kbdev: The kbase device structure for the device (must be a valid
 *         pointer)
 * @type:  The type of core (see the enum kbase_pm_core_type enumeration)
 *
 * This function can be called by the active power policy to return a bitmask
 * of the cores (of a specified type) that are present in the GPU device.
 *
 * Return: The bit mask of cores present
 */
u64 kbase_pm_get_present_cores(struct kbase_device *kbdev,
						enum kbase_pm_core_type type);

/**
 * kbase_pm_get_active_cores - Get details of the cores that are currently
 *                             active in the device.
 *
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 * @type:  The type of core (see the enum kbase_pm_core_type enumeration)
 *
 * This function can be called by the active power policy to return a bitmask of
 * the cores (of a specified type) that are actively processing work (i.e.
 * turned on *and* busy).
 *
 * Return: The bit mask of active cores
 */
u64 kbase_pm_get_active_cores(struct kbase_device *kbdev,
						enum kbase_pm_core_type type);

/**
 * kbase_pm_get_trans_cores - Get details of the cores that are currently
 *                            transitioning between power states.
 *
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 * @type:  The type of core (see the enum kbase_pm_core_type enumeration)
 *
 * This function can be called by the active power policy to return a bitmask of
 * the cores (of a specified type) that are currently transitioning between
 * power states.
 *
 * Return: The bit mask of transitioning cores
 */
u64 kbase_pm_get_trans_cores(struct kbase_device *kbdev,
						enum kbase_pm_core_type type);

/**
 * kbase_pm_get_ready_cores - Get details of the cores that are currently
 *                            powered and ready for jobs.
 *
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 * @type:  The type of core (see the enum kbase_pm_core_type enumeration)
 *
 * This function can be called by the active power policy to return a bitmask of
 * the cores (of a specified type) that are powered and ready for jobs (they may
 * or may not be currently executing jobs).
 *
 * Return: The bit mask of ready cores
 */
u64 kbase_pm_get_ready_cores(struct kbase_device *kbdev,
						enum kbase_pm_core_type type);
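
/*
 * Example (illustrative sketch, not part of the upstream header): a power
 * policy might combine the queries above to see which shader cores are
 * present but not yet ready, e.g.:
 *
 *	u64 present = kbase_pm_get_present_cores(kbdev, KBASE_PM_CORE_SHADER);
 *	u64 ready = kbase_pm_get_ready_cores(kbdev, KBASE_PM_CORE_SHADER);
 *	u64 powering_up = present & ~ready;
 *
 * KBASE_PM_CORE_SHADER is one of the enum kbase_pm_core_type values.
 */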

/**
 * kbase_pm_clock_on - Turn the clock for the device on, and enable device
 *                     interrupts.
 *
 * @kbdev:     The kbase device structure for the device (must be a valid
 *             pointer)
 * @is_resume: true if clock on due to resume after suspend, false otherwise
 *
 * This function can be used by a power policy to turn the clock for the GPU on.
 * It should be modified during integration to perform the necessary actions to
 * ensure that the GPU is fully powered and clocked.
 */
void kbase_pm_clock_on(struct kbase_device *kbdev, bool is_resume);

/**
 * kbase_pm_clock_off - Disable device interrupts, and turn the clock for the
 *                      device off.
 *
 * @kbdev:      The kbase device structure for the device (must be a valid
 *              pointer)
 *
 * This function can be used by a power policy to turn the clock for the GPU
 * off. It should be modified during integration to perform the necessary
 * actions to turn the clock off (if this is possible in the integration).
 *
 * If runtime PM is enabled and @power_runtime_gpu_idle_callback is used
 * then this function would usually be invoked from the runtime suspend
 * callback function.
 *
 * Return: true  if the clock was turned off, or
 *         false if the clock cannot be turned off due to pending page/bus
 *               fault workers. The caller must flush the MMU workqueues and
 *               retry.
 */
bool kbase_pm_clock_off(struct kbase_device *kbdev);
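
/*
 * Example (illustrative sketch): the poweroff path is expected to retry when
 * kbase_pm_clock_off() reports pending fault workers. The helper name
 * kbase_flush_mmu_wqs() is the MMU-workqueue flush used elsewhere in this
 * driver; treat its exact name as an assumption here.
 *
 *	while (!kbase_pm_clock_off(kbdev))
 *		kbase_flush_mmu_wqs(kbdev);
 */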

/**
 * kbase_pm_enable_interrupts - Enable interrupts on the device.
 *
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 *
 * Interrupts are also enabled after a call to kbase_pm_clock_on().
 */
void kbase_pm_enable_interrupts(struct kbase_device *kbdev);

/**
 * kbase_pm_disable_interrupts - Disable interrupts on the device.
 *
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 *
 * This prevents delivery of Power Management interrupts to the CPU so that
 * kbase_pm_update_state() will not be called from the IRQ handler
 * until kbase_pm_enable_interrupts() or kbase_pm_clock_on() is called.
 *
 * Interrupts are also disabled after a call to kbase_pm_clock_off().
 */
void kbase_pm_disable_interrupts(struct kbase_device *kbdev);

/**
 * kbase_pm_disable_interrupts_nolock - Version of kbase_pm_disable_interrupts()
 *                                      that does not take the hwaccess_lock
 *
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 *
 * Caller must hold the hwaccess_lock.
 */
void kbase_pm_disable_interrupts_nolock(struct kbase_device *kbdev);

/**
 * kbase_pm_init_hw - Initialize the hardware.
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 * @flags: Flags specifying the type of PM init
 *
 * This function checks the GPU ID register to ensure that the GPU is supported
 * by the driver and performs a reset on the device so that it is in a known
 * state before the device is used.
 *
 * Return: 0 if the device is supported and successfully reset.
 */
int kbase_pm_init_hw(struct kbase_device *kbdev, unsigned int flags);

/**
 * kbase_pm_reset_done - The GPU has been reset successfully.
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 *
 * This function must be called by the GPU interrupt handler when the
 * RESET_COMPLETED bit is set. It signals to the power management initialization
 * code that the GPU has been successfully reset.
 */
void kbase_pm_reset_done(struct kbase_device *kbdev);

#if MALI_USE_CSF
/**
 * kbase_pm_wait_for_desired_state - Wait for the desired power state to be
 *                                   reached
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 *
 * Wait for the L2 and MCU state machines to reach the states corresponding
 * to the values of 'kbase_pm_is_l2_desired' and 'kbase_pm_is_mcu_desired'.
 *
 * The usual use-case for this is to ensure that all parts of the GPU have been
 * powered up after performing a GPU Reset.
 *
 * Unlike kbase_pm_update_state(), the caller must not hold hwaccess_lock,
 * because this function will take that lock itself.
 *
 * NOTE: This may not wait until the correct state is reached if there is a
 * power off in progress and kbase_pm_context_active() was called instead of
 * kbase_csf_scheduler_pm_active().
 *
 * Return: 0 on success, error code on error
 */
int kbase_pm_wait_for_desired_state(struct kbase_device *kbdev);
#else
/**
 * kbase_pm_wait_for_desired_state - Wait for the desired power state to be
 *                                   reached
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 *
 * Wait for the L2 and shader power state machines to reach the states
 * corresponding to the values of 'l2_desired' and 'shaders_desired'.
 *
 * The usual use-case for this is to ensure cores are 'READY' after performing
 * a GPU Reset.
 *
 * Unlike kbase_pm_update_state(), the caller must not hold hwaccess_lock,
 * because this function will take that lock itself.
 *
 * NOTE: This may not wait until the correct state is reached if there is a
 * power off in progress. To correctly wait for the desired state the caller
 * must ensure that this is not the case by, for example, calling
 * kbase_pm_wait_for_poweroff_work_complete()
 *
 * Return: 0 on success, error code on error
 */
int kbase_pm_wait_for_desired_state(struct kbase_device *kbdev);
#endif
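
/*
 * Example (illustrative sketch): after kicking the state machines, callers
 * wait for the desired state with the hwaccess_lock dropped, e.g.:
 *
 *	unsigned long flags;
 *
 *	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
 *	kbase_pm_update_state(kbdev);
 *	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
 *
 *	if (kbase_pm_wait_for_desired_state(kbdev))
 *		dev_warn(kbdev->dev, "Wait for desired PM state failed");
 */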

/**
 * kbase_pm_wait_for_l2_powered - Wait for the L2 cache to be powered on
 *
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 *
 * Wait for the L2 to be powered on, and for the L2 and the state machines of
 * its dependent stack components to stabilise.
 *
 * kbdev->pm.active_count must be non-zero when calling this function.
 *
 * Unlike kbase_pm_update_state(), the caller must not hold hwaccess_lock,
 * because this function will take that lock itself.
 *
 * Return: 0 on success, error code on error
 */
int kbase_pm_wait_for_l2_powered(struct kbase_device *kbdev);

#if MALI_USE_CSF
/**
 * kbase_pm_wait_for_cores_down_scale - Wait for the downscaling of shader cores
 *
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 *
 * This function can be called to ensure that the downscaling of cores is
 * effectively complete and it would be safe to lower the voltage.
 * The function assumes that the caller has exercised the MCU state machine for
 * the downscale request through the kbase_pm_update_state() function.
 *
 * This function needs to be used by the caller to safely wait for the
 * completion of the downscale request, instead of
 * kbase_pm_wait_for_desired_state(). The downscale request triggers a state
 * change in the MCU state machine, so when the MCU reaches the stable ON
 * state it can be inferred that downscaling is complete. However, it has been
 * observed that the wake up of the waiting thread can be delayed by a few
 * milliseconds, and by the time the thread wakes up the power down transition
 * could already have started (after the completion of the downscale request).
 * On the completion of the power down transition another wake up signal would
 * be sent, but again by the time the thread wakes up the power up transition
 * can begin. The power up transition could then get blocked inside the
 * platform specific callback_power_on() function, because the thread that
 * called into Kbase (from the platform specific code) to perform the
 * downscaling is still waiting for the downscale request to complete.
 *
 * Return: 0 on success, error code on error or remaining jiffies on timeout.
 */
int kbase_pm_wait_for_cores_down_scale(struct kbase_device *kbdev);
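
/*
 * Example (illustrative sketch): a platform handler that has requested a core
 * down-scale might wait for it to take effect before lowering the voltage.
 * lower_gpu_voltage() is a hypothetical platform helper.
 *
 *	if (!kbase_pm_wait_for_cores_down_scale(kbdev))
 *		lower_gpu_voltage();
 */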
#endif

/**
 * kbase_pm_update_dynamic_cores_onoff - Update the L2 and shader power state
 *                                       machines after changing shader core
 *                                       availability
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 *
 * It can be called in any state, so this function must check the L2 and
 * shader core power status; otherwise it would break the shader/L2 state
 * machines.
 *
 * Caller must hold hwaccess_lock
 */
void kbase_pm_update_dynamic_cores_onoff(struct kbase_device *kbdev);

/**
 * kbase_pm_update_cores_state_nolock - Variant of kbase_pm_update_cores_state()
 *                                      where the caller must hold
 *                                      kbase_device.hwaccess_lock
 *
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 */
void kbase_pm_update_cores_state_nolock(struct kbase_device *kbdev);

/**
 * kbase_pm_update_state - Update the L2 and shader power state machines
 * @kbdev: Device pointer
 */
void kbase_pm_update_state(struct kbase_device *kbdev);

/**
 * kbase_pm_state_machine_init - Initialize the state machines, primarily the
 *                               shader poweroff timer
 * @kbdev: Device pointer
 *
 * Return: 0 on success, error code on error
 */
int kbase_pm_state_machine_init(struct kbase_device *kbdev);

/**
 * kbase_pm_state_machine_term - Clean up the PM state machines' data
 * @kbdev: Device pointer
 */
void kbase_pm_state_machine_term(struct kbase_device *kbdev);

/**
 * kbase_pm_update_cores_state - Update the desired state of shader cores from
 *                               the Power Policy, and begin any power
 *                               transitions.
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 *
 * This function will update the desired_xx_state members of
 * struct kbase_pm_device_data by calling into the current Power Policy. It will
 * then begin power transitions to make the hardware achieve the desired shader
 * core state.
 */
void kbase_pm_update_cores_state(struct kbase_device *kbdev);

/**
 * kbasep_pm_metrics_init - Initialize the metrics gathering framework.
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 *
 * This must be called before other metric gathering APIs are called.
 *
 * Return: 0 on success, error code on error
 */
int kbasep_pm_metrics_init(struct kbase_device *kbdev);

/**
 * kbasep_pm_metrics_term - Terminate the metrics gathering framework.
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 *
 * This must be called when metric gathering is no longer required. It is an
 * error to call any metrics gathering function (other than
 * kbasep_pm_metrics_init()) after calling this function.
 */
void kbasep_pm_metrics_term(struct kbase_device *kbdev);

/**
 * kbase_pm_report_vsync - Function to be called by the frame buffer driver to
 *                         update the vsync metric.
 * @kbdev:          The kbase device structure for the device (must be a
 *                  valid pointer)
 * @buffer_updated: True if the buffer has been updated on this VSync,
 *                  false otherwise
 *
 * This function should be called by the frame buffer driver to update whether
 * the system is hitting the vsync target or not. buffer_updated should be true
 * if the vsync corresponded with a new frame being displayed, otherwise it
 * should be false. This function does not need to be called every vsync, but
 * only when the value of @buffer_updated differs from a previous call.
 */
void kbase_pm_report_vsync(struct kbase_device *kbdev, int buffer_updated);

/**
 * kbase_pm_get_dvfs_action - Determine whether the DVFS system should change
 *                            the clock speed of the GPU.
 *
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 *
 * This function should be called regularly by the DVFS system to check whether
 * the clock speed of the GPU needs updating.
 */
void kbase_pm_get_dvfs_action(struct kbase_device *kbdev);

/**
 * kbase_pm_request_gpu_cycle_counter - Mark that the GPU cycle counter is
 *                                      needed
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 *
 * If the caller is the first caller then the GPU cycle counters will be
 * enabled along with the L2 cache.
 *
 * The GPU must be powered when calling this function (i.e.
 * kbase_pm_context_active() must have been called).
 */
void kbase_pm_request_gpu_cycle_counter(struct kbase_device *kbdev);

/**
 * kbase_pm_request_gpu_cycle_counter_l2_is_on - Mark GPU cycle counter is
 *                                               needed (L2 cache already on)
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 *
 * This is a version of the above function
 * (kbase_pm_request_gpu_cycle_counter()) suitable for being called when the
 * L2 cache is known to be on and assured to be on until the subsequent call of
 * kbase_pm_release_gpu_cycle_counter() such as when a job is submitted. It does
 * not sleep and can be called from atomic functions.
 *
 * The GPU must be powered when calling this function (i.e.
 * kbase_pm_context_active() must have been called) and the L2 cache must be
 * powered on.
 */
void kbase_pm_request_gpu_cycle_counter_l2_is_on(struct kbase_device *kbdev);

/**
 * kbase_pm_release_gpu_cycle_counter - Mark that the GPU cycle counter is no
 *                                      longer in use
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 *
 * If the caller is the last caller then the GPU cycle counters will be
 * disabled. A request must have been made before a call to this.
 *
 * Caller must not hold the hwaccess_lock, as it will be taken in this function.
 * If the caller is already holding this lock then
 * kbase_pm_release_gpu_cycle_counter_nolock() must be used instead.
 */
void kbase_pm_release_gpu_cycle_counter(struct kbase_device *kbdev);
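
/*
 * Example (illustrative sketch): cycle-counter requests must be balanced with
 * releases while the GPU is powered, e.g.:
 *
 *	kbase_pm_context_active(kbdev);
 *	kbase_pm_request_gpu_cycle_counter(kbdev);
 *
 *	(... sample the cycle counter registers ...)
 *
 *	kbase_pm_release_gpu_cycle_counter(kbdev);
 *	kbase_pm_context_idle(kbdev);
 */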

/**
 * kbase_pm_release_gpu_cycle_counter_nolock - Version of kbase_pm_release_gpu_cycle_counter()
 *                                             that does not take hwaccess_lock
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 *
 * Caller must hold the hwaccess_lock.
 */
void kbase_pm_release_gpu_cycle_counter_nolock(struct kbase_device *kbdev);

/**
 * kbase_pm_wait_for_poweroff_work_complete - Wait for the poweroff workqueue to
 *                                            complete
 *
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 *
 * This function effectively just waits for the @gpu_poweroff_wait_work work
 * item to complete, if it was enqueued. The GPU may not have been powered down
 * before this function returns.
 */
void kbase_pm_wait_for_poweroff_work_complete(struct kbase_device *kbdev);

/**
 * kbase_pm_wait_for_gpu_power_down - Wait for the GPU power down to complete
 *
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 *
 * This function waits for the actual GPU power down to complete.
 */
void kbase_pm_wait_for_gpu_power_down(struct kbase_device *kbdev);

/**
 * kbase_pm_runtime_init - Initialize runtime-pm for Mali GPU platform device
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 *
 * Set up the power management callbacks and initialize/enable the runtime-pm
 * for the Mali GPU platform device, using the callback function. This must be
 * called before the kbase_pm_register_access_enable() function.
 *
 * Return: 0 on success, error code on error
 */
int kbase_pm_runtime_init(struct kbase_device *kbdev);

/**
 * kbase_pm_runtime_term - Disable runtime-pm for Mali GPU platform device
 *
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 */
void kbase_pm_runtime_term(struct kbase_device *kbdev);

/**
 * kbase_pm_register_access_enable - Enable access to GPU registers
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 *
 * Enables access to the GPU registers before power management has powered up
 * the GPU with kbase_pm_powerup().
 *
 * This results in the power management callbacks provided in the driver
 * configuration being called to turn on power and/or clocks to the GPU. See
 * kbase_pm_callback_conf.
 *
 * This should only be used before power management is powered up with
 * kbase_pm_powerup()
 */
void kbase_pm_register_access_enable(struct kbase_device *kbdev);

/**
 * kbase_pm_register_access_disable - Disable early register access
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 *
 * Disables access to the GPU registers enabled earlier by a call to
 * kbase_pm_register_access_enable().
 *
 * This results in the power management callbacks provided in the driver
 * configuration being called to turn off power and/or clocks to the GPU. See
 * kbase_pm_callback_conf.
 *
 * This should only be used before power management is powered up with
 * kbase_pm_powerup()
 */
void kbase_pm_register_access_disable(struct kbase_device *kbdev);
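
/*
 * Example (illustrative sketch): probing code can bracket early register reads
 * with the enable/disable pair, e.g. to read the GPU_ID register before
 * kbase_pm_powerup():
 *
 *	kbase_pm_register_access_enable(kbdev);
 *	gpu_id = kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_ID));
 *	kbase_pm_register_access_disable(kbdev);
 */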

/* NOTE: kbase_pm_is_suspending is in mali_kbase.h, because it is an inline
 * function
 */

/**
 * kbase_pm_metrics_is_active - Check if the power management metrics
 *                              collection is active.
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 *
 * Note that this returns whether the power management metrics collection was
 * active at the time of calling; it is possible that after the call the
 * metrics collection state may have changed.
 *
 * The caller must handle the consequence that the state may have changed.
 *
 * Return: true if metrics collection was active else false.
 */
bool kbase_pm_metrics_is_active(struct kbase_device *kbdev);

/**
 * kbase_pm_do_poweron - Power on the GPU, and any cores that are requested.
 *
 * @kbdev:     The kbase device structure for the device (must be a valid
 *             pointer)
 * @is_resume: true if power on due to resume after suspend,
 *             false otherwise
 */
void kbase_pm_do_poweron(struct kbase_device *kbdev, bool is_resume);

/**
 * kbase_pm_do_poweroff - Power off the GPU, and any cores that have been
 *                        requested.
 *
 * @kbdev:      The kbase device structure for the device (must be a valid
 *              pointer)
 */
void kbase_pm_do_poweroff(struct kbase_device *kbdev);

#if defined(CONFIG_MALI_BIFROST_DEVFREQ) || defined(CONFIG_MALI_BIFROST_DVFS)
void kbase_pm_get_dvfs_metrics(struct kbase_device *kbdev,
			       struct kbasep_pm_metrics *last,
			       struct kbasep_pm_metrics *diff);
#endif /* defined(CONFIG_MALI_BIFROST_DEVFREQ) || defined(CONFIG_MALI_BIFROST_DVFS) */
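
/*
 * Example (illustrative sketch): a devfreq governor could derive a utilisation
 * figure from the metrics delta. The time_busy/time_idle fields and the
 * last_devfreq_metrics member are assumed from the driver's devfreq code.
 *
 *	struct kbasep_pm_metrics diff;
 *
 *	kbase_pm_get_dvfs_metrics(kbdev, &kbdev->last_devfreq_metrics, &diff);
 *	if (diff.time_busy + diff.time_idle)
 *		utilisation = (100 * diff.time_busy) /
 *			      (diff.time_busy + diff.time_idle);
 */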

#ifdef CONFIG_MALI_BIFROST_DVFS

#if MALI_USE_CSF
/**
 * kbase_platform_dvfs_event - Report utilisation to DVFS code for CSF GPU
 *
 * @kbdev:         The kbase device structure for the device (must be a
 *                 valid pointer)
 * @utilisation:   The current calculated utilisation by the metrics system.
 *
 * Function provided by platform specific code when DVFS is enabled to allow
 * the power management metrics system to report utilisation.
 *
 * Return: 0 on failure and non-zero on success.
 */
int kbase_platform_dvfs_event(struct kbase_device *kbdev, u32 utilisation);
#else
/**
 * kbase_platform_dvfs_event - Report utilisation to DVFS code for JM GPU
 *
 * @kbdev:         The kbase device structure for the device (must be a
 *                 valid pointer)
 * @utilisation:   The current calculated utilisation by the metrics system.
 * @util_gl_share: The current calculated gl share of utilisation.
 * @util_cl_share: The current calculated cl share of utilisation per core
 *                 group.
 *
 * Function provided by platform specific code when DVFS is enabled to allow
 * the power management metrics system to report utilisation.
 *
 * Return: 0 on failure and non-zero on success.
 */
int kbase_platform_dvfs_event(struct kbase_device *kbdev, u32 utilisation,
			      u32 util_gl_share, u32 util_cl_share[2]);
#endif

#endif /* CONFIG_MALI_BIFROST_DVFS */
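
/*
 * Example (illustrative sketch): a minimal platform implementation of the JM
 * variant might simply feed the utilisation into a platform DVFS governor;
 * platform_gpu_dvfs_submit() is a hypothetical platform helper.
 *
 *	int kbase_platform_dvfs_event(struct kbase_device *kbdev,
 *				      u32 utilisation, u32 util_gl_share,
 *				      u32 util_cl_share[2])
 *	{
 *		platform_gpu_dvfs_submit(kbdev->dev, utilisation);
 *		return 1;
 *	}
 */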

/**
 * kbase_pm_power_changed - Update the PM state machines in response to a
 *                          change in core power state (typically called from
 *                          the GPU interrupt handler).
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 */
void kbase_pm_power_changed(struct kbase_device *kbdev);

/**
 * kbase_pm_metrics_update - Inform the metrics system that an atom is either
 *                           about to be run or has just completed.
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 * @now:   Pointer to the timestamp of the change, or NULL to use current time
 *
 * Caller must hold hwaccess_lock
 */
void kbase_pm_metrics_update(struct kbase_device *kbdev,
				ktime_t *now);

/**
 * kbase_pm_cache_snoop_enable - Allow CPU snoops on the GPU
 * @kbdev: Device pointer
 *
 * If the GPU does not have coherency this is a no-op.
 *
 * This function should be called after L2 power up.
 */
void kbase_pm_cache_snoop_enable(struct kbase_device *kbdev);

/**
 * kbase_pm_cache_snoop_disable - Prevent CPU snoops on the GPU
 * @kbdev: Device pointer
 *
 * If the GPU does not have coherency this is a no-op.
 *
 * This function should be called before L2 power off.
 */
void kbase_pm_cache_snoop_disable(struct kbase_device *kbdev);

#ifdef CONFIG_MALI_BIFROST_DEVFREQ
/**
 * kbase_devfreq_set_core_mask - Set devfreq core mask
 * @kbdev:     Device pointer
 * @core_mask: New core mask
 *
 * This function is used by devfreq to change the available core mask as
 * required by Dynamic Core Scaling.
 */
void kbase_devfreq_set_core_mask(struct kbase_device *kbdev, u64 core_mask);
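
/*
 * Example (illustrative sketch): Dynamic Core Scaling could restrict the GPU
 * to (at most) the first two present shader cores, e.g.:
 *
 *	u64 present = kbase_pm_get_present_cores(kbdev, KBASE_PM_CORE_SHADER);
 *
 *	kbase_devfreq_set_core_mask(kbdev, present & 0x3);
 */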
#endif

/**
 * kbase_pm_reset_start_locked - Signal that GPU reset has started
 * @kbdev: Device pointer
 *
 * Normal power management operation will be suspended until the reset has
 * completed.
 *
 * Caller must hold hwaccess_lock.
 */
void kbase_pm_reset_start_locked(struct kbase_device *kbdev);

/**
 * kbase_pm_reset_complete - Signal that GPU reset has completed
 * @kbdev: Device pointer
 *
 * Normal power management operation will be resumed. The power manager will
 * re-evaluate what cores are needed and power on or off as required.
 */
void kbase_pm_reset_complete(struct kbase_device *kbdev);

#if !MALI_USE_CSF
/**
 * kbase_pm_protected_override_enable - Enable the protected mode override
 * @kbdev: Device pointer
 *
 * When the protected mode override is enabled, all shader cores are requested
 * to power down, and the L2 power state can be controlled by
 * kbase_pm_protected_l2_override().
 *
 * Caller must hold hwaccess_lock.
 */
void kbase_pm_protected_override_enable(struct kbase_device *kbdev);

/**
 * kbase_pm_protected_override_disable - Disable the protected mode override
 * @kbdev: Device pointer
 *
 * Caller must hold hwaccess_lock.
 */
void kbase_pm_protected_override_disable(struct kbase_device *kbdev);

/**
 * kbase_pm_protected_l2_override - Control the protected mode L2 override
 * @kbdev: Device pointer
 * @override: true to enable the override, false to disable
 *
 * When the driver is transitioning in or out of protected mode, the L2 cache is
 * forced to power off. This can be overridden to force the L2 cache to power
 * on. This is required to change coherency settings on some GPUs.
 */
void kbase_pm_protected_l2_override(struct kbase_device *kbdev, bool override);

/**
 * kbase_pm_protected_entry_override_enable - Enable the protected mode entry
 *                                            override
 * @kbdev: Device pointer
 *
 * Initiate a GPU reset and enable the protected mode entry override flag if
 * the l2_always_on workaround is enabled and the platform is fully coherent.
 * If a GPU reset is already ongoing then the protected mode entry override
 * flag will not be enabled and the function will have to be called again.
 *
 * When the protected mode entry override flag is enabled to power down the L2
 * via a GPU reset, the GPU reset handling behaviour changes. For example, the
 * call to kbase_backend_reset() is skipped, HW counters are not re-enabled and
 * the L2 isn't powered up again post reset.
 * This is needed only as a workaround for a HW issue where an explicit power
 * down of the L2 causes a glitch. For entering protected mode on fully
 * coherent platforms the L2 needs to be powered down to switch to IO coherency
 * mode, so to avoid the glitch a GPU reset is used to power down the L2.
 * Hence, this function does nothing on systems where the glitch issue isn't
 * present.
 *
 * Caller must hold hwaccess_lock. Should only be called during the transition
 * to enter protected mode.
 *
 * Return: -EAGAIN if a GPU reset was required for the glitch workaround but
 * was already ongoing, otherwise 0.
 */
int kbase_pm_protected_entry_override_enable(struct kbase_device *kbdev);
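
/*
 * Example (illustrative sketch): callers are expected to retry on -EAGAIN,
 * e.g. by backing out and re-attempting the protected mode entry later:
 *
 *	err = kbase_pm_protected_entry_override_enable(kbdev);
 *	if (err == -EAGAIN) {
 *		(... a GPU reset is already ongoing; back off and retry ...)
 *	}
 */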

/**
 * kbase_pm_protected_entry_override_disable - Disable the protected mode entry
 *                                             override
 * @kbdev: Device pointer
 *
 * This shall be called once the L2 has powered down and the switch to IO
 * coherency mode has been made. As with
 * kbase_pm_protected_entry_override_enable(), this function does nothing on
 * systems where the glitch issue isn't present.
 *
 * Caller must hold hwaccess_lock. Should only be called during the transition
 * to enter protected mode.
 */
void kbase_pm_protected_entry_override_disable(struct kbase_device *kbdev);
#endif

/* If true, the driver should explicitly control corestack power management,
 * instead of relying on the Power Domain Controller.
 */
extern bool corestack_driver_control;

/**
 * kbase_pm_is_l2_desired - Check whether the L2 cache is desired
 *
 * @kbdev: Device pointer
 *
 * This shall be called to check whether the L2 cache needs to be powered on.
 *
 * Return: true if the L2 cache needs to be powered on
 */
bool kbase_pm_is_l2_desired(struct kbase_device *kbdev);

#if MALI_USE_CSF
/**
 * kbase_pm_is_mcu_desired - Check whether MCU is desired
 *
 * @kbdev: Device pointer
 *
 * This shall be called to check whether the MCU needs to be enabled.
 *
 * Return: true if the MCU needs to be enabled.
 */
bool kbase_pm_is_mcu_desired(struct kbase_device *kbdev);

/**
 * kbase_pm_is_mcu_inactive - Check if the MCU is inactive (i.e. either
 *                            it is disabled or it is in sleep)
 *
 * @kbdev: kbase device
 * @state: state of the MCU state machine.
 *
 * This function must be called with hwaccess_lock held.
 * L2 cache can be turned off if this function returns true.
 *
 * Return: true if MCU is inactive
 */
bool kbase_pm_is_mcu_inactive(struct kbase_device *kbdev,
			      enum kbase_mcu_state state);

/**
 * kbase_pm_idle_groups_sched_suspendable - Check whether the scheduler can be
 *                                          suspended to a low power state when
 *                                          all the CSGs are idle
 *
 * @kbdev: Device pointer
 *
 * Return: true if allowed to enter the suspended state.
 */
static inline
bool kbase_pm_idle_groups_sched_suspendable(struct kbase_device *kbdev)
{
	lockdep_assert_held(&kbdev->hwaccess_lock);

	return !(kbdev->pm.backend.csf_pm_sched_flags &
		 CSF_DYNAMIC_PM_SCHED_IGNORE_IDLE);
}

/**
 * kbase_pm_no_runnables_sched_suspendable - Check whether the scheduler can be
 *                                           suspended to a low power state when
 *                                           there are no runnable CSGs.
 *
 * @kbdev: Device pointer
 *
 * Return: true if allowed to enter the suspended state.
 */
static inline
bool kbase_pm_no_runnables_sched_suspendable(struct kbase_device *kbdev)
{
	lockdep_assert_held(&kbdev->hwaccess_lock);

	return !(kbdev->pm.backend.csf_pm_sched_flags &
		 CSF_DYNAMIC_PM_SCHED_NO_SUSPEND);
}

/**
 * kbase_pm_no_mcu_core_pwroff - Check whether the PM is required to keep the
 *                               MCU shader cores powered in accordance with
 *                               the active power management policy
 *
 * @kbdev: Device pointer
 *
 * Return: true if the MCU is to remain powered.
 */
static inline bool kbase_pm_no_mcu_core_pwroff(struct kbase_device *kbdev)
{
	lockdep_assert_held(&kbdev->hwaccess_lock);

	return kbdev->pm.backend.csf_pm_sched_flags &
		CSF_DYNAMIC_PM_CORE_KEEP_ON;
}

/**
 * kbase_pm_mcu_is_in_desired_state - Check if MCU is in stable ON/OFF state.
 *
 * @kbdev: Device pointer
 *
 * Return: true if MCU is in stable ON/OFF state.
 */
static inline bool kbase_pm_mcu_is_in_desired_state(struct kbase_device *kbdev)
{
	bool in_desired_state = true;

	if (kbase_pm_is_mcu_desired(kbdev) && kbdev->pm.backend.mcu_state != KBASE_MCU_ON)
		in_desired_state = false;
	else if (!kbase_pm_is_mcu_desired(kbdev) &&
		 (kbdev->pm.backend.mcu_state != KBASE_MCU_OFF) &&
		 (kbdev->pm.backend.mcu_state != KBASE_MCU_IN_SLEEP))
		in_desired_state = false;

	return in_desired_state;
}

#endif

/**
 * kbase_pm_l2_is_in_desired_state - Check if L2 is in stable ON/OFF state.
 *
 * @kbdev: Device pointer
 *
 * Return: true if L2 is in stable ON/OFF state.
 */
static inline bool kbase_pm_l2_is_in_desired_state(struct kbase_device *kbdev)
{
	bool in_desired_state = true;

	if (kbase_pm_is_l2_desired(kbdev) && kbdev->pm.backend.l2_state != KBASE_L2_ON)
		in_desired_state = false;
	else if (!kbase_pm_is_l2_desired(kbdev) && kbdev->pm.backend.l2_state != KBASE_L2_OFF)
		in_desired_state = false;

	return in_desired_state;
}

/**
 * kbase_pm_lock - Lock all necessary mutexes to perform PM actions
 *
 * @kbdev: Device pointer
 *
 * This function locks the correct mutexes regardless of GPU architecture.
 */
static inline void kbase_pm_lock(struct kbase_device *kbdev)
{
#if !MALI_USE_CSF
	mutex_lock(&kbdev->js_data.runpool_mutex);
#endif /* !MALI_USE_CSF */
	mutex_lock(&kbdev->pm.lock);
}

/**
 * kbase_pm_unlock - Unlock mutexes locked by kbase_pm_lock
 *
 * @kbdev: Device pointer
 */
static inline void kbase_pm_unlock(struct kbase_device *kbdev)
{
	mutex_unlock(&kbdev->pm.lock);
#if !MALI_USE_CSF
	mutex_unlock(&kbdev->js_data.runpool_mutex);
#endif /* !MALI_USE_CSF */
}
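
/*
 * Example (illustrative sketch): PM operations are bracketed with the pair
 * above, which hides the extra runpool_mutex needed on Job Manager GPUs:
 *
 *	kbase_pm_lock(kbdev);
 *	(... manipulate PM state that requires kbdev->pm.lock ...)
 *	kbase_pm_unlock(kbdev);
 */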

#if MALI_USE_CSF && defined(KBASE_PM_RUNTIME)
/**
 * kbase_pm_gpu_sleep_allowed - Check if the GPU is allowed to be put in sleep
 *
 * @kbdev: Device pointer
 *
 * This function is called on GPU idle notification and if it returns false then
 * GPU power down will be triggered by suspending the CSGs and halting the MCU.
 *
 * Return: true if the GPU is allowed to be in the sleep state.
 */
static inline bool kbase_pm_gpu_sleep_allowed(struct kbase_device *kbdev)
{
	/* If the autosuspend_delay has been set to 0 then it doesn't make
	 * sense to first put the GPU to sleep state and then power it down;
	 * instead it would be better to power it down right away.
	 * The same applies when autosuspend_delay is set to a negative value,
	 * which implies that runtime pm is effectively disabled by the
	 * kernel.
	 * A high positive value of autosuspend_delay can be used to keep the
	 * GPU in sleep state for a long time.
	 */
	if (unlikely(!kbdev->dev->power.autosuspend_delay ||
		     (kbdev->dev->power.autosuspend_delay < 0)))
		return false;

	return kbdev->pm.backend.gpu_sleep_supported;
}

/**
 * kbase_pm_enable_db_mirror_interrupt - Enable the doorbell mirror interrupt to
 *                                       detect the User doorbell rings.
 *
 * @kbdev: Device pointer
 *
 * This function is called just before sending the sleep request to MCU firmware
 * so that User doorbell rings can be detected whilst GPU remains in the sleep
 * state.
 */
static inline void kbase_pm_enable_db_mirror_interrupt(struct kbase_device *kbdev)
{
	lockdep_assert_held(&kbdev->hwaccess_lock);

	if (!kbdev->pm.backend.db_mirror_interrupt_enabled) {
		u32 irq_mask = kbase_reg_read(kbdev,
				GPU_CONTROL_REG(GPU_IRQ_MASK));

		WARN_ON(irq_mask & DOORBELL_MIRROR);

		kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_MASK),
				irq_mask | DOORBELL_MIRROR);
		kbdev->pm.backend.db_mirror_interrupt_enabled = true;
	}
}

/**
 * kbase_pm_disable_db_mirror_interrupt - Disable the doorbell mirror interrupt.
 *
 * @kbdev: Device pointer
 *
 * This function is called when the doorbell mirror interrupt is received or
 * when the MCU needs to be reactivated by enabling the doorbell notification.
 */
static inline void kbase_pm_disable_db_mirror_interrupt(struct kbase_device *kbdev)
{
	lockdep_assert_held(&kbdev->hwaccess_lock);

	if (kbdev->pm.backend.db_mirror_interrupt_enabled) {
		u32 irq_mask = kbase_reg_read(kbdev,
				GPU_CONTROL_REG(GPU_IRQ_MASK));

		kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_MASK),
				irq_mask & ~DOORBELL_MIRROR);
		kbdev->pm.backend.db_mirror_interrupt_enabled = false;
	}
}
#endif

/**
 * kbase_pm_l2_allow_mmu_page_migration - L2 state allows MMU page migration or not
 *
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 *
 * Check whether the L2 state is in power transition phase or not. If it is, the MMU
 * page migration should be deferred. The caller must hold hwaccess_lock, and, if MMU
 * page migration is intended, immediately start the MMU migration action without
 * dropping the lock. When page migration begins, a flag is set in kbdev that would
 * prevent the L2 state machine traversing into power transition phases, until
 * the MMU migration action ends.
 *
 * Return: true if MMU page migration is allowed
 */
static inline bool kbase_pm_l2_allow_mmu_page_migration(struct kbase_device *kbdev)
{
	struct kbase_pm_backend_data *backend = &kbdev->pm.backend;

	lockdep_assert_held(&kbdev->hwaccess_lock);

	return (backend->l2_state != KBASE_L2_PEND_ON && backend->l2_state != KBASE_L2_PEND_OFF);
}
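
/*
 * Example (illustrative sketch): an MMU page migration path would test the
 * gate and start the migration without dropping the lock:
 *
 *	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
 *	if (kbase_pm_l2_allow_mmu_page_migration(kbdev)) {
 *		(... start the MMU migration action, lock still held ...)
 *	}
 *	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
 */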

#endif /* _KBASE_BACKEND_PM_INTERNAL_H_ */