/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
 *
 * (C) COPYRIGHT 2010-2022 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU license.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 */

#ifndef _KBASE_H_
#define _KBASE_H_

#include <mali_malisw.h>

#include <mali_kbase_debug.h>

#include <linux/atomic.h>
#include <linux/highmem.h>
#include <linux/hrtimer.h>
#include <linux/ktime.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/version.h>
#if (KERNEL_VERSION(4, 11, 0) <= LINUX_VERSION_CODE)
#include <linux/sched/mm.h>
#endif
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>

#include <uapi/gpu/arm/bifrost/mali_base_kernel.h>
#include <mali_kbase_linux.h>

/*
 * Include mali_kbase_defs.h first as this provides types needed by other local
 * header files.
 */
#include "mali_kbase_defs.h"

#include "debug/mali_kbase_debug_ktrace.h"
#include "context/mali_kbase_context.h"
#include "mali_kbase_strings.h"
#include "mali_kbase_mem_lowlevel.h"
#include "mali_kbase_utility.h"
#include "mali_kbase_mem.h"
#include "mmu/mali_kbase_mmu.h"
#include "mali_kbase_gpu_memory_debugfs.h"
#include "mali_kbase_mem_profile_debugfs.h"
#include "mali_kbase_gpuprops.h"
#include <uapi/gpu/arm/bifrost/mali_kbase_ioctl.h>
#if !MALI_USE_CSF
#include "mali_kbase_debug_job_fault.h"
#include "mali_kbase_jd_debugfs.h"
#include "mali_kbase_jm.h"
#include "mali_kbase_js.h"
#else /* !MALI_USE_CSF */
#include "csf/mali_kbase_debug_csf_fault.h"
#endif /* MALI_USE_CSF */

#include "ipa/mali_kbase_ipa.h"

#if IS_ENABLED(CONFIG_GPU_TRACEPOINTS)
#include <trace/events/gpu.h>
#endif

#include "mali_linux_trace.h"

#if MALI_USE_CSF
#include "csf/mali_kbase_csf.h"

/* Physical memory group ID for CSF user I/O.
 */
#define KBASE_MEM_GROUP_CSF_IO BASE_MEM_GROUP_DEFAULT

/* Physical memory group ID for CSF firmware.
 */
#define KBASE_MEM_GROUP_CSF_FW BASE_MEM_GROUP_DEFAULT
#endif

/* Physical memory group ID for a special page which can alias several regions.
 */
#define KBASE_MEM_GROUP_SINK BASE_MEM_GROUP_DEFAULT

/*
 * Kernel-side Base (KBase) APIs
 */

struct kbase_device *kbase_device_alloc(void);
/*
 * Note: the configuration attributes member of kbdev must have been
 * set up before calling kbase_device_init.
 */

int kbase_device_misc_init(struct kbase_device *kbdev);
void kbase_device_misc_term(struct kbase_device *kbdev);

#if !MALI_USE_CSF
void kbase_enable_quick_reset(struct kbase_device *kbdev);
void kbase_disable_quick_reset(struct kbase_device *kbdev);
bool kbase_is_quick_reset_enabled(struct kbase_device *kbdev);
#endif

void kbase_device_free(struct kbase_device *kbdev);
int kbase_device_has_feature(struct kbase_device *kbdev, u32 feature);

/* Needed for gator integration and for reporting vsync information */
struct kbase_device *kbase_find_device(int minor);
void kbase_release_device(struct kbase_device *kbdev);

/**
 * kbase_context_get_unmapped_area() - get an address range which is currently
 *                                     unmapped.
 * @kctx: A kernel base context (which has its own GPU address space).
 * @addr: CPU mapped address (set to 0 since MAP_FIXED mapping is not allowed
 *        as the Mali GPU driver decides about the mapping).
 * @len: Length of the address range.
 * @pgoff: Page offset within the GPU address space of the kbase context.
 * @flags: Flags for the allocation.
 *
 * Finds the unmapped address range which satisfies requirements specific to
 * the GPU and those provided by the call parameters.
 *
 * 1) Requirement for allocations greater than 2MB:
 * - the alignment offset is set to 2MB and the alignment mask to 2MB
 * decremented by 1.
 *
 * 2) Requirements imposed for the shader memory alignment:
 * - the alignment is decided by the number of GPU pc bits, which can be read
 * from the GPU properties of the device associated with this kbase context;
 * the alignment offset is set to this value in bytes and the alignment mask
 * to the offset decremented by 1.
 * - allocations must not be at 4GB boundaries. Such cases are indicated by
 * the flag KBASE_REG_GPU_NX not being set (check the flags of the kbase
 * region). 4GB boundaries can be checked against @ref BASE_MEM_MASK_4GB.
 *
 * 3) Requirements imposed for tiler memory alignment, cases indicated by
 * the flag @ref KBASE_REG_TILER_ALIGN_TOP (check the flags of the kbase
 * region):
 * - the alignment offset is set to the difference between the kbase region
 * extension (converted from the original value in pages to bytes) and the
 * kbase region initial_commit (also converted from the original value in
 * pages to bytes); the alignment mask is set to the kbase region extension
 * in bytes and decremented by 1.
 *
 * Return: if successful, the address of the unmapped area aligned as
 *         required; a negative error code in case of failure.
 */
unsigned long kbase_context_get_unmapped_area(struct kbase_context *kctx,
		const unsigned long addr, const unsigned long len,
		const unsigned long pgoff, const unsigned long flags);
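
/*
 * Illustrative sketch (hypothetical, not part of the driver API): for the
 * tiler case above, the alignment pair would be derived roughly as follows,
 * with extension_bytes and initial_commit_bytes standing for the region
 * fields already converted from pages to bytes:
 *
 *	align_offset = extension_bytes - initial_commit_bytes;
 *	align_mask = extension_bytes - 1;
 *
 * and for the plain greater-than-2MB case:
 *
 *	align_offset = SZ_2M;
 *	align_mask = SZ_2M - 1;
 */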

int assign_irqs(struct kbase_device *kbdev);

int kbase_sysfs_init(struct kbase_device *kbdev);
void kbase_sysfs_term(struct kbase_device *kbdev);

int kbase_protected_mode_init(struct kbase_device *kbdev);
void kbase_protected_mode_term(struct kbase_device *kbdev);

/**
 * kbase_device_pm_init() - Performs power management initialization and
 * verifies device tree configurations.
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 *
 * Return: 0 if successful, otherwise a standard Linux error code
 */
int kbase_device_pm_init(struct kbase_device *kbdev);

/**
 * kbase_device_pm_term() - Performs power management deinitialization and
 * frees resources.
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 *
 * Cleans up all the power management resources.
 */
void kbase_device_pm_term(struct kbase_device *kbdev);

int power_control_init(struct kbase_device *kbdev);
void power_control_term(struct kbase_device *kbdev);

#if IS_ENABLED(CONFIG_DEBUG_FS)
void kbase_device_debugfs_term(struct kbase_device *kbdev);
int kbase_device_debugfs_init(struct kbase_device *kbdev);
#else /* CONFIG_DEBUG_FS */
static inline int kbase_device_debugfs_init(struct kbase_device *kbdev)
{
	return 0;
}

static inline void kbase_device_debugfs_term(struct kbase_device *kbdev) { }
#endif /* CONFIG_DEBUG_FS */

int registers_map(struct kbase_device *kbdev);
void registers_unmap(struct kbase_device *kbdev);

int kbase_device_coherency_init(struct kbase_device *kbdev);

#if !MALI_USE_CSF
int kbase_jd_init(struct kbase_context *kctx);
void kbase_jd_exit(struct kbase_context *kctx);

/**
 * kbase_jd_submit - Submit atoms to the job dispatcher
 *
 * @kctx: The kbase context to submit to
 * @user_addr: The address in user space of the struct base_jd_atom array
 * @nr_atoms: The number of atoms in the array
 * @stride: sizeof(struct base_jd_atom)
 * @uk6_atom: true if the atoms are legacy atoms (struct base_jd_atom_v2_uk6)
 *
 * Return: 0 on success or error code
 */
int kbase_jd_submit(struct kbase_context *kctx,
		void __user *user_addr, u32 nr_atoms, u32 stride,
		bool uk6_atom);
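
/*
 * Usage sketch (hypothetical, not part of this header): an ioctl handler
 * forwarding a userspace submission might call this roughly as follows,
 * where `submit` stands for a decoded ioctl payload:
 *
 *	int err = kbase_jd_submit(kctx, u64_to_user_ptr(submit->addr),
 *				  submit->nr_atoms, submit->stride, false);
 *	if (err)
 *		return err;
 */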

/**
 * kbase_jd_done_worker - Handle a job completion
 * @data: a &struct work_struct
 *
 * This function requeues the job from the runpool (if it was soft-stopped or
 * removed from NEXT registers).
 *
 * Removes it from the system if it finished/failed/was cancelled.
 *
 * Resolves dependencies to add dependent jobs to the context, potentially
 * starting them if necessary (which may add more references to the context).
 *
 * Releases the reference to the context from the no-longer-running job.
 *
 * Handles retrying submission outside of IRQ context if it failed from within
 * IRQ context.
 */
void kbase_jd_done_worker(struct work_struct *data);

void kbase_jd_done(struct kbase_jd_atom *katom, int slot_nr, ktime_t *end_timestamp,
		kbasep_js_atom_done_code done_code);
void kbase_jd_cancel(struct kbase_device *kbdev, struct kbase_jd_atom *katom);
void kbase_jd_zap_context(struct kbase_context *kctx);

/*
 * kbase_jd_done_nolock - Perform the necessary handling of an atom that has
 *                        completed execution.
 *
 * @katom: Pointer to the atom that completed execution
 * @post_immediately: Flag indicating that the completion event can be posted
 *                    immediately for @katom and for the other atoms dependent
 *                    on @katom which also completed execution. The flag is
 *                    false only for the case where the function is called by
 *                    kbase_jd_done_worker() on the completion of an atom
 *                    running on the GPU.
 *
 * Note that if this is a soft-job that has had kbase_prepare_soft_job called
 * on it then the caller is responsible for calling kbase_finish_soft_job
 * *before* calling this function.
 *
 * The caller must hold the kbase_jd_context.lock.
 */
bool kbase_jd_done_nolock(struct kbase_jd_atom *katom, bool post_immediately);

void kbase_jd_free_external_resources(struct kbase_jd_atom *katom);
void kbase_jd_dep_clear_locked(struct kbase_jd_atom *katom);

/**
 * kbase_job_done - Process completed jobs from job interrupt
 * @kbdev: Pointer to the kbase device.
 * @done: Bitmask of done or failed jobs, from the JOB_IRQ_STAT register
 *
 * This function processes the completed, or failed, jobs from the GPU job
 * slots, for the bits set in the @done bitmask.
 *
 * The hwaccess_lock must be held when calling this function.
 */
void kbase_job_done(struct kbase_device *kbdev, u32 done);
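
/*
 * Illustrative sketch (hypothetical, not the driver's actual IRQ handler):
 * a caller on the job IRQ path would typically latch the interrupt status
 * and hand the bitmask over under the hwaccess_lock; the register-read
 * helper name here is illustrative only:
 *
 *	u32 done = read_job_irq_status(kbdev);
 *	unsigned long flags;
 *
 *	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
 *	kbase_job_done(kbdev, done);
 *	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
 */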

/**
 * kbase_job_slot_ctx_priority_check_locked() - Check for lower priority atoms
 *                                              and soft stop them
 * @kctx: Pointer to context to check.
 * @katom: Pointer to priority atom.
 *
 * Atoms from @kctx on the same job slot as @katom, which have lower priority
 * than @katom, will be soft stopped and put back in the queue, so that atoms
 * with higher priority can run.
 *
 * The hwaccess_lock must be held when calling this function.
 */
void kbase_job_slot_ctx_priority_check_locked(struct kbase_context *kctx,
				struct kbase_jd_atom *katom);

/**
 * kbase_job_slot_softstop_start_rp() - Soft-stop the atom at the start
 *                                      of a renderpass.
 * @kctx: Pointer to a kernel base context.
 * @reg:  Reference of a growable GPU memory region in the same context.
 *        Takes ownership of the reference if successful.
 *
 * Used to switch to incremental rendering if we have nearly run out of
 * virtual address space in a growable memory region and the atom currently
 * executing on a job slot is the tiler job chain at the start of a renderpass.
 *
 * Return: 0 if successful, otherwise a negative error code.
 */
int kbase_job_slot_softstop_start_rp(struct kbase_context *kctx,
		struct kbase_va_region *reg);

/**
 * kbase_job_slot_softstop - Soft-stop the specified job slot
 *
 * @kbdev:         The kbase device
 * @js:            The job slot to soft-stop
 * @target_katom:  The job that should be soft-stopped (or NULL for any job)
 * Context:
 *   The job slot lock must be held when calling this function.
 *   The job slot must not already be in the process of being soft-stopped.
 *
 * Where possible any job in the next register is evicted before the soft-stop.
 */
void kbase_job_slot_softstop(struct kbase_device *kbdev, int js,
		struct kbase_jd_atom *target_katom);

void kbase_job_slot_softstop_swflags(struct kbase_device *kbdev, unsigned int js,
				     struct kbase_jd_atom *target_katom, u32 sw_flags);

/**
 * kbase_job_check_enter_disjoint - Potentially enter disjoint mode
 * @kbdev: kbase device
 * @action: the event which has occurred
 * @core_reqs: core requirements of the atom
 * @target_katom: the atom which is being affected
 *
 * For a certain soft-stop action, work out whether to enter disjoint
 * state.
 *
 * This does not register multiple disjoint events if the atom has already
 * started a disjoint period.
 *
 * @core_reqs can be supplied as 0 if the atom had not started on the hardware
 * (and so a 'real' soft/hard-stop was not required, but it still interrupted
 * flow, perhaps on another context).
 *
 * kbase_job_check_leave_disjoint() should be used to end the disjoint
 * state when the soft/hard-stop action is complete.
 */
void kbase_job_check_enter_disjoint(struct kbase_device *kbdev, u32 action,
		base_jd_core_req core_reqs, struct kbase_jd_atom *target_katom);

/**
 * kbase_job_check_leave_disjoint - Potentially leave disjoint state
 * @kbdev: kbase device
 * @target_katom: atom which is finishing
 *
 * Works out whether to leave the disjoint state when finishing an atom whose
 * disjoint state was originally entered by kbase_job_check_enter_disjoint().
 */
void kbase_job_check_leave_disjoint(struct kbase_device *kbdev,
		struct kbase_jd_atom *target_katom);

#endif /* !MALI_USE_CSF */

void kbase_event_post(struct kbase_context *ctx, struct kbase_jd_atom *event);
#if !MALI_USE_CSF
int kbase_event_dequeue(struct kbase_context *ctx, struct base_jd_event_v2 *uevent);
#endif /* !MALI_USE_CSF */
int kbase_event_pending(struct kbase_context *ctx);
int kbase_event_init(struct kbase_context *kctx);
void kbase_event_close(struct kbase_context *kctx);
void kbase_event_cleanup(struct kbase_context *kctx);
void kbase_event_wakeup(struct kbase_context *kctx);

/**
 * kbasep_jit_alloc_validate() - Validate the JIT allocation info.
 *
 * @kctx:	Pointer to the kbase context within which the JIT
 *		allocation is to be validated.
 * @info:	Pointer to the struct base_jit_alloc_info
 *		which is to be validated.
 * Return: 0 if the JIT allocation is valid; negative error code otherwise
 */
int kbasep_jit_alloc_validate(struct kbase_context *kctx,
					struct base_jit_alloc_info *info);

/**
 * kbase_jit_retry_pending_alloc() - Retry blocked just-in-time memory
 *                                   allocations.
 *
 * @kctx:	Pointer to the kbase context within which the just-in-time
 *		memory allocations are to be retried.
 */
void kbase_jit_retry_pending_alloc(struct kbase_context *kctx);

/**
 * kbase_free_user_buffer() - Free memory allocated for a struct
 *		kbase_debug_copy_buffer.
 *
 * @buffer:	Pointer to the memory location allocated for the object
 *		of the type struct kbase_debug_copy_buffer.
 */
static inline void kbase_free_user_buffer(
		struct kbase_debug_copy_buffer *buffer)
{
	struct page **pages = buffer->extres_pages;
	int nr_pages = buffer->nr_extres_pages;

	if (pages) {
		int i;

		/* Drop the reference taken on each page when the buffer was
		 * set up, then release the page array itself.
		 */
		for (i = 0; i < nr_pages; i++) {
			struct page *pg = pages[i];

			if (pg)
				put_page(pg);
		}
		kfree(pages);
	}
}

#if !MALI_USE_CSF
int kbase_process_soft_job(struct kbase_jd_atom *katom);
int kbase_prepare_soft_job(struct kbase_jd_atom *katom);
void kbase_finish_soft_job(struct kbase_jd_atom *katom);
void kbase_cancel_soft_job(struct kbase_jd_atom *katom);
void kbase_resume_suspended_soft_jobs(struct kbase_device *kbdev);
void kbasep_remove_waiting_soft_job(struct kbase_jd_atom *katom);
#if IS_ENABLED(CONFIG_SYNC_FILE)
void kbase_soft_event_wait_callback(struct kbase_jd_atom *katom);
#endif
int kbase_soft_event_update(struct kbase_context *kctx,
			    u64 event,
			    unsigned char new_status);

void kbasep_soft_job_timeout_worker(struct timer_list *timer);
void kbasep_complete_triggered_soft_events(struct kbase_context *kctx, u64 evt);
#endif /* !MALI_USE_CSF */

void kbasep_as_do_poke(struct work_struct *work);

/**
 * kbase_pm_is_suspending - Check whether a system suspend is in progress,
 * or the system has already been suspended
 *
 * @kbdev: The kbase device structure for the device
 *
 * The caller should ensure that either kbdev->pm.active_count_lock is held,
 * or a memory barrier (dmb) was executed recently (to ensure the value is
 * up-to-date). However, without the lock the value could change afterwards.
 *
 * Return: true if a suspend is in progress or the system is suspended,
 *         false otherwise
 */
static inline bool kbase_pm_is_suspending(struct kbase_device *kbdev)
{
	return kbdev->pm.suspending;
}
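
/*
 * Usage sketch (hypothetical): a caller that must not start new work while
 * a suspend is in flight would sample the flag under the lock named above
 * (lock and helper names here are illustrative only):
 *
 *	spin_lock(&kbdev->pm.active_count_lock);
 *	if (!kbase_pm_is_suspending(kbdev))
 *		start_new_work(kbdev);
 *	spin_unlock(&kbdev->pm.active_count_lock);
 */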

#ifdef CONFIG_MALI_ARBITER_SUPPORT
/*
 * Check whether a GPU lost is in progress
 *
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 *
 * Indicates whether a GPU lost notification has been received and jobs are
 * no longer being scheduled.
 *
 * Return: true if the GPU is lost, false otherwise
 */
static inline bool kbase_pm_is_gpu_lost(struct kbase_device *kbdev)
{
	return (atomic_read(&kbdev->pm.gpu_lost) == 0 ? false : true);
}

/*
 * Set or clear the GPU lost state
 *
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 * @gpu_lost: true to activate the GPU lost state, false to deactivate it
 *
 * Puts the power management code into the GPU lost state or takes it out of
 * the state. Once in the GPU lost state new GPU jobs will no longer be
 * scheduled.
 */
static inline void kbase_pm_set_gpu_lost(struct kbase_device *kbdev,
	bool gpu_lost)
{
	const int new_val = (gpu_lost ? 1 : 0);
	/* atomic_xchg returns the previous value, so a trace event is only
	 * added when the state actually changes.
	 */
	const int cur_val = atomic_xchg(&kbdev->pm.gpu_lost, new_val);

	if (new_val != cur_val)
		KBASE_KTRACE_ADD(kbdev, ARB_GPU_LOST, NULL, new_val);
}
#endif

/**
 * kbase_pm_is_active - Determine whether the GPU is active
 *
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 *
 * This takes into account whether there is an active context reference.
 *
 * Return: true if the GPU is active, false otherwise
 */
static inline bool kbase_pm_is_active(struct kbase_device *kbdev)
{
	return kbdev->pm.active_count > 0;
}

/**
 * kbase_pm_lowest_gpu_freq_init() - Find the lowest frequency that the GPU
 *                                can run at, using the device tree, and save
 *                                it within kbdev.
 * @kbdev: Pointer to kbase device.
 *
 * This function could be called from kbase_clk_rate_trace_manager_init,
 * but is left separate as it can be called as soon as
 * dev_pm_opp_of_add_table() has been called to initialize the OPP table,
 * which occurs in power_control_init().
 *
 * Return: 0 in all cases.
 */
int kbase_pm_lowest_gpu_freq_init(struct kbase_device *kbdev);
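
/*
 * Call-ordering sketch (hypothetical probe-path excerpt): the OPP table
 * must exist before the lowest frequency can be looked up, so the calls
 * would be ordered roughly as follows:
 *
 *	err = power_control_init(kbdev);	// initializes the OPP table
 *	if (err)
 *		return err;
 *	err = kbase_pm_lowest_gpu_freq_init(kbdev);
 *	// ... later: kbase_clk_rate_trace_manager_init(), etc.
 */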

/**
 * kbase_pm_metrics_start - Start the utilization metrics timer
 * @kbdev: Pointer to the kbase device for which to start the utilization
 *         metrics calculation thread.
 *
 * Start the timer that drives the metrics calculation and runs the custom
 * DVFS.
 */
void kbase_pm_metrics_start(struct kbase_device *kbdev);

/**
 * kbase_pm_metrics_stop - Stop the utilization metrics timer
 * @kbdev: Pointer to the kbase device for which to stop the utilization
 *         metrics calculation thread.
 *
 * Stop the timer that drives the metrics calculation and runs the custom
 * DVFS.
 */
void kbase_pm_metrics_stop(struct kbase_device *kbdev);

#if MALI_USE_CSF && defined(KBASE_PM_RUNTIME)
/**
 * kbase_pm_handle_runtime_suspend - Handle the runtime suspend of the GPU
 *
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 *
 * This function is called from the runtime suspend callback function for
 * saving the HW state and powering down the GPU, if the GPU was in sleep
 * mode. It performs the following steps:
 * - Powers up the L2 cache and re-activates the MCU.
 * - Suspends the CSGs.
 * - Halts the MCU.
 * - Powers down the L2 cache.
 * - Invokes the power_off callback to power down the GPU.
 *
 * Return: 0 if the GPU was already powered down or no error was encountered
 * in the power down, otherwise an error code.
 */
int kbase_pm_handle_runtime_suspend(struct kbase_device *kbdev);

/**
 * kbase_pm_force_mcu_wakeup_after_sleep - Force the wake up of the MCU from
 *                                         sleep
 *
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 *
 * This function forces the wake up of the MCU from sleep state and waits for
 * the MCU to become active.
 * It usually gets called from the runtime suspend callback function.
 * It also gets called from the GPU reset handler or at the time of system
 * suspend or when the user tries to terminate/suspend the on-slot group.
 *
 * Note: the @gpu_wakeup_override flag that forces the reactivation of the MCU
 *       is set by this function and it is the caller's responsibility to
 *       clear the flag.
 *
 * Return: 0 if the wake up was successful.
 */
int kbase_pm_force_mcu_wakeup_after_sleep(struct kbase_device *kbdev);
#endif

#if !MALI_USE_CSF
/**
 * kbase_jd_atom_id - Return the atom's ID, as was originally supplied by
 * userspace in base_jd_atom::atom_number
 * @kctx:  KBase context pointer
 * @katom: Atom for which to return the ID
 *
 * Return: the atom's ID.
 */
static inline int kbase_jd_atom_id(struct kbase_context *kctx,
				   const struct kbase_jd_atom *katom)
{
	int result;

	KBASE_DEBUG_ASSERT(kctx);
	KBASE_DEBUG_ASSERT(katom);
	KBASE_DEBUG_ASSERT(katom->kctx == kctx);

	/* The ID is the atom's index within the context's atom array, so a
	 * valid ID is strictly less than BASE_JD_ATOM_COUNT.
	 */
	result = katom - &kctx->jctx.atoms[0];
	KBASE_DEBUG_ASSERT(result >= 0 && result < BASE_JD_ATOM_COUNT);
	return result;
}

/**
 * kbase_jd_atom_from_id - Return the atom structure for the given atom ID
 * @kctx: Context pointer
 * @id:   ID of atom to retrieve
 *
 * Return: Pointer to the struct kbase_jd_atom associated with the supplied ID
 */
static inline struct kbase_jd_atom *kbase_jd_atom_from_id(
		struct kbase_context *kctx, int id)
{
	return &kctx->jctx.atoms[id];
}
#endif /* !MALI_USE_CSF */
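
/*
 * Round-trip sketch (hypothetical): for any valid atom the two helpers
 * above are inverses, which a debug check could assert as:
 *
 *	int id = kbase_jd_atom_id(kctx, katom);
 *
 *	KBASE_DEBUG_ASSERT(kbase_jd_atom_from_id(kctx, id) == katom);
 */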

/**
 * kbase_disjoint_init - Initialize the disjoint state
 *
 * @kbdev: The kbase device
 *
 * The disjoint event count and state are both set to zero.
 *
 * Disjoint functions usage:
 *
 * The disjoint event count should be incremented whenever a disjoint event occurs.
 *
 * There are several cases which are regarded as disjoint behavior. Rather than just increment
 * the counter during disjoint events we also increment the counter when jobs may be affected
 * by what the GPU is currently doing. To facilitate this we have the concept of disjoint state.
 *
 * Disjoint state is entered during GPU reset. Increasing the disjoint state also increases
 * the count of disjoint events.
 *
 * The disjoint state is then used to increase the count of disjoint events during job submission
 * and job completion. Any atom submitted or completed while the disjoint state is greater than
 * zero is regarded as a disjoint event.
 *
 * The disjoint event counter is also incremented immediately whenever a job is soft stopped
 * and during context creation.
 */
void kbase_disjoint_init(struct kbase_device *kbdev);
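
/*
 * Usage sketch (hypothetical): a reset path would bracket the disjoint
 * window with the state helpers declared below, so that submissions and
 * completions inside the window are counted as disjoint events:
 *
 *	kbase_disjoint_state_up(kbdev);
 *	do_gpu_reset(kbdev);		// hypothetical reset routine
 *	kbase_disjoint_state_down(kbdev);
 */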

/**
 * kbase_disjoint_event - Increase the count of disjoint events,
 * called when a disjoint event has happened
 *
 * @kbdev: The kbase device
 */
void kbase_disjoint_event(struct kbase_device *kbdev);

/**
 * kbase_disjoint_event_potential - Increase the count of disjoint events
 * only if the GPU is in a disjoint state
 *
 * @kbdev: The kbase device
 *
 * This should be called when something happens which could be disjoint if the GPU
 * is in a disjoint state. The state refcount keeps track of this.
 */
void kbase_disjoint_event_potential(struct kbase_device *kbdev);

/**
 * kbase_disjoint_event_get - Return the count of disjoint events
 *
 * @kbdev: The kbase device
 * Return: the count of disjoint events
 */
u32 kbase_disjoint_event_get(struct kbase_device *kbdev);

/**
 * kbase_disjoint_state_up - Increment the refcount state indicating that
 * the GPU is in a disjoint state.
 *
 * @kbdev: The kbase device
 *
 * Also increments the disjoint event count (calls @ref kbase_disjoint_event).
 * Once the disjoint state has completed, @ref kbase_disjoint_state_down
 * should be called.
 */
void kbase_disjoint_state_up(struct kbase_device *kbdev);

/**
 * kbase_disjoint_state_down - Decrement the refcount state
 *
 * @kbdev: The kbase device
 *
 * Also increments the disjoint event count (calls @ref kbase_disjoint_event).
 *
 * Called after @ref kbase_disjoint_state_up once the disjoint state is over.
 */
void kbase_disjoint_state_down(struct kbase_device *kbdev);

/**
 * kbase_device_pcm_dev_init() - Initialize the priority control manager device
 *
 * @kbdev: Pointer to the structure for the kbase device
 *
 * The pointer to the priority control manager device is retrieved from the
 * device tree and a reference is taken on the module implementing the
 * callbacks for priority control manager operations.
 *
 * Return: 0 if successful, or an error code on failure
 */
int kbase_device_pcm_dev_init(struct kbase_device *const kbdev);

/**
 * kbase_device_pcm_dev_term() - Performs priority control manager device
 *                               deinitialization.
 *
 * @kbdev: Pointer to the structure for the kbase device
 *
 * The reference is released on the module implementing the callbacks for
 * priority control manager operations.
 */
void kbase_device_pcm_dev_term(struct kbase_device *const kbdev);

/**
 * KBASE_DISJOINT_STATE_INTERLEAVED_CONTEXT_COUNT_THRESHOLD - If a job is soft
 * stopped and the number of contexts is >= this value it is reported as a
 * disjoint event
 */
#define KBASE_DISJOINT_STATE_INTERLEAVED_CONTEXT_COUNT_THRESHOLD 2
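
/*
 * Illustrative sketch (hypothetical): the threshold above would be consulted
 * on a soft stop roughly as follows, with nr_contexts standing for the
 * number of currently active contexts:
 *
 *	if (nr_contexts >= KBASE_DISJOINT_STATE_INTERLEAVED_CONTEXT_COUNT_THRESHOLD)
 *		kbase_disjoint_event(kbdev);
 */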

#if !defined(UINT64_MAX)
#define UINT64_MAX ((uint64_t)0xFFFFFFFFFFFFFFFFULL)
#endif

#endif /* _KBASE_H_ */
753