/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
 *
 * (C) COPYRIGHT 2010-2022 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU license.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 */

#ifndef _KBASE_H_
#define _KBASE_H_

#include <mali_malisw.h>

#include <mali_kbase_debug.h>

#include <linux/atomic.h>
#include <linux/highmem.h>
#include <linux/hrtimer.h>
#include <linux/ktime.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/version.h> /* for KERNEL_VERSION() and LINUX_VERSION_CODE used below */
#if (KERNEL_VERSION(4, 11, 0) <= LINUX_VERSION_CODE)
#include <linux/sched/mm.h>
#endif
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>

#include <uapi/gpu/arm/bifrost/mali_base_kernel.h>
#include <mali_kbase_linux.h>

/*
 * Include mali_kbase_defs.h first as this provides types needed by other local
 * header files.
 */
#include "mali_kbase_defs.h"

#include "debug/mali_kbase_debug_ktrace.h"
#include "context/mali_kbase_context.h"
#include "mali_kbase_strings.h"
#include "mali_kbase_mem_lowlevel.h"
#include "mali_kbase_utility.h"
#include "mali_kbase_mem.h"
#include "mmu/mali_kbase_mmu.h"
#include "mali_kbase_gpu_memory_debugfs.h"
#include "mali_kbase_mem_profile_debugfs.h"
#include "mali_kbase_gpuprops.h"
#include <uapi/gpu/arm/bifrost/mali_kbase_ioctl.h>
#if !MALI_USE_CSF
#include "mali_kbase_debug_job_fault.h"
#include "mali_kbase_jd_debugfs.h"
#include "mali_kbase_jm.h"
#include "mali_kbase_js.h"
#else /* !MALI_USE_CSF */
#include "csf/mali_kbase_debug_csf_fault.h"
#endif /* MALI_USE_CSF */

#include "ipa/mali_kbase_ipa.h"

#if IS_ENABLED(CONFIG_GPU_TRACEPOINTS)
#include <trace/events/gpu.h>
#endif

#include "mali_linux_trace.h"

#if MALI_USE_CSF
#include "csf/mali_kbase_csf.h"

/* Physical memory group ID for CSF user I/O.
 */
#define KBASE_MEM_GROUP_CSF_IO BASE_MEM_GROUP_DEFAULT

/* Physical memory group ID for CSF firmware.
 */
#define KBASE_MEM_GROUP_CSF_FW BASE_MEM_GROUP_DEFAULT
#endif

/* Physical memory group ID for a special page which can alias several regions.
 */
#define KBASE_MEM_GROUP_SINK BASE_MEM_GROUP_DEFAULT

/*
 * Kernel-side Base (KBase) APIs
 */

struct kbase_device *kbase_device_alloc(void);
/*
 * note: the configuration attributes member of kbdev needs to have
 * been set up before calling kbase_device_init
 */

int kbase_device_misc_init(struct kbase_device *kbdev);
void kbase_device_misc_term(struct kbase_device *kbdev);

#if !MALI_USE_CSF
void kbase_enable_quick_reset(struct kbase_device *kbdev);
void kbase_disable_quick_reset(struct kbase_device *kbdev);
bool kbase_is_quick_reset_enabled(struct kbase_device *kbdev);
#endif

void kbase_device_free(struct kbase_device *kbdev);
int kbase_device_has_feature(struct kbase_device *kbdev, u32 feature);

/* Needed for gator integration and for reporting vsync information */
struct kbase_device *kbase_find_device(int minor);
void kbase_release_device(struct kbase_device *kbdev);

/**
 * kbase_context_get_unmapped_area() - get an address range which is currently
 *                                     unmapped.
 * @kctx: A kernel base context (which has its own GPU address space).
 * @addr: CPU mapped address (set to 0 since a MAP_FIXED mapping is not
 *        allowed; the Mali GPU driver decides on the mapping).
 * @len: Length of the address range.
 * @pgoff: Page offset within the GPU address space of the kbase context.
 * @flags: Flags for the allocation.
 *
 * Finds an unmapped address range which satisfies the requirements specific
 * to the GPU as well as those provided by the call parameters.
 *
 * 1) Requirement for allocations greater than 2MB:
 * - the alignment offset is set to 2MB and the alignment mask to 2MB
 * decremented by 1.
 *
 * 2) Requirements imposed for the shader memory alignment:
 * - the alignment is decided by the number of GPU pc bits, which can be read
 * from the GPU properties of the device associated with this kbase context;
 * the alignment offset is set to this value in bytes and the alignment mask
 * to the offset decremented by 1.
 * - allocations must not be at 4GB boundaries. Such cases are indicated
 * by the flag KBASE_REG_GPU_NX not being set (check the flags of the kbase
 * region). 4GB boundaries can be checked against @ref BASE_MEM_MASK_4GB.
 *
 * 3) Requirements imposed for tiler memory alignment, cases indicated by
 * the flag @ref KBASE_REG_TILER_ALIGN_TOP (check the flags of the kbase
 * region):
 * - the alignment offset is set to the difference between the kbase region
 * extension (converted from the original value in pages to bytes) and the
 * kbase region initial_commit (also converted from the original value in
 * pages to bytes); the alignment mask is set to the kbase region extension
 * in bytes and decremented by 1.
 *
 * Return: if successful, address of the unmapped area aligned as required;
 *         error code (negative) in case of failure;
 */
unsigned long kbase_context_get_unmapped_area(struct kbase_context *kctx,
		const unsigned long addr, const unsigned long len,
		const unsigned long pgoff, const unsigned long flags);
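
/*
 * Illustrative sketch (an assumption, not part of this header's API): how a
 * caller holding a kbase context might look up an unmapped range. Since
 * MAP_FIXED is not supported, the hint address passed in is always 0. The
 * kbase_example_* name is hypothetical.
 */
static inline unsigned long kbase_example_find_unmapped(
		struct kbase_context *kctx, unsigned long len,
		unsigned long pgoff, unsigned long flags)
{
	/* addr is forced to 0: the driver, not userspace, picks the range. */
	return kbase_context_get_unmapped_area(kctx, 0, len, pgoff, flags);
}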

int assign_irqs(struct kbase_device *kbdev);

int kbase_sysfs_init(struct kbase_device *kbdev);
void kbase_sysfs_term(struct kbase_device *kbdev);

int kbase_protected_mode_init(struct kbase_device *kbdev);
void kbase_protected_mode_term(struct kbase_device *kbdev);

/**
 * kbase_device_pm_init() - Perform power management initialization and
 * verify device tree configuration.
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 *
 * Return: 0 if successful, otherwise a standard Linux error code
 */
int kbase_device_pm_init(struct kbase_device *kbdev);

/**
 * kbase_device_pm_term() - Perform power management deinitialization and
 * free resources.
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 *
 * Cleans up all the resources
 */
void kbase_device_pm_term(struct kbase_device *kbdev);

int power_control_init(struct kbase_device *kbdev);
void power_control_term(struct kbase_device *kbdev);

#if IS_ENABLED(CONFIG_DEBUG_FS)
void kbase_device_debugfs_term(struct kbase_device *kbdev);
int kbase_device_debugfs_init(struct kbase_device *kbdev);
#else /* CONFIG_DEBUG_FS */
static inline int kbase_device_debugfs_init(struct kbase_device *kbdev)
{
	return 0;
}

static inline void kbase_device_debugfs_term(struct kbase_device *kbdev) { }
#endif /* CONFIG_DEBUG_FS */

int registers_map(struct kbase_device *kbdev);
void registers_unmap(struct kbase_device *kbdev);

int kbase_device_coherency_init(struct kbase_device *kbdev);

#if !MALI_USE_CSF
int kbase_jd_init(struct kbase_context *kctx);
void kbase_jd_exit(struct kbase_context *kctx);

/**
 * kbase_jd_submit - Submit atoms to the job dispatcher
 *
 * @kctx: The kbase context to submit to
 * @user_addr: The address in user space of the struct base_jd_atom array
 * @nr_atoms: The number of atoms in the array
 * @stride: sizeof(struct base_jd_atom)
 * @uk6_atom: true if the atoms are legacy atoms (struct base_jd_atom_v2_uk6)
 *
 * Return: 0 on success or error code
 */
int kbase_jd_submit(struct kbase_context *kctx,
		void __user *user_addr, u32 nr_atoms, u32 stride,
		bool uk6_atom);
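
/*
 * Illustrative sketch (an assumption, not part of the driver): forwarding a
 * user-supplied atom array to kbase_jd_submit() with the current ABI stride.
 * The kbase_example_* helper and its arguments are hypothetical.
 */
static inline int kbase_example_submit_atoms(struct kbase_context *kctx,
					     void __user *user_atoms,
					     u32 nr_atoms)
{
	/* stride tells the dispatcher which base_jd_atom layout userspace
	 * used; here we assume the current (non-legacy) layout.
	 */
	return kbase_jd_submit(kctx, user_atoms, nr_atoms,
			       sizeof(struct base_jd_atom), false);
}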

/**
 * kbase_jd_done_worker - Handle a job completion
 * @data: a &struct work_struct
 *
 * This function requeues the job from the runpool (if it was soft-stopped or
 * removed from NEXT registers).
 *
 * Removes it from the system if it finished/failed/was cancelled.
 *
 * Resolves dependencies to add dependent jobs to the context, potentially
 * starting them if necessary (which may add more references to the context)
 *
 * Releases the reference to the context from the no-longer-running job.
 *
 * Handles retrying submission outside of IRQ context if it failed from within
 * IRQ context.
 */
void kbase_jd_done_worker(struct work_struct *data);

void kbase_jd_done(struct kbase_jd_atom *katom, int slot_nr, ktime_t *end_timestamp,
		kbasep_js_atom_done_code done_code);
void kbase_jd_cancel(struct kbase_device *kbdev, struct kbase_jd_atom *katom);
void kbase_jd_zap_context(struct kbase_context *kctx);

/**
 * kbase_jd_done_nolock - Perform the necessary handling of an atom that has
 *                        completed execution.
 *
 * @katom: Pointer to the atom that completed execution
 * @post_immediately: Flag indicating that the completion event can be posted
 *                    immediately for @katom and for the other atoms dependent
 *                    on @katom which also completed execution. The flag is
 *                    false only for the case where the function is called by
 *                    kbase_jd_done_worker() on the completion of an atom
 *                    running on the GPU.
 *
 * Note that if this is a soft-job that has had kbase_prepare_soft_job called on it then the caller
 * is responsible for calling kbase_finish_soft_job *before* calling this function.
 *
 * The caller must hold the kbase_jd_context.lock.
 */
bool kbase_jd_done_nolock(struct kbase_jd_atom *katom, bool post_immediately);
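
/*
 * Illustrative sketch (an assumption, not part of the driver): taking the
 * job dispatcher lock named above before calling kbase_jd_done_nolock().
 * The lock member is assumed to be kctx->jctx.lock, per the
 * kbase_jd_context reference in the kernel-doc.
 */
static inline bool kbase_example_done_locked(struct kbase_jd_atom *katom)
{
	struct kbase_context *kctx = katom->kctx;
	bool ret;

	mutex_lock(&kctx->jctx.lock);
	ret = kbase_jd_done_nolock(katom, true);
	mutex_unlock(&kctx->jctx.lock);
	return ret;
}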

void kbase_jd_free_external_resources(struct kbase_jd_atom *katom);
void kbase_jd_dep_clear_locked(struct kbase_jd_atom *katom);

/**
 * kbase_job_done - Process completed jobs from job interrupt
 * @kbdev: Pointer to the kbase device.
 * @done: Bitmask of done or failed jobs, from JOB_IRQ_STAT register
 *
 * This function processes the completed, or failed, jobs from the GPU job
 * slots, for the bits set in the @done bitmask.
 *
 * The hwaccess_lock must be held when calling this function.
 */
void kbase_job_done(struct kbase_device *kbdev, u32 done);

/**
 * kbase_job_slot_ctx_priority_check_locked() - Check for lower priority atoms
 *                                              and soft-stop them
 * @kctx: Pointer to context to check.
 * @katom: Pointer to priority atom.
 *
 * Atoms from @kctx on the same job slot as @katom, which have lower priority
 * than @katom, will be soft-stopped and put back in the queue, so that atoms
 * with higher priority can run.
 *
 * The hwaccess_lock must be held when calling this function.
 */
void kbase_job_slot_ctx_priority_check_locked(struct kbase_context *kctx,
				struct kbase_jd_atom *katom);

/**
 * kbase_job_slot_softstop_start_rp() - Soft-stop the atom at the start
 *                                      of a renderpass.
 * @kctx: Pointer to a kernel base context.
 * @reg:  Reference of a growable GPU memory region in the same context.
 *        Takes ownership of the reference if successful.
 *
 * Used to switch to incremental rendering if we have nearly run out of
 * virtual address space in a growable memory region and the atom currently
 * executing on a job slot is the tiler job chain at the start of a renderpass.
 *
 * Return: 0 if successful, otherwise a negative error code.
 */
int kbase_job_slot_softstop_start_rp(struct kbase_context *kctx,
		struct kbase_va_region *reg);

/**
 * kbase_job_slot_softstop - Soft-stop the specified job slot
 *
 * @kbdev:         The kbase device
 * @js:            The job slot to soft-stop
 * @target_katom:  The job that should be soft-stopped (or NULL for any job)
 * Context:
 *   The job slot lock must be held when calling this function.
 *   The job slot must not already be in the process of being soft-stopped.
 *
 * Where possible any job in the next register is evicted before the soft-stop.
 */
void kbase_job_slot_softstop(struct kbase_device *kbdev, int js,
		struct kbase_jd_atom *target_katom);

void kbase_job_slot_softstop_swflags(struct kbase_device *kbdev, unsigned int js,
				     struct kbase_jd_atom *target_katom, u32 sw_flags);
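
/*
 * Illustrative sketch (an assumption, not part of the driver): soft-stopping
 * whatever is running on a job slot by passing a NULL target atom. The
 * locking requirements documented above still apply to the caller.
 */
static inline void kbase_example_softstop_slot(struct kbase_device *kbdev,
					       int js)
{
	/* NULL means "any job on this slot", per the kernel-doc above. */
	kbase_job_slot_softstop(kbdev, js, NULL);
}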

/**
 * kbase_job_check_enter_disjoint - potentially enter disjoint mode
 * @kbdev: kbase device
 * @action: the event which has occurred
 * @core_reqs: core requirements of the atom
 * @target_katom: the atom which is being affected
 *
 * For a certain soft-stop action, work out whether to enter disjoint
 * state.
 *
 * This does not register multiple disjoint events if the atom has already
 * started a disjoint period
 *
 * @core_reqs can be supplied as 0 if the atom had not started on the hardware
 * (and so a 'real' soft/hard-stop was not required, but it still interrupted
 * flow, perhaps on another context)
 *
 * kbase_job_check_leave_disjoint() should be used to end the disjoint
 * state when the soft/hard-stop action is complete
 */
void kbase_job_check_enter_disjoint(struct kbase_device *kbdev, u32 action,
		base_jd_core_req core_reqs, struct kbase_jd_atom *target_katom);

/**
 * kbase_job_check_leave_disjoint - potentially leave disjoint state
 * @kbdev: kbase device
 * @target_katom: atom which is finishing
 *
 * Work out whether to leave disjoint state when finishing an atom that was
 * originated by kbase_job_check_enter_disjoint(). See the pairing sketch
 * below.
 */
void kbase_job_check_leave_disjoint(struct kbase_device *kbdev,
		struct kbase_jd_atom *target_katom);
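
/*
 * Illustrative sketch (an assumption, not part of the driver): the intended
 * pairing of the disjoint check helpers around a stop action. The action
 * value, the locking context and the core_req field usage are assumptions.
 */
static inline void kbase_example_stop_with_disjoint(struct kbase_device *kbdev,
						    u32 action,
						    struct kbase_jd_atom *katom)
{
	kbase_job_check_enter_disjoint(kbdev, action, katom->core_req, katom);
	/* ... perform the soft/hard-stop action here ... */
	kbase_job_check_leave_disjoint(kbdev, katom);
}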

#endif /* !MALI_USE_CSF */

void kbase_event_post(struct kbase_context *ctx, struct kbase_jd_atom *event);
#if !MALI_USE_CSF
int kbase_event_dequeue(struct kbase_context *ctx, struct base_jd_event_v2 *uevent);
#endif /* !MALI_USE_CSF */
int kbase_event_pending(struct kbase_context *ctx);
int kbase_event_init(struct kbase_context *kctx);
void kbase_event_close(struct kbase_context *kctx);
void kbase_event_cleanup(struct kbase_context *kctx);
void kbase_event_wakeup(struct kbase_context *kctx);

/**
 * kbasep_jit_alloc_validate() - Validate the JIT allocation info.
 *
 * @kctx:	Pointer to the kbase context within which the JIT
 *		allocation is to be validated.
 * @info:	Pointer to the struct base_jit_alloc_info
 *		which is to be validated.
 * Return: 0 if the JIT allocation is valid; negative error code otherwise
 */
int kbasep_jit_alloc_validate(struct kbase_context *kctx,
					struct base_jit_alloc_info *info);

/**
 * kbase_jit_retry_pending_alloc() - Retry blocked just-in-time memory
 *                                   allocations.
 *
 * @kctx:	Pointer to the kbase context within which the just-in-time
 *		memory allocations are to be retried.
 */
void kbase_jit_retry_pending_alloc(struct kbase_context *kctx);

/**
 * kbase_free_user_buffer() - Free memory allocated for struct
 *		kbase_debug_copy_buffer.
 *
 * @buffer:	Pointer to the memory location allocated for the object
 *		of the type struct kbase_debug_copy_buffer.
 */
static inline void kbase_free_user_buffer(
		struct kbase_debug_copy_buffer *buffer)
{
	struct page **pages = buffer->extres_pages;
	int nr_pages = buffer->nr_extres_pages;

	if (pages) {
		int i;

		for (i = 0; i < nr_pages; i++) {
			struct page *pg = pages[i];

			/* Drop the reference taken when the page was pinned */
			if (pg)
				put_page(pg);
		}
		/* Free the page-pointer array itself */
		kfree(pages);
	}
}

#if !MALI_USE_CSF
int kbase_process_soft_job(struct kbase_jd_atom *katom);
int kbase_prepare_soft_job(struct kbase_jd_atom *katom);
void kbase_finish_soft_job(struct kbase_jd_atom *katom);
void kbase_cancel_soft_job(struct kbase_jd_atom *katom);
void kbase_resume_suspended_soft_jobs(struct kbase_device *kbdev);
void kbasep_remove_waiting_soft_job(struct kbase_jd_atom *katom);
#if IS_ENABLED(CONFIG_SYNC_FILE)
void kbase_soft_event_wait_callback(struct kbase_jd_atom *katom);
#endif
int kbase_soft_event_update(struct kbase_context *kctx,
			    u64 event,
			    unsigned char new_status);

void kbasep_soft_job_timeout_worker(struct timer_list *timer);
void kbasep_complete_triggered_soft_events(struct kbase_context *kctx, u64 evt);
#endif /* !MALI_USE_CSF */

void kbasep_as_do_poke(struct work_struct *work);

/**
 * kbase_pm_is_suspending - Check whether a system suspend is in progress,
 * or has already been suspended
 *
 * @kbdev: The kbase device structure for the device
 *
 * The caller should ensure that either kbdev->pm.active_count_lock is held,
 * or a dmb was executed recently (to ensure the value is most up-to-date).
 * However, without a lock the value could change afterwards.
 *
 * Return:
 * * false if a suspend is not in progress
 * * true otherwise
 */
static inline bool kbase_pm_is_suspending(struct kbase_device *kbdev)
{
	return kbdev->pm.suspending;
}
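
/*
 * Illustrative sketch (an assumption, not part of the driver): using the
 * suspend check as a hint before starting optional deferred work. Without
 * kbdev->pm.active_count_lock held, the result is advisory only.
 */
static inline bool kbase_example_may_start_work(struct kbase_device *kbdev)
{
	/* Advisory check: the state may change right after we read it. */
	return !kbase_pm_is_suspending(kbdev);
}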

#ifdef CONFIG_MALI_ARBITER_SUPPORT
/*
 * Check whether a GPU lost event is in progress
 *
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 *
 * Indicates whether a GPU lost event has been received and jobs are no longer
 * being scheduled
 *
 * Return: true if a GPU lost event has been received, false otherwise
 */
static inline bool kbase_pm_is_gpu_lost(struct kbase_device *kbdev)
{
	return atomic_read(&kbdev->pm.gpu_lost) != 0;
}

/*
 * Set or clear GPU lost state
 *
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 * @gpu_lost: true to activate the GPU lost state, false to deactivate it
 *
 * Puts the power management code into the GPU lost state or takes it out of
 * the state. Once in the GPU lost state new GPU jobs will no longer be
 * scheduled.
 */
static inline void kbase_pm_set_gpu_lost(struct kbase_device *kbdev,
	bool gpu_lost)
{
	const int new_val = (gpu_lost ? 1 : 0);
	const int cur_val = atomic_xchg(&kbdev->pm.gpu_lost, new_val);

	/* Only trace an event on an actual state transition */
	if (new_val != cur_val)
		KBASE_KTRACE_ADD(kbdev, ARB_GPU_LOST, NULL, new_val);
}
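
/*
 * Illustrative sketch (an assumption, not part of the driver): how a
 * scheduling path might consult the GPU lost state that an arbiter event
 * handler set via kbase_pm_set_gpu_lost(kbdev, true).
 */
static inline bool kbase_example_may_schedule(struct kbase_device *kbdev)
{
	/* No new jobs may be scheduled once a GPU lost event is seen. */
	return !kbase_pm_is_gpu_lost(kbdev);
}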
#endif

/**
 * kbase_pm_is_active - Determine whether the GPU is active
 *
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 *
 * This takes into account whether there is an active context reference.
 *
 * Return: true if the GPU is active, false otherwise
 */
static inline bool kbase_pm_is_active(struct kbase_device *kbdev)
{
	return kbdev->pm.active_count > 0;
}

/**
 * kbase_pm_lowest_gpu_freq_init() - Find the lowest frequency that the GPU
 *                                can run at, using the device tree, and save
 *                                it within kbdev.
 * @kbdev: Pointer to kbase device.
 *
 * This function could be called from kbase_clk_rate_trace_manager_init,
 * but is left separate as it can be called as soon as
 * dev_pm_opp_of_add_table() has been called to initialize the OPP table,
 * which occurs in power_control_init().
 *
 * Return: 0 in any case.
 */
int kbase_pm_lowest_gpu_freq_init(struct kbase_device *kbdev);

/**
 * kbase_pm_metrics_start - Start the utilization metrics timer
 * @kbdev: Pointer to the kbase device for which to start the utilization
 *         metrics calculation thread.
 *
 * Start the timer that drives the metrics calculation and runs the custom
 * DVFS.
 */
void kbase_pm_metrics_start(struct kbase_device *kbdev);

/**
 * kbase_pm_metrics_stop - Stop the utilization metrics timer
 * @kbdev: Pointer to the kbase device for which to stop the utilization
 *         metrics calculation thread.
 *
 * Stop the timer that drives the metrics calculation and runs the custom
 * DVFS.
 */
void kbase_pm_metrics_stop(struct kbase_device *kbdev);

#if MALI_USE_CSF && defined(KBASE_PM_RUNTIME)
/**
 * kbase_pm_handle_runtime_suspend - Handle the runtime suspend of GPU
 *
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 *
 * This function is called from the runtime suspend callback function for
 * saving the HW state and powering down the GPU, if the GPU was in sleep
 * state mode. It performs the following steps:
 * - Powers up the L2 cache and re-activates the MCU.
 * - Suspends the CSGs.
 * - Halts the MCU.
 * - Powers down the L2 cache.
 * - Invokes the power_off callback to power down the GPU.
 *
 * Return: 0 if the GPU was already powered down or no error was encountered
 * in the power down, otherwise an error code.
 */
int kbase_pm_handle_runtime_suspend(struct kbase_device *kbdev);

/**
 * kbase_pm_force_mcu_wakeup_after_sleep - Force the wake up of MCU from sleep
 *
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 *
 * This function forces the wake up of the MCU from sleep state and waits for
 * the MCU to become active.
 * It usually gets called from the runtime suspend callback function.
 * It also gets called from the GPU reset handler, at the time of system
 * suspend, or when the user tries to terminate/suspend an on-slot group.
 *
 * Note: the @gpu_wakeup_override flag that forces the reactivation of the MCU
 *       is set by this function and it is the caller's responsibility to
 *       clear the flag.
 *
 * Return: 0 if the wake up was successful.
 */
int kbase_pm_force_mcu_wakeup_after_sleep(struct kbase_device *kbdev);
#endif

#if !MALI_USE_CSF
/**
 * kbase_jd_atom_id - Return the atom's ID, as was originally supplied by
 * userspace in base_jd_atom::atom_number
 * @kctx:  KBase context pointer
 * @katom: Atom for which to return the ID
 *
 * Return: the atom's ID.
 */
static inline int kbase_jd_atom_id(struct kbase_context *kctx,
				   const struct kbase_jd_atom *katom)
{
	int result;

	KBASE_DEBUG_ASSERT(kctx);
	KBASE_DEBUG_ASSERT(katom);
	KBASE_DEBUG_ASSERT(katom->kctx == kctx);

	/* The ID is the atom's index in the context's fixed-size atom array */
	result = katom - &kctx->jctx.atoms[0];
	KBASE_DEBUG_ASSERT(result >= 0 && result < BASE_JD_ATOM_COUNT);
	return result;
}

/**
 * kbase_jd_atom_from_id - Return the atom structure for the given atom ID
 * @kctx: Context pointer
 * @id:   ID of atom to retrieve
 *
 * Return: Pointer to struct kbase_jd_atom associated with the supplied ID
 */
static inline struct kbase_jd_atom *kbase_jd_atom_from_id(
		struct kbase_context *kctx, int id)
{
	return &kctx->jctx.atoms[id];
}
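
/*
 * Illustrative sketch (an assumption, not part of the driver): because the
 * atom ID is simply the index into kctx->jctx.atoms, converting an atom to
 * its ID and back yields the original pointer.
 */
static inline bool kbase_example_atom_id_roundtrip(struct kbase_context *kctx,
						   struct kbase_jd_atom *katom)
{
	int id = kbase_jd_atom_id(kctx, katom);

	return kbase_jd_atom_from_id(kctx, id) == katom;
}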
#endif /* !MALI_USE_CSF */

/**
 * kbase_disjoint_init - Initialize the disjoint state
 *
 * @kbdev: The kbase device
 *
 * The disjoint event count and state are both set to zero.
 *
 * Disjoint functions usage:
 *
 * The disjoint event count should be incremented whenever a disjoint event occurs.
 *
 * There are several cases which are regarded as disjoint behavior. Rather than just increment
 * the counter during disjoint events we also increment the counter when jobs may be affected
 * by what the GPU is currently doing. To facilitate this we have the concept of disjoint state.
 *
 * Disjoint state is entered during GPU reset. Increasing the disjoint state also increases
 * the count of disjoint events.
 *
 * The disjoint state is then used to increase the count of disjoint events during job submission
 * and job completion. Any atom submitted or completed while the disjoint state is greater than
 * zero is regarded as a disjoint event.
 *
 * The disjoint event counter is also incremented immediately whenever a job is soft stopped
 * and during context creation.
 */
void kbase_disjoint_init(struct kbase_device *kbdev);

/**
 * kbase_disjoint_event - Increase the count of disjoint events,
 * called when a disjoint event has happened
 *
 * @kbdev: The kbase device
 */
void kbase_disjoint_event(struct kbase_device *kbdev);

/**
 * kbase_disjoint_event_potential - Increase the count of disjoint events
 * only if the GPU is in a disjoint state
 *
 * @kbdev: The kbase device
 *
 * This should be called when something happens which could be disjoint if the GPU
 * is in a disjoint state. The state refcount keeps track of this.
 */
void kbase_disjoint_event_potential(struct kbase_device *kbdev);

/**
 * kbase_disjoint_event_get - Returns the count of disjoint events
 *
 * @kbdev: The kbase device
 * Return: the count of disjoint events
 */
u32 kbase_disjoint_event_get(struct kbase_device *kbdev);

/**
 * kbase_disjoint_state_up - Increment the refcount state indicating that
 * the GPU is in a disjoint state.
 *
 * @kbdev: The kbase device
 *
 * Also increments the disjoint event count (calls kbase_disjoint_event()).
 * Eventually, after the disjoint state has completed,
 * kbase_disjoint_state_down() should be called.
 */
void kbase_disjoint_state_up(struct kbase_device *kbdev);

/**
 * kbase_disjoint_state_down - Decrement the refcount state
 *
 * @kbdev: The kbase device
 *
 * Also increments the disjoint event count (calls kbase_disjoint_event()).
 *
 * Called after kbase_disjoint_state_up() once the disjoint state is over.
 * See the pairing sketch below.
 */
void kbase_disjoint_state_down(struct kbase_device *kbdev);
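
/*
 * Illustrative sketch (an assumption, not part of the driver): bracketing a
 * reset-like operation with the disjoint state helpers, so that atoms
 * submitted or completed inside the window are counted as disjoint events.
 */
static inline void kbase_example_disjoint_section(struct kbase_device *kbdev)
{
	kbase_disjoint_state_up(kbdev);
	/* ... perform the potentially disjoint operation here ... */
	kbase_disjoint_state_down(kbdev);
}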

/**
 * kbase_device_pcm_dev_init() - Initialize the priority control manager device
 *
 * @kbdev: Pointer to the structure for the kbase device
 *
 * A pointer to the priority control manager device is retrieved from the
 * device tree and a reference is taken on the module implementing the
 * callbacks for priority control manager operations.
 *
 * Return: 0 if successful, or an error code on failure
 */
int kbase_device_pcm_dev_init(struct kbase_device *const kbdev);

/**
 * kbase_device_pcm_dev_term() - Perform priority control manager device
 *                               deinitialization.
 *
 * @kbdev: Pointer to the structure for the kbase device
 *
 * The reference is released on the module implementing the callbacks for
 * priority control manager operations.
 */
void kbase_device_pcm_dev_term(struct kbase_device *const kbdev);

/**
 * KBASE_DISJOINT_STATE_INTERLEAVED_CONTEXT_COUNT_THRESHOLD - If a job is
 * soft-stopped and the number of contexts is >= this value, it is reported
 * as a disjoint event
 */
#define KBASE_DISJOINT_STATE_INTERLEAVED_CONTEXT_COUNT_THRESHOLD 2

#if !defined(UINT64_MAX)
	#define UINT64_MAX ((uint64_t)0xFFFFFFFFFFFFFFFFULL)
#endif

#endif /* _KBASE_H_ */