/*
 *
 * (C) COPYRIGHT 2010-2017 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU licence.
 *
 * A copy of the licence is included with the program, and can also be obtained
 * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
 * Boston, MA 02110-1301, USA.
 *
 */

#ifndef _KBASE_H_
#define _KBASE_H_

#include <mali_malisw.h>

#include <mali_kbase_debug.h>

#include <asm/page.h>

#include <linux/atomic.h>
#include <linux/highmem.h>
#include <linux/hrtimer.h>
#include <linux/ktime.h>
#include <linux/list.h>
#include <linux/mm_types.h>
#include <linux/mutex.h>
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>
#include <linux/workqueue.h>

#include "mali_base_kernel.h"
#include <mali_kbase_uku.h>
#include <mali_kbase_linux.h>

/*
 * Include mali_kbase_defs.h first as this provides types needed by other local
 * header files.
 */
#include "mali_kbase_defs.h"

#include "mali_kbase_context.h"
#include "mali_kbase_strings.h"
#include "mali_kbase_mem_lowlevel.h"
#include "mali_kbase_trace_timeline.h"
#include "mali_kbase_js.h"
#include "mali_kbase_mem.h"
#include "mali_kbase_utility.h"
#include "mali_kbase_gpu_memory_debugfs.h"
#include "mali_kbase_mem_profile_debugfs.h"
#include "mali_kbase_debug_job_fault.h"
#include "mali_kbase_jd_debugfs.h"
#include "mali_kbase_gpuprops.h"
#include "mali_kbase_jm.h"
#include "mali_kbase_vinstr.h"

#include "ipa/mali_kbase_ipa.h"

#ifdef CONFIG_GPU_TRACEPOINTS
#include <trace/events/gpu.h>
#endif
/**
 * @page page_base_kernel_main Kernel-side Base (KBase) APIs
 */

/**
 * @defgroup base_kbase_api Kernel-side Base (KBase) APIs
 */

struct kbase_device *kbase_device_alloc(void);
/*
 * Note: the configuration attributes member of kbdev must have been set up
 * before kbase_device_init() is called.
 */

/*
 * API to acquire the device list semaphore and return a pointer to the
 * device list head.
 */
const struct list_head *kbase_dev_list_get(void);
/* API to release the device list semaphore */
void kbase_dev_list_put(const struct list_head *dev_list);
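
/*
 * Illustrative sketch (not a documented API contract): walking the device
 * list while holding the list semaphore. The list linkage field of struct
 * kbase_device is assumed to be named "entry" here for illustration only.
 *
 *     const struct list_head *dev_list = kbase_dev_list_get();
 *     struct kbase_device *kbdev;
 *
 *     list_for_each_entry(kbdev, dev_list, entry)
 *         inspect_device(kbdev);  // hypothetical per-device work
 *     kbase_dev_list_put(dev_list);
 */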

int kbase_device_init(struct kbase_device * const kbdev);
void kbase_device_term(struct kbase_device *kbdev);
void kbase_device_free(struct kbase_device *kbdev);
int kbase_device_has_feature(struct kbase_device *kbdev, u32 feature);
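
/*
 * Illustrative lifecycle sketch, assuming probe code has populated the
 * configuration attributes before kbase_device_init() (see the note above);
 * the error handling shown is a minimal example, not the driver's actual
 * probe path:
 *
 *     struct kbase_device *kbdev = kbase_device_alloc();
 *
 *     if (!kbdev)
 *         return -ENOMEM;
 *     // set up configuration attributes on kbdev here
 *     if (kbase_device_init(kbdev)) {
 *         kbase_device_free(kbdev);
 *         return -EINVAL;
 *     }
 *     ...
 *     kbase_device_term(kbdev);
 *     kbase_device_free(kbdev);
 */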

/* Needed for gator integration and for reporting vsync information */
struct kbase_device *kbase_find_device(int minor);
void kbase_release_device(struct kbase_device *kbdev);

void kbase_set_profiling_control(struct kbase_device *kbdev, u32 control, u32 value);

struct kbase_context *
kbase_create_context(struct kbase_device *kbdev, bool is_compat);
void kbase_destroy_context(struct kbase_context *kctx);

int kbase_jd_init(struct kbase_context *kctx);
void kbase_jd_exit(struct kbase_context *kctx);

/**
 * kbase_jd_submit - Submit atoms to the job dispatcher
 *
 * @kctx: The kbase context to submit to
 * @user_addr: The address in user space of the struct base_jd_atom_v2 array
 * @nr_atoms: The number of atoms in the array
 * @stride: sizeof(struct base_jd_atom_v2)
 * @uk6_atom: true if the atoms are legacy atoms (struct base_jd_atom_v2_uk6)
 *
 * Return: 0 on success, or a negative error code on failure
 */
int kbase_jd_submit(struct kbase_context *kctx,
		void __user *user_addr, u32 nr_atoms, u32 stride,
		bool uk6_atom);
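
/*
 * Illustrative call sketch: submitting current-format atoms copied from a
 * userspace array of struct base_jd_atom_v2. The "args" ioctl argument
 * structure and its field names are hypothetical.
 *
 *     err = kbase_jd_submit(kctx, (void __user *)args->addr,
 *             args->nr_atoms, sizeof(struct base_jd_atom_v2),
 *             false);
 */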

/**
 * kbase_jd_done_worker - Handle a job completion
 * @data: a &struct work_struct
 *
 * This function requeues the job from the runpool (if it was soft-stopped or
 * removed from the NEXT registers).
 *
 * Removes it from the system if it finished/failed/was cancelled.
 *
 * Resolves dependencies to add dependent jobs to the context, potentially
 * starting them if necessary (which may add more references to the context).
 *
 * Releases the reference to the context from the no-longer-running job.
 *
 * Handles retrying submission outside of IRQ context if it failed from within
 * IRQ context.
 */
void kbase_jd_done_worker(struct work_struct *data);
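
/*
 * Illustrative sketch: kbase_jd_done_worker() is a workqueue handler, so a
 * completion path would typically queue it on the atom's work item. The
 * "work" field and "job_done_wq" workqueue names below are assumptions for
 * illustration; consult mali_kbase_defs.h for the actual members.
 *
 *     INIT_WORK(&katom->work, kbase_jd_done_worker);
 *     queue_work(kctx->jctx.job_done_wq, &katom->work);
 */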

void kbase_jd_done(struct kbase_jd_atom *katom, int slot_nr, ktime_t *end_timestamp,
		kbasep_js_atom_done_code done_code);
void kbase_jd_cancel(struct kbase_device *kbdev, struct kbase_jd_atom *katom);
void kbase_jd_zap_context(struct kbase_context *kctx);
bool jd_done_nolock(struct kbase_jd_atom *katom,
		struct list_head *completed_jobs_ctx);
void kbase_jd_free_external_resources(struct kbase_jd_atom *katom);
bool jd_submit_atom(struct kbase_context *kctx,
		const struct base_jd_atom_v2 *user_atom,
		struct kbase_jd_atom *katom);
void kbase_jd_dep_clear_locked(struct kbase_jd_atom *katom);

void kbase_job_done(struct kbase_device *kbdev, u32 done);

/**
 * kbase_job_slot_ctx_priority_check_locked - Check for lower-priority atoms
 *                                            and soft-stop them
 * @kctx: Pointer to context to check.
 * @katom: Pointer to priority atom.
 *
 * Atoms from @kctx on the same job slot as @katom, which have lower priority
 * than @katom, will be soft-stopped and put back in the queue, so that atoms
 * with higher priority can run.
 *
 * The hwaccess_lock must be held when calling this function.
 */
void kbase_job_slot_ctx_priority_check_locked(struct kbase_context *kctx,
		struct kbase_jd_atom *katom);
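
/*
 * Illustrative locking sketch: the caller must hold hwaccess_lock, e.g.
 *
 *     unsigned long flags;
 *
 *     spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
 *     kbase_job_slot_ctx_priority_check_locked(kctx, katom);
 *     spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
 */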

void kbase_job_slot_softstop(struct kbase_device *kbdev, int js,
		struct kbase_jd_atom *target_katom);
void kbase_job_slot_softstop_swflags(struct kbase_device *kbdev, int js,
		struct kbase_jd_atom *target_katom, u32 sw_flags);
void kbase_job_slot_hardstop(struct kbase_context *kctx, int js,
		struct kbase_jd_atom *target_katom);
void kbase_job_check_enter_disjoint(struct kbase_device *kbdev, u32 action,
		base_jd_core_req core_reqs, struct kbase_jd_atom *target_katom);
void kbase_job_check_leave_disjoint(struct kbase_device *kbdev,
		struct kbase_jd_atom *target_katom);

void kbase_event_post(struct kbase_context *ctx, struct kbase_jd_atom *event);
int kbase_event_dequeue(struct kbase_context *ctx, struct base_jd_event_v2 *uevent);
int kbase_event_pending(struct kbase_context *ctx);
int kbase_event_init(struct kbase_context *kctx);
void kbase_event_close(struct kbase_context *kctx);
void kbase_event_cleanup(struct kbase_context *kctx);
void kbase_event_wakeup(struct kbase_context *kctx);

int kbase_process_soft_job(struct kbase_jd_atom *katom);
int kbase_prepare_soft_job(struct kbase_jd_atom *katom);
void kbase_finish_soft_job(struct kbase_jd_atom *katom);
void kbase_cancel_soft_job(struct kbase_jd_atom *katom);
void kbase_resume_suspended_soft_jobs(struct kbase_device *kbdev);
void kbasep_remove_waiting_soft_job(struct kbase_jd_atom *katom);
#if defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE)
void kbase_soft_event_wait_callback(struct kbase_jd_atom *katom);
#endif
int kbase_soft_event_update(struct kbase_context *kctx,
		u64 event,
		unsigned char new_status);

bool kbase_replay_process(struct kbase_jd_atom *katom);

void kbasep_soft_job_timeout_worker(struct timer_list *t);
void kbasep_complete_triggered_soft_events(struct kbase_context *kctx, u64 evt);

/* API used internally for register access. Contains validation and tracing. */
void kbase_device_trace_register_access(struct kbase_context *kctx, enum kbase_reg_access_type type, u16 reg_offset, u32 reg_value);
int kbase_device_trace_buffer_install(
		struct kbase_context *kctx, u32 *tb, size_t size);
void kbase_device_trace_buffer_uninstall(struct kbase_context *kctx);

/* API to be ported per OS; only needs to do the raw register access. */
void kbase_os_reg_write(struct kbase_device *kbdev, u16 offset, u32 value);
u32 kbase_os_reg_read(struct kbase_device *kbdev, u16 offset);

void kbasep_as_do_poke(struct work_struct *work);

/** Return the name associated with a Mali exception code
 *
 * Called from the interrupt handler when a GPU fault occurs, so that the
 * details of the fault can be reported via KBASE_DEBUG_PRINT_WARN.
 *
 * @param[in] kbdev          The kbase device that the GPU fault occurred from.
 * @param[in] exception_code The exception code.
 * @return name associated with the exception code
 */
const char *kbase_exception_name(struct kbase_device *kbdev,
		u32 exception_code);

/**
 * Check whether a system suspend is in progress or has already completed.
 *
 * The caller should ensure that either kbdev->pm.active_count_lock is held,
 * or a dmb was executed recently (to ensure the value is up-to-date).
 * However, without the lock the value could change immediately afterwards.
 *
 * @return true if a suspend is in progress or has completed, false otherwise
 */
static inline bool kbase_pm_is_suspending(struct kbase_device *kbdev)
{
	return kbdev->pm.suspending;
}
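
/*
 * Illustrative usage sketch, taking the lock mentioned above so the value
 * cannot change under the caller. Treating active_count_lock as a mutex is
 * an assumption here; consult mali_kbase_defs.h for its actual type.
 *
 *     mutex_lock(&kbdev->pm.active_count_lock);
 *     if (!kbase_pm_is_suspending(kbdev))
 *         do_work_requiring_active_gpu(kbdev);  // hypothetical helper
 *     mutex_unlock(&kbdev->pm.active_count_lock);
 */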

/**
 * Return the atom's ID, as was originally supplied by userspace in
 * base_jd_atom_v2::atom_number.
 */
static inline int kbase_jd_atom_id(struct kbase_context *kctx, struct kbase_jd_atom *katom)
{
	int result;

	KBASE_DEBUG_ASSERT(kctx);
	KBASE_DEBUG_ASSERT(katom);
	KBASE_DEBUG_ASSERT(katom->kctx == kctx);

	result = katom - &kctx->jctx.atoms[0];
	KBASE_DEBUG_ASSERT(result >= 0 && result < BASE_JD_ATOM_COUNT);
	return result;
}

/**
 * kbase_jd_atom_from_id - Return the atom structure for the given atom ID
 * @kctx: Context pointer
 * @id: ID of atom to retrieve
 *
 * Return: Pointer to struct kbase_jd_atom associated with the supplied ID
 */
static inline struct kbase_jd_atom *kbase_jd_atom_from_id(
		struct kbase_context *kctx, int id)
{
	return &kctx->jctx.atoms[id];
}
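
/*
 * Illustrative round-trip sketch: an atom's ID and its structure are
 * mutually recoverable. Note that kbase_jd_atom_from_id() performs no bounds
 * checking, so the ID must already be known to be valid.
 *
 *     int id = kbase_jd_atom_id(kctx, katom);
 *     struct kbase_jd_atom *same = kbase_jd_atom_from_id(kctx, id);
 *
 *     KBASE_DEBUG_ASSERT(same == katom);
 */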

/**
 * Initialize the disjoint state.
 *
 * The disjoint event count and state are both set to zero.
 *
 * Disjoint functions usage:
 *
 * The disjoint event count should be incremented whenever a disjoint event
 * occurs.
 *
 * There are several cases which are regarded as disjoint behavior. Rather
 * than only incrementing the counter during disjoint events, we also
 * increment it when jobs may be affected by what the GPU is currently doing.
 * To facilitate this we have the concept of disjoint state.
 *
 * Disjoint state is entered during GPU reset and for the entire time that an
 * atom is replaying (as part of the replay workaround). Increasing the
 * disjoint state also increases the count of disjoint events.
 *
 * The disjoint state is then used to increase the count of disjoint events
 * during job submission and job completion. Any atom submitted or completed
 * while the disjoint state is greater than zero is regarded as a disjoint
 * event.
 *
 * The disjoint event counter is also incremented immediately whenever a job
 * is soft-stopped, and during context creation.
 *
 * @param kbdev The kbase device
 */
void kbase_disjoint_init(struct kbase_device *kbdev);

/**
 * Increase the count of disjoint events.
 *
 * Called when a disjoint event has happened.
 *
 * @param kbdev The kbase device
 */
void kbase_disjoint_event(struct kbase_device *kbdev);

/**
 * Increase the count of disjoint events only if the GPU is in a disjoint
 * state.
 *
 * This should be called when something happens which could be disjoint if
 * the GPU is in a disjoint state. The state refcount keeps track of this.
 *
 * @param kbdev The kbase device
 */
void kbase_disjoint_event_potential(struct kbase_device *kbdev);

/**
 * Returns the count of disjoint events.
 *
 * @param kbdev The kbase device
 * @return the count of disjoint events
 */
u32 kbase_disjoint_event_get(struct kbase_device *kbdev);

/**
 * Increment the refcount state indicating that the GPU is in a disjoint
 * state.
 *
 * Also increments the disjoint event count (calls @ref kbase_disjoint_event).
 * Once the disjoint state has completed, @ref kbase_disjoint_state_down
 * should be called.
 *
 * @param kbdev The kbase device
 */
void kbase_disjoint_state_up(struct kbase_device *kbdev);

/**
 * Decrement the refcount state.
 *
 * Also increments the disjoint event count (calls @ref kbase_disjoint_event).
 *
 * Called after @ref kbase_disjoint_state_up once the disjoint state is over.
 *
 * @param kbdev The kbase device
 */
void kbase_disjoint_state_down(struct kbase_device *kbdev);
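
/*
 * Illustrative protocol sketch for a disjoint interval such as a GPU reset
 * (the reset helper named here is hypothetical):
 *
 *     kbase_disjoint_state_up(kbdev);
 *     perform_gpu_reset(kbdev);
 *     kbase_disjoint_state_down(kbdev);
 *
 * Submission and completion paths then call
 * kbase_disjoint_event_potential(kbdev), which only increments the event
 * count while the state refcount is non-zero.
 */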

/**
 * If a job is soft-stopped and the number of contexts is >= this value,
 * it is reported as a disjoint event.
 */
#define KBASE_DISJOINT_STATE_INTERLEAVED_CONTEXT_COUNT_THRESHOLD 2

#if !defined(UINT64_MAX)
#define UINT64_MAX ((uint64_t)0xFFFFFFFFFFFFFFFFULL)
#endif

#if KBASE_TRACE_ENABLE
void kbasep_trace_debugfs_init(struct kbase_device *kbdev);

#ifndef CONFIG_MALI_SYSTEM_TRACE
/** Add trace values about a job-slot
 *
 * @note Any functions called through this macro will still be evaluated in
 * Release builds (CONFIG_MALI_DEBUG not defined). Therefore, when
 * KBASE_TRACE_ENABLE == 0, any functions called to get the parameters
 * supplied to this macro must:
 * - be static or static inline, and
 * - just return 0, with no other statements present in the body.
 */
#define KBASE_TRACE_ADD_SLOT(kbdev, code, ctx, katom, gpu_addr, jobslot) \
	kbasep_trace_add(kbdev, KBASE_TRACE_CODE(code), ctx, katom, gpu_addr, \
			KBASE_TRACE_FLAG_JOBSLOT, 0, jobslot, 0)

/** Add trace values about a job-slot, with info
 *
 * @note Any functions called through this macro will still be evaluated in
 * Release builds (CONFIG_MALI_DEBUG not defined). Therefore, when
 * KBASE_TRACE_ENABLE == 0, any functions called to get the parameters
 * supplied to this macro must:
 * - be static or static inline, and
 * - just return 0, with no other statements present in the body.
 */
#define KBASE_TRACE_ADD_SLOT_INFO(kbdev, code, ctx, katom, gpu_addr, jobslot, info_val) \
	kbasep_trace_add(kbdev, KBASE_TRACE_CODE(code), ctx, katom, gpu_addr, \
			KBASE_TRACE_FLAG_JOBSLOT, 0, jobslot, info_val)

/** Add trace values about a ctx refcount
 *
 * @note Any functions called through this macro will still be evaluated in
 * Release builds (CONFIG_MALI_DEBUG not defined). Therefore, when
 * KBASE_TRACE_ENABLE == 0, any functions called to get the parameters
 * supplied to this macro must:
 * - be static or static inline, and
 * - just return 0, with no other statements present in the body.
 */
#define KBASE_TRACE_ADD_REFCOUNT(kbdev, code, ctx, katom, gpu_addr, refcount) \
	kbasep_trace_add(kbdev, KBASE_TRACE_CODE(code), ctx, katom, gpu_addr, \
			KBASE_TRACE_FLAG_REFCOUNT, refcount, 0, 0)
/** Add trace values about a ctx refcount, and info
 *
 * @note Any functions called through this macro will still be evaluated in
 * Release builds (CONFIG_MALI_DEBUG not defined). Therefore, when
 * KBASE_TRACE_ENABLE == 0, any functions called to get the parameters
 * supplied to this macro must:
 * - be static or static inline, and
 * - just return 0, with no other statements present in the body.
 */
#define KBASE_TRACE_ADD_REFCOUNT_INFO(kbdev, code, ctx, katom, gpu_addr, refcount, info_val) \
	kbasep_trace_add(kbdev, KBASE_TRACE_CODE(code), ctx, katom, gpu_addr, \
			KBASE_TRACE_FLAG_REFCOUNT, refcount, 0, info_val)

/** Add trace values (no slot or refcount)
 *
 * @note Any functions called through this macro will still be evaluated in
 * Release builds (CONFIG_MALI_DEBUG not defined). Therefore, when
 * KBASE_TRACE_ENABLE == 0, any functions called to get the parameters
 * supplied to this macro must:
 * - be static or static inline, and
 * - just return 0, with no other statements present in the body.
 */
#define KBASE_TRACE_ADD(kbdev, code, ctx, katom, gpu_addr, info_val) \
	kbasep_trace_add(kbdev, KBASE_TRACE_CODE(code), ctx, katom, gpu_addr, \
			0, 0, 0, info_val)

/** Clear the trace */
#define KBASE_TRACE_CLEAR(kbdev) \
	kbasep_trace_clear(kbdev)

/** Dump the slot trace */
#define KBASE_TRACE_DUMP(kbdev) \
	kbasep_trace_dump(kbdev)

/** PRIVATE - do not use directly. Use KBASE_TRACE_ADD() instead */
void kbasep_trace_add(struct kbase_device *kbdev, enum kbase_trace_code code, void *ctx, struct kbase_jd_atom *katom, u64 gpu_addr, u8 flags, int refcount, int jobslot, unsigned long info_val);
/** PRIVATE - do not use directly. Use KBASE_TRACE_CLEAR() instead */
void kbasep_trace_clear(struct kbase_device *kbdev);
#else /* #ifndef CONFIG_MALI_SYSTEM_TRACE */
/* Dispatch kbase trace events as system trace events */
#include <mali_linux_kbase_trace.h>
#define KBASE_TRACE_ADD_SLOT(kbdev, code, ctx, katom, gpu_addr, jobslot)\
	trace_mali_##code(jobslot, 0)

#define KBASE_TRACE_ADD_SLOT_INFO(kbdev, code, ctx, katom, gpu_addr, jobslot, info_val)\
	trace_mali_##code(jobslot, info_val)

#define KBASE_TRACE_ADD_REFCOUNT(kbdev, code, ctx, katom, gpu_addr, refcount)\
	trace_mali_##code(refcount, 0)

#define KBASE_TRACE_ADD_REFCOUNT_INFO(kbdev, code, ctx, katom, gpu_addr, refcount, info_val)\
	trace_mali_##code(refcount, info_val)

#define KBASE_TRACE_ADD(kbdev, code, ctx, katom, gpu_addr, info_val)\
	trace_mali_##code(gpu_addr, info_val)

#define KBASE_TRACE_CLEAR(kbdev)\
	do {\
		CSTD_UNUSED(kbdev);\
		CSTD_NOP(0);\
	} while (0)
#define KBASE_TRACE_DUMP(kbdev)\
	do {\
		CSTD_UNUSED(kbdev);\
		CSTD_NOP(0);\
	} while (0)

#endif /* #ifndef CONFIG_MALI_SYSTEM_TRACE */
#else
#define KBASE_TRACE_ADD_SLOT(kbdev, code, ctx, katom, gpu_addr, jobslot)\
	do {\
		CSTD_UNUSED(kbdev);\
		CSTD_NOP(code);\
		CSTD_UNUSED(ctx);\
		CSTD_UNUSED(katom);\
		CSTD_UNUSED(gpu_addr);\
		CSTD_UNUSED(jobslot);\
		CSTD_NOP(0);\
	} while (0)

#define KBASE_TRACE_ADD_SLOT_INFO(kbdev, code, ctx, katom, gpu_addr, jobslot, info_val)\
	do {\
		CSTD_UNUSED(kbdev);\
		CSTD_NOP(code);\
		CSTD_UNUSED(ctx);\
		CSTD_UNUSED(katom);\
		CSTD_UNUSED(gpu_addr);\
		CSTD_UNUSED(jobslot);\
		CSTD_UNUSED(info_val);\
		CSTD_NOP(0);\
	} while (0)

#define KBASE_TRACE_ADD_REFCOUNT(kbdev, code, ctx, katom, gpu_addr, refcount)\
	do {\
		CSTD_UNUSED(kbdev);\
		CSTD_NOP(code);\
		CSTD_UNUSED(ctx);\
		CSTD_UNUSED(katom);\
		CSTD_UNUSED(gpu_addr);\
		CSTD_UNUSED(refcount);\
		CSTD_NOP(0);\
	} while (0)

#define KBASE_TRACE_ADD_REFCOUNT_INFO(kbdev, code, ctx, katom, gpu_addr, refcount, info_val)\
	do {\
		CSTD_UNUSED(kbdev);\
		CSTD_NOP(code);\
		CSTD_UNUSED(ctx);\
		CSTD_UNUSED(katom);\
		CSTD_UNUSED(gpu_addr);\
		CSTD_UNUSED(refcount);\
		CSTD_UNUSED(info_val);\
		CSTD_NOP(0);\
	} while (0)

#define KBASE_TRACE_ADD(kbdev, code, ctx, katom, gpu_addr, info_val)\
	do {\
		CSTD_UNUSED(kbdev);\
		CSTD_NOP(code);\
		CSTD_UNUSED(ctx);\
		CSTD_UNUSED(katom);\
		CSTD_UNUSED(gpu_addr);\
		CSTD_UNUSED(info_val);\
		CSTD_NOP(0);\
	} while (0)

#define KBASE_TRACE_CLEAR(kbdev)\
	do {\
		CSTD_UNUSED(kbdev);\
		CSTD_NOP(0);\
	} while (0)
#define KBASE_TRACE_DUMP(kbdev)\
	do {\
		CSTD_UNUSED(kbdev);\
		CSTD_NOP(0);\
	} while (0)
#endif /* KBASE_TRACE_ENABLE */
/** PRIVATE - do not use directly. Use KBASE_TRACE_DUMP() instead */
void kbasep_trace_dump(struct kbase_device *kbdev);
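
/*
 * Illustrative usage sketch of the trace macros. SOME_TRACE_CODE is a
 * placeholder for a real enum kbase_trace_code value, and using katom->jc as
 * the GPU address is an assumption for illustration:
 *
 *     KBASE_TRACE_ADD(kbdev, SOME_TRACE_CODE, kctx, katom, katom->jc, 0);
 *     KBASE_TRACE_DUMP(kbdev);
 *
 * All variants compile away to CSTD_NOP/CSTD_UNUSED when
 * KBASE_TRACE_ENABLE == 0, so the argument expressions must be side-effect
 * free (see the notes on each macro above).
 */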

#ifdef CONFIG_MALI_DEBUG
/**
 * kbase_set_driver_inactive - Force driver to go inactive
 * @kbdev: Device pointer
 * @inactive: true if driver should go inactive, false otherwise
 *
 * Forcing the driver inactive will cause all future IOCTLs to wait until the
 * driver is made active again. This is intended solely for the use of tests
 * which require that no jobs are running while the test executes.
 */
void kbase_set_driver_inactive(struct kbase_device *kbdev, bool inactive);
#endif /* CONFIG_MALI_DEBUG */


#if defined(CONFIG_DEBUG_FS) && !defined(CONFIG_MALI_NO_MALI)

/* kbase_io_history_init - initialize data struct for register access history
 *
 * @h The register history to initialize
 * @n The number of register accesses that the buffer could hold
 *
 * @return 0 if successfully initialized, failure otherwise
 */
int kbase_io_history_init(struct kbase_io_history *h, u16 n);

/* kbase_io_history_term - uninit all resources for the register access history
 *
 * @h The register history to terminate
 */
void kbase_io_history_term(struct kbase_io_history *h);

/* kbase_io_history_dump - print the register history to the kernel ring buffer
 *
 * @kbdev Pointer to kbase_device containing the register history to dump
 */
void kbase_io_history_dump(struct kbase_device *kbdev);

/**
 * kbase_io_history_resize - resize the register access history buffer.
 *
 * @h: Pointer to a valid register history to resize
 * @new_size: Number of accesses the buffer could hold
 *
 * A successful resize will clear all recent register accesses.
 * If resizing fails for any reason (e.g. could not allocate memory, invalid
 * buffer size) then the original buffer will be kept intact.
 *
 * @return 0 if the buffer was resized, failure otherwise
 */
int kbase_io_history_resize(struct kbase_io_history *h, u16 new_size);
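
/*
 * Illustrative lifecycle sketch for the register access history. The
 * capacities are arbitrary example values, and in the driver the history is
 * normally embedded in struct kbase_device rather than stack-allocated:
 *
 *     struct kbase_io_history h;
 *
 *     if (kbase_io_history_init(&h, 256))
 *         return -ENOMEM;
 *     ...
 *     kbase_io_history_resize(&h, 512);  // clears recent accesses on success
 *     kbase_io_history_dump(kbdev);
 *     kbase_io_history_term(&h);
 */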

#else /* CONFIG_DEBUG_FS */

#define kbase_io_history_init(...) ((int)0)

#define kbase_io_history_term CSTD_NOP

#define kbase_io_history_dump CSTD_NOP

#define kbase_io_history_resize CSTD_NOP

#endif /* CONFIG_DEBUG_FS */


#endif /* _KBASE_H_ */