/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
 *
 * (C) COPYRIGHT 2020-2022 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU license.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 */

/*
 * ***** IMPORTANT: THIS IS NOT A NORMAL HEADER FILE         *****
 * *****            DO NOT INCLUDE DIRECTLY                  *****
 * *****            THE LACK OF HEADER GUARDS IS INTENTIONAL *****
 */

/*
 * The purpose of this header file is just to contain a list of trace code
 * identifiers
 *
 * When updating this file, also remember to update
 * mali_kbase_debug_linux_ktrace_csf.h
 *
 * IMPORTANT: THIS FILE MUST NOT BE USED FOR ANY PURPOSE OTHER THAN THAT
 * DESCRIBED IN mali_kbase_debug_ktrace_codes.h
 */
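/*
 * How this list is consumed (an illustrative sketch only; the authoritative
 * expansions live in mali_kbase_debug_ktrace_codes.h and
 * mali_kbase_debug_linux_ktrace_csf.h): an includer defines
 * KBASE_KTRACE_CODE_MAKE_CODE before pulling the list in, so the same
 * identifiers can expand to enum values in one place and to a matching string
 * table in another. The example macro and type names below are assumptions
 * made up for illustration, not the driver's real definitions.
 */
#if 0 /* Example only, never compiled - the same trick as the dummy sections below */
/* Expansion to enumerators: */
#define KBASE_KTRACE_CODE_MAKE_CODE(X) KBASE_KTRACE_CODE_EXAMPLE_##X
enum kbase_ktrace_code_example {
#include "mali_kbase_debug_ktrace_codes_csf.h"
};
#undef KBASE_KTRACE_CODE_MAKE_CODE

/* Expansion to a string table whose indices match the enum above: */
#define KBASE_KTRACE_CODE_MAKE_CODE(X) #X
static const char *const kbase_ktrace_code_example_names[] = {
#include "mali_kbase_debug_ktrace_codes_csf.h"
};
#undef KBASE_KTRACE_CODE_MAKE_CODE
#endif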

#if 0 /* Dummy section to avoid breaking formatting */
int dummy_array[] = {
#endif
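/*
 * Note on the dummy section above: the never-compiled array initialiser
 * appears to exist so that editors and code formatters treat the macro
 * invocations below as array elements and keep their indentation intact,
 * which is the "formatting" the comment refers to.
 */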
	/*
	 * Generic CSF events
	 */
	/* info_val = 0 */
	KBASE_KTRACE_CODE_MAKE_CODE(SCHEDULER_EVICT_CTX_SLOTS_START),
	/* info_val == number of CSGs supported */
	KBASE_KTRACE_CODE_MAKE_CODE(SCHEDULER_EVICT_CTX_SLOTS_END),
	/* info_val[7:0]   == fw version_minor
	 * info_val[15:8]  == fw version_major
	 * info_val[63:32] == fw version_hash
	 */
	KBASE_KTRACE_CODE_MAKE_CODE(CSF_FIRMWARE_BOOT),
	KBASE_KTRACE_CODE_MAKE_CODE(CSF_FIRMWARE_REBOOT),
	KBASE_KTRACE_CODE_MAKE_CODE(SCHEDULER_TOCK_INVOKE),
	KBASE_KTRACE_CODE_MAKE_CODE(SCHEDULER_TICK_INVOKE),
	KBASE_KTRACE_CODE_MAKE_CODE(SCHEDULER_TOCK_START),
	KBASE_KTRACE_CODE_MAKE_CODE(SCHEDULER_TOCK_END),
	/* info_val == total number of runnable groups across all kctxs */
	KBASE_KTRACE_CODE_MAKE_CODE(SCHEDULER_TICK_START),
	KBASE_KTRACE_CODE_MAKE_CODE(SCHEDULER_TICK_END),
	KBASE_KTRACE_CODE_MAKE_CODE(SCHEDULER_RESET_START),
	KBASE_KTRACE_CODE_MAKE_CODE(SCHEDULER_RESET_END),
	/* info_val = timeout in ms */
	KBASE_KTRACE_CODE_MAKE_CODE(SCHEDULER_PROTM_WAIT_QUIT_START),
	/* info_val = remaining ms of the timeout, or 0 if it timed out */
	KBASE_KTRACE_CODE_MAKE_CODE(SCHEDULER_PROTM_WAIT_QUIT_END),
	KBASE_KTRACE_CODE_MAKE_CODE(SCHEDULER_GROUP_SYNC_UPDATE_EVENT),
	KBASE_KTRACE_CODE_MAKE_CODE(CSF_SYNC_UPDATE_NOTIFY_GPU_EVENT),

	/* info_val = JOB_IRQ_STATUS */
	KBASE_KTRACE_CODE_MAKE_CODE(CSF_INTERRUPT_START),
	/* info_val = JOB_IRQ_STATUS */
	KBASE_KTRACE_CODE_MAKE_CODE(CSF_INTERRUPT_END),
	/* info_val = JOB_IRQ_STATUS */
	KBASE_KTRACE_CODE_MAKE_CODE(CSG_INTERRUPT_PROCESS_START),
	/* info_val = GLB_REQ ^ GLB_ACK */
	KBASE_KTRACE_CODE_MAKE_CODE(CSF_INTERRUPT_GLB_REQ_ACK),
	/* info_val[31:0] = num non idle offslot groups
	 * info_val[32] = scheduler can suspend on idle
	 */
	KBASE_KTRACE_CODE_MAKE_CODE(SCHEDULER_GPU_IDLE_EVENT_CAN_SUSPEND),
	KBASE_KTRACE_CODE_MAKE_CODE(SCHEDULER_TICK_ADVANCE),
	KBASE_KTRACE_CODE_MAKE_CODE(SCHEDULER_TICK_NOADVANCE),
	/* kctx is added to the back of the list */
	KBASE_KTRACE_CODE_MAKE_CODE(SCHEDULER_RUNNABLE_KCTX_INSERT),
	KBASE_KTRACE_CODE_MAKE_CODE(SCHEDULER_RUNNABLE_KCTX_REMOVE),
	/* kctx is moved to the back of the list */
	KBASE_KTRACE_CODE_MAKE_CODE(SCHEDULER_RUNNABLE_KCTX_ROTATE),
	KBASE_KTRACE_CODE_MAKE_CODE(SCHEDULER_RUNNABLE_KCTX_HEAD),

	KBASE_KTRACE_CODE_MAKE_CODE(SCHEDULER_GPU_IDLE_WORKER_START),
	/* 4-bit encoding of boolean values (for ease of reading as hex values)
	 *
	 * info_val[3:0] = whether a reset was active/failed to be prevented
	 * info_val[7:4] = whether the scheduler was both idle and suspendable
	 * info_val[11:8] = whether all groups were suspended
	 */
	KBASE_KTRACE_CODE_MAKE_CODE(SCHEDULER_GPU_IDLE_WORKER_END),
	KBASE_KTRACE_CODE_MAKE_CODE(SCHEDULER_GROUP_SYNC_UPDATE_WORKER_START),
	KBASE_KTRACE_CODE_MAKE_CODE(SCHEDULER_GROUP_SYNC_UPDATE_WORKER_END),

	/* info_val = bitmask of slots that gave an ACK for STATUS_UPDATE */
	KBASE_KTRACE_CODE_MAKE_CODE(SCHEDULER_UPDATE_IDLE_SLOTS_ACK),

	/* info_val[63:0] = GPU cycle counter, used mainly for benchmarking
	 * purposes.
	 */
	KBASE_KTRACE_CODE_MAKE_CODE(SCHEDULER_GPU_IDLE_WORKER_HANDLING_START),
	KBASE_KTRACE_CODE_MAKE_CODE(SCHEDULER_GPU_IDLE_WORKER_HANDLING_END),

	KBASE_KTRACE_CODE_MAKE_CODE(CSF_FIRMWARE_MCU_HALTED),
	KBASE_KTRACE_CODE_MAKE_CODE(CSF_FIRMWARE_MCU_SLEEP),

	/*
	 * Group events
	 */
	/* info_val[2:0] == CSG_REQ state issued
	 * info_val[19:16] == as_nr
	 * info_val[63:32] == endpoint config (max number of endpoints allowed)
	 */
	KBASE_KTRACE_CODE_MAKE_CODE(CSG_SLOT_START_REQ),
	/* info_val == CSG_REQ state issued */
	KBASE_KTRACE_CODE_MAKE_CODE(CSG_SLOT_STOP_REQ),
	/* info_val == CSG_ACK state */
	KBASE_KTRACE_CODE_MAKE_CODE(CSG_SLOT_RUNNING),
	/* info_val == CSG_ACK state */
	KBASE_KTRACE_CODE_MAKE_CODE(CSG_SLOT_STOPPED),
	/* info_val == slot cleaned */
	KBASE_KTRACE_CODE_MAKE_CODE(CSG_SLOT_CLEANED),
	/* info_val = slot requesting STATUS_UPDATE */
	KBASE_KTRACE_CODE_MAKE_CODE(CSG_UPDATE_IDLE_SLOT_REQ),
	/* info_val = scheduler's new csg_slots_idle_mask[0]
	 * group->csg_nr indicates which bit was set
	 */
	KBASE_KTRACE_CODE_MAKE_CODE(CSG_SLOT_IDLE_SET),
	KBASE_KTRACE_CODE_MAKE_CODE(CSG_INTERRUPT_NO_NON_IDLE_GROUPS),
	KBASE_KTRACE_CODE_MAKE_CODE(CSG_INTERRUPT_NON_IDLE_GROUPS),
	/* info_val = scheduler's new csg_slots_idle_mask[0]
	 * group->csg_nr indicates which bit was cleared
	 *
	 * If there is no group, multiple bits may have been updated
	 */
	KBASE_KTRACE_CODE_MAKE_CODE(CSG_SLOT_IDLE_CLEAR),
	/* info_val == previous priority */
	KBASE_KTRACE_CODE_MAKE_CODE(CSG_SLOT_PRIO_UPDATE),
	/* info_val == CSG_REQ ^ CSG_ACK */
	KBASE_KTRACE_CODE_MAKE_CODE(CSG_INTERRUPT_SYNC_UPDATE),
	/* info_val == CSG_REQ ^ CSG_ACK */
	KBASE_KTRACE_CODE_MAKE_CODE(CSG_INTERRUPT_IDLE),
	/* info_val == CSG_REQ ^ CSG_ACK */
	KBASE_KTRACE_CODE_MAKE_CODE(CSG_INTERRUPT_PROGRESS_TIMER_EVENT),
	/* info_val[31:0] == CSG_REQ ^ CSG_ACK
	 * info_val[63:32] == CSG_IRQ_REQ ^ CSG_IRQ_ACK
	 */
	KBASE_KTRACE_CODE_MAKE_CODE(CSG_INTERRUPT_PROCESS_END),
	KBASE_KTRACE_CODE_MAKE_CODE(GROUP_SYNC_UPDATE_DONE),
	/* info_val == run state of the group */
	KBASE_KTRACE_CODE_MAKE_CODE(GROUP_DESCHEDULE),
	/* info_val == run state of the group */
	KBASE_KTRACE_CODE_MAKE_CODE(GROUP_SCHEDULE),
	/* info_val[31:0] == new run state of the evicted group
	 * info_val[63:32] == number of runnable groups
	 */
	KBASE_KTRACE_CODE_MAKE_CODE(GROUP_EVICT),

	/* info_val == new num_runnable_grps
	 * group is added to the back of the list for its priority level
	 */
	KBASE_KTRACE_CODE_MAKE_CODE(GROUP_RUNNABLE_INSERT),
	/* info_val == new num_runnable_grps
	 */
	KBASE_KTRACE_CODE_MAKE_CODE(GROUP_RUNNABLE_REMOVE),
	/* info_val == num_runnable_grps
	 * group is moved to the back of the list for its priority level
	 */
	KBASE_KTRACE_CODE_MAKE_CODE(GROUP_RUNNABLE_ROTATE),
	KBASE_KTRACE_CODE_MAKE_CODE(GROUP_RUNNABLE_HEAD),
	/* info_val == new num_idle_wait_grps
	 * group is added to the back of the list
	 */
	KBASE_KTRACE_CODE_MAKE_CODE(GROUP_IDLE_WAIT_INSERT),
	/* info_val == new num_idle_wait_grps
	 * group is removed from the list
	 */
	KBASE_KTRACE_CODE_MAKE_CODE(GROUP_IDLE_WAIT_REMOVE),
	KBASE_KTRACE_CODE_MAKE_CODE(GROUP_IDLE_WAIT_HEAD),

	/* info_val == whether the scheduler is running with protected mode tasks */
	KBASE_KTRACE_CODE_MAKE_CODE(SCHEDULER_PROTM_ENTER_CHECK),
	KBASE_KTRACE_CODE_MAKE_CODE(SCHEDULER_PROTM_ENTER),
	KBASE_KTRACE_CODE_MAKE_CODE(SCHEDULER_PROTM_EXIT),
	/* info_val[31:0] == number of GPU address space slots in use
	 * info_val[63:32] == number of runnable groups
	 */
	KBASE_KTRACE_CODE_MAKE_CODE(SCHEDULER_TOP_GRP),
	/* info_val == new count of off-slot non-idle groups
	 * if there is no group, the count was set rather than incremented
	 */
	KBASE_KTRACE_CODE_MAKE_CODE(SCHEDULER_NONIDLE_OFFSLOT_GRP_INC),
	/* info_val == new count of off-slot non-idle groups */
	KBASE_KTRACE_CODE_MAKE_CODE(SCHEDULER_NONIDLE_OFFSLOT_GRP_DEC),
	/* info_val = scheduler's new csg_slots_idle_mask[0]
	 * group->csg_nr indicates which bit was set
	 */
	KBASE_KTRACE_CODE_MAKE_CODE(SCHEDULER_HANDLE_IDLE_SLOTS),

	KBASE_KTRACE_CODE_MAKE_CODE(PROTM_EVENT_WORKER_START),
	KBASE_KTRACE_CODE_MAKE_CODE(PROTM_EVENT_WORKER_END),

	/* info_val = scheduler state */
	KBASE_KTRACE_CODE_MAKE_CODE(SCHED_BUSY),
	KBASE_KTRACE_CODE_MAKE_CODE(SCHED_INACTIVE),
	KBASE_KTRACE_CODE_MAKE_CODE(SCHED_SUSPENDED),
	KBASE_KTRACE_CODE_MAKE_CODE(SCHED_SLEEPING),

	/* info_val = mcu state */
#define KBASEP_MCU_STATE(n) KBASE_KTRACE_CODE_MAKE_CODE(PM_MCU_ ## n),
#include "backend/gpu/mali_kbase_pm_mcu_states.h"
#undef KBASEP_MCU_STATE
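	/*
	 * The three lines above reuse the MCU state list as an X-macro: for each
	 * KBASEP_MCU_STATE(n) entry in mali_kbase_pm_mcu_states.h, a PM_MCU_<n>
	 * trace code is generated here. For example, a state entry such as
	 * KBASEP_MCU_STATE(ON) (name used purely for illustration) would expand
	 * to KBASE_KTRACE_CODE_MAKE_CODE(PM_MCU_ON).
	 */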

	/* info_val = number of runnable groups */
	KBASE_KTRACE_CODE_MAKE_CODE(CSF_GROUP_INACTIVE),
	KBASE_KTRACE_CODE_MAKE_CODE(CSF_GROUP_RUNNABLE),
	KBASE_KTRACE_CODE_MAKE_CODE(CSF_GROUP_IDLE),
	KBASE_KTRACE_CODE_MAKE_CODE(CSF_GROUP_SUSPENDED),
	KBASE_KTRACE_CODE_MAKE_CODE(CSF_GROUP_SUSPENDED_ON_IDLE),
	KBASE_KTRACE_CODE_MAKE_CODE(CSF_GROUP_SUSPENDED_ON_WAIT_SYNC),
	/* info_val = new run state of the evicted group */
	KBASE_KTRACE_CODE_MAKE_CODE(CSF_GROUP_FAULT_EVICTED),
	/* info_val = number of active CSGs */
	KBASE_KTRACE_CODE_MAKE_CODE(CSF_GROUP_TERMINATED),

	/*
	 * Group + Queue events
	 */
	/* info_val == queue->enabled */
	KBASE_KTRACE_CODE_MAKE_CODE(CSI_START),
	/* info_val == queue->enabled before stop */
	KBASE_KTRACE_CODE_MAKE_CODE(CSI_STOP),
	KBASE_KTRACE_CODE_MAKE_CODE(CSI_STOP_REQ),
	/* info_val == CS_REQ ^ CS_ACK that were not processed due to the group
	 * being suspended
	 */
	KBASE_KTRACE_CODE_MAKE_CODE(CSI_INTERRUPT_GROUP_SUSPENDS_IGNORED),
	/* info_val == CS_REQ ^ CS_ACK */
	KBASE_KTRACE_CODE_MAKE_CODE(CSI_INTERRUPT_FAULT),
	/* info_val == CS_REQ ^ CS_ACK */
	KBASE_KTRACE_CODE_MAKE_CODE(CSI_INTERRUPT_TILER_OOM),
	/* info_val == CS_REQ ^ CS_ACK */
	KBASE_KTRACE_CODE_MAKE_CODE(CSI_INTERRUPT_PROTM_PEND),
	/* info_val == CS_ACK_PROTM_PEND ^ CS_REQ_PROTM_PEND */
	KBASE_KTRACE_CODE_MAKE_CODE(CSI_PROTM_ACK),
	/* info_val == group->run_state (for the group the queue is bound to) */
	KBASE_KTRACE_CODE_MAKE_CODE(QUEUE_START),
	KBASE_KTRACE_CODE_MAKE_CODE(QUEUE_STOP),
	/* info_val == contents of CS_STATUS_WAIT_SYNC_POINTER */
	KBASE_KTRACE_CODE_MAKE_CODE(QUEUE_SYNC_UPDATE_EVAL_START),
	/* info_val == bool for result of the evaluation */
	KBASE_KTRACE_CODE_MAKE_CODE(QUEUE_SYNC_UPDATE_EVAL_END),
	/* info_val == contents of CS_STATUS_WAIT */
	KBASE_KTRACE_CODE_MAKE_CODE(QUEUE_SYNC_UPDATE_WAIT_STATUS),
	/* info_val == current sync value pointed to by queue->sync_ptr */
	KBASE_KTRACE_CODE_MAKE_CODE(QUEUE_SYNC_UPDATE_CUR_VAL),
	/* info_val == current value of CS_STATUS_WAIT_SYNC_VALUE */
	KBASE_KTRACE_CODE_MAKE_CODE(QUEUE_SYNC_UPDATE_TEST_VAL),
	/* info_val == current value of CS_STATUS_BLOCKED_REASON */
	KBASE_KTRACE_CODE_MAKE_CODE(QUEUE_SYNC_UPDATE_BLOCKED_REASON),
	/* info_val = group's new protm_pending_bitmap[0]
	 * queue->csi_index indicates which bit was set
	 */
	KBASE_KTRACE_CODE_MAKE_CODE(CSI_PROTM_PEND_SET),
	/* info_val = group's new protm_pending_bitmap[0]
	 * queue->csi_index indicates which bit was cleared
	 */
	KBASE_KTRACE_CODE_MAKE_CODE(CSI_PROTM_PEND_CLEAR),

	/*
	 * KCPU queue events
	 */
	/* KTrace info_val == KCPU queue fence context
	 * KCPU extra_info_val == N/A.
	 */
	KBASE_KTRACE_CODE_MAKE_CODE(KCPU_QUEUE_CREATE),
	/* KTrace info_val == Number of pending commands in KCPU queue when
	 * it is destroyed.
	 * KCPU extra_info_val == Number of CQS wait operations present in
	 * the KCPU queue when it is destroyed.
	 */
	KBASE_KTRACE_CODE_MAKE_CODE(KCPU_QUEUE_DELETE),
	/* KTrace info_val == CQS event memory address
	 * KCPU extra_info_val == Upper 32 bits of event memory, i.e. contents
	 * of error field.
	 */
	KBASE_KTRACE_CODE_MAKE_CODE(KCPU_CQS_SET),
	/* KTrace info_val == Number of CQS objects to be waited upon
	 * KCPU extra_info_val == N/A.
	 */
	KBASE_KTRACE_CODE_MAKE_CODE(KCPU_CQS_WAIT_START),
	/* KTrace info_val == CQS event memory address
	 * KCPU extra_info_val == 1 if CQS was signaled with an error and queue
	 * inherited the error, otherwise 0.
	 */
	KBASE_KTRACE_CODE_MAKE_CODE(KCPU_CQS_WAIT_END),
	/* KTrace info_val == Fence context
	 * KCPU extra_info_val == Fence seqno.
	 */
	KBASE_KTRACE_CODE_MAKE_CODE(KCPU_FENCE_SIGNAL),
	/* KTrace info_val == Fence context
	 * KCPU extra_info_val == Fence seqno.
	 */
	KBASE_KTRACE_CODE_MAKE_CODE(KCPU_FENCE_WAIT_START),
	/* KTrace info_val == Fence context
	 * KCPU extra_info_val == Fence seqno.
	 */
	KBASE_KTRACE_CODE_MAKE_CODE(KCPU_FENCE_WAIT_END),

#if 0 /* Dummy section to avoid breaking formatting */
};
#endif
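
/*
 * Usage sketch (illustrative only, wrapped in #if 0 so it is never built):
 * trace points elsewhere in the CSF backend are assumed to emit these codes
 * through the KBASE_KTRACE_ADD() helper from mali_kbase_debug_ktrace.h,
 * passing the event-specific info_val documented next to each code above.
 */
#if 0 /* Example only, never compiled */
static void example_trace_tick(struct kbase_device *kbdev, u64 total_runnable_grps)
{
	/* Hypothetical call site: records a scheduler tick, using the number of
	 * runnable groups as info_val as documented for SCHEDULER_TICK_START.
	 */
	KBASE_KTRACE_ADD(kbdev, SCHEDULER_TICK_START, NULL, total_runnable_grps);
}
#endif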

	/* ***** THE LACK OF HEADER GUARDS IS INTENTIONAL ***** */