xref: /OK3568_Linux_fs/kernel/drivers/gpu/arm/bifrost/tl/mali_kbase_tracepoints.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1 // SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
2 /*
3  *
4  * (C) COPYRIGHT 2010-2023 ARM Limited. All rights reserved.
5  *
6  * This program is free software and is provided to you under the terms of the
7  * GNU General Public License version 2 as published by the Free Software
8  * Foundation, and any use by you of this program is subject to the terms
9  * of such GNU license.
10  *
11  * This program is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14  * GNU General Public License for more details.
15  *
16  * You should have received a copy of the GNU General Public License
17  * along with this program; if not, you can access it online at
18  * http://www.gnu.org/licenses/gpl-2.0.html.
19  *
20  */
21 
22 /*
23  * THIS FILE IS AUTOGENERATED BY generate_tracepoints.py.
24  * DO NOT EDIT.
25  */
26 
27 #include "mali_kbase_tracepoints.h"
28 #include "mali_kbase_tlstream.h"
29 #include "mali_kbase_tl_serialize.h"
30 
31 /* clang-format off */
32 
33 /* Message ids of trace events that are recorded in the obj stream. */
34 enum tl_msg_id_obj {
35 	KBASE_TL_NEW_CTX,
36 	KBASE_TL_NEW_GPU,
37 	KBASE_TL_NEW_LPU,
38 	KBASE_TL_NEW_ATOM,
39 	KBASE_TL_NEW_AS,
40 	KBASE_TL_DEL_CTX,
41 	KBASE_TL_DEL_ATOM,
42 	KBASE_TL_LIFELINK_LPU_GPU,
43 	KBASE_TL_LIFELINK_AS_GPU,
44 	KBASE_TL_RET_CTX_LPU,
45 	KBASE_TL_RET_ATOM_CTX,
46 	KBASE_TL_RET_ATOM_LPU,
47 	KBASE_TL_NRET_CTX_LPU,
48 	KBASE_TL_NRET_ATOM_CTX,
49 	KBASE_TL_NRET_ATOM_LPU,
50 	KBASE_TL_RET_AS_CTX,
51 	KBASE_TL_NRET_AS_CTX,
52 	KBASE_TL_RET_ATOM_AS,
53 	KBASE_TL_NRET_ATOM_AS,
54 	KBASE_TL_ATTRIB_ATOM_CONFIG,
55 	KBASE_TL_JIT_USEDPAGES,
56 	KBASE_TL_ATTRIB_ATOM_JITALLOCINFO,
57 	KBASE_TL_ATTRIB_ATOM_JITFREEINFO,
58 	KBASE_TL_ATTRIB_AS_CONFIG,
59 	KBASE_TL_EVENT_LPU_SOFTSTOP,
60 	KBASE_TL_EVENT_ATOM_SOFTSTOP_EX,
61 	KBASE_TL_EVENT_ATOM_SOFTSTOP_ISSUE,
62 	KBASE_TL_EVENT_ATOM_SOFTJOB_START,
63 	KBASE_TL_EVENT_ATOM_SOFTJOB_END,
64 	KBASE_TL_ARBITER_GRANTED,
65 	KBASE_TL_ARBITER_STARTED,
66 	KBASE_TL_ARBITER_STOP_REQUESTED,
67 	KBASE_TL_ARBITER_STOPPED,
68 	KBASE_TL_ARBITER_REQUESTED,
69 	KBASE_JD_GPU_SOFT_RESET,
70 	KBASE_JD_TILER_HEAP_CHUNK_ALLOC,
71 	KBASE_TL_JS_SCHED_START,
72 	KBASE_TL_JS_SCHED_END,
73 	KBASE_TL_JD_SUBMIT_ATOM_START,
74 	KBASE_TL_JD_SUBMIT_ATOM_END,
75 	KBASE_TL_JD_DONE_NO_LOCK_START,
76 	KBASE_TL_JD_DONE_NO_LOCK_END,
77 	KBASE_TL_JD_DONE_START,
78 	KBASE_TL_JD_DONE_END,
79 	KBASE_TL_JD_ATOM_COMPLETE,
80 	KBASE_TL_RUN_ATOM_START,
81 	KBASE_TL_RUN_ATOM_END,
82 	KBASE_TL_ATTRIB_ATOM_PRIORITY,
83 	KBASE_TL_ATTRIB_ATOM_STATE,
84 	KBASE_TL_ATTRIB_ATOM_PRIORITIZED,
85 	KBASE_TL_ATTRIB_ATOM_JIT,
86 	KBASE_TL_KBASE_NEW_DEVICE,
87 	KBASE_TL_KBASE_GPUCMDQUEUE_KICK,
88 	KBASE_TL_KBASE_DEVICE_PROGRAM_CSG,
89 	KBASE_TL_KBASE_DEVICE_DEPROGRAM_CSG,
90 	KBASE_TL_KBASE_DEVICE_HALTING_CSG,
91 	KBASE_TL_KBASE_DEVICE_SUSPEND_CSG,
92 	KBASE_TL_KBASE_DEVICE_CSG_IDLE,
93 	KBASE_TL_KBASE_NEW_CTX,
94 	KBASE_TL_KBASE_DEL_CTX,
95 	KBASE_TL_KBASE_CTX_ASSIGN_AS,
96 	KBASE_TL_KBASE_CTX_UNASSIGN_AS,
97 	KBASE_TL_KBASE_NEW_KCPUQUEUE,
98 	KBASE_TL_KBASE_DEL_KCPUQUEUE,
99 	KBASE_TL_KBASE_KCPUQUEUE_ENQUEUE_FENCE_SIGNAL,
100 	KBASE_TL_KBASE_KCPUQUEUE_ENQUEUE_FENCE_WAIT,
101 	KBASE_TL_KBASE_KCPUQUEUE_ENQUEUE_CQS_WAIT,
102 	KBASE_TL_KBASE_KCPUQUEUE_ENQUEUE_CQS_SET,
103 	KBASE_TL_KBASE_KCPUQUEUE_ENQUEUE_CQS_WAIT_OPERATION,
104 	KBASE_TL_KBASE_KCPUQUEUE_ENQUEUE_CQS_SET_OPERATION,
105 	KBASE_TL_KBASE_KCPUQUEUE_ENQUEUE_MAP_IMPORT,
106 	KBASE_TL_KBASE_KCPUQUEUE_ENQUEUE_UNMAP_IMPORT,
107 	KBASE_TL_KBASE_KCPUQUEUE_ENQUEUE_UNMAP_IMPORT_FORCE,
108 	KBASE_TL_KBASE_ARRAY_BEGIN_KCPUQUEUE_ENQUEUE_JIT_ALLOC,
109 	KBASE_TL_KBASE_ARRAY_ITEM_KCPUQUEUE_ENQUEUE_JIT_ALLOC,
110 	KBASE_TL_KBASE_ARRAY_END_KCPUQUEUE_ENQUEUE_JIT_ALLOC,
111 	KBASE_TL_KBASE_ARRAY_BEGIN_KCPUQUEUE_ENQUEUE_JIT_FREE,
112 	KBASE_TL_KBASE_ARRAY_ITEM_KCPUQUEUE_ENQUEUE_JIT_FREE,
113 	KBASE_TL_KBASE_ARRAY_END_KCPUQUEUE_ENQUEUE_JIT_FREE,
114 	KBASE_TL_KBASE_KCPUQUEUE_ENQUEUE_ERROR_BARRIER,
115 	KBASE_TL_KBASE_KCPUQUEUE_ENQUEUE_GROUP_SUSPEND,
116 	KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_FENCE_SIGNAL_START,
117 	KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_FENCE_SIGNAL_END,
118 	KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_FENCE_WAIT_START,
119 	KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_FENCE_WAIT_END,
120 	KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_CQS_WAIT_START,
121 	KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_CQS_WAIT_END,
122 	KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_CQS_SET,
123 	KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_CQS_WAIT_OPERATION_START,
124 	KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_CQS_WAIT_OPERATION_END,
125 	KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_CQS_SET_OPERATION,
126 	KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_MAP_IMPORT_START,
127 	KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_MAP_IMPORT_END,
128 	KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_UNMAP_IMPORT_START,
129 	KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_UNMAP_IMPORT_END,
130 	KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_UNMAP_IMPORT_FORCE_START,
131 	KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_UNMAP_IMPORT_FORCE_END,
132 	KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_JIT_ALLOC_START,
133 	KBASE_TL_KBASE_ARRAY_BEGIN_KCPUQUEUE_EXECUTE_JIT_ALLOC_END,
134 	KBASE_TL_KBASE_ARRAY_ITEM_KCPUQUEUE_EXECUTE_JIT_ALLOC_END,
135 	KBASE_TL_KBASE_ARRAY_END_KCPUQUEUE_EXECUTE_JIT_ALLOC_END,
136 	KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_JIT_FREE_START,
137 	KBASE_TL_KBASE_ARRAY_BEGIN_KCPUQUEUE_EXECUTE_JIT_FREE_END,
138 	KBASE_TL_KBASE_ARRAY_ITEM_KCPUQUEUE_EXECUTE_JIT_FREE_END,
139 	KBASE_TL_KBASE_ARRAY_END_KCPUQUEUE_EXECUTE_JIT_FREE_END,
140 	KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_ERROR_BARRIER,
141 	KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_GROUP_SUSPEND_START,
142 	KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_GROUP_SUSPEND_END,
143 	KBASE_TL_KBASE_CSFFW_FW_RELOADING,
144 	KBASE_TL_KBASE_CSFFW_FW_ENABLING,
145 	KBASE_TL_KBASE_CSFFW_FW_REQUEST_SLEEP,
146 	KBASE_TL_KBASE_CSFFW_FW_REQUEST_WAKEUP,
147 	KBASE_TL_KBASE_CSFFW_FW_REQUEST_HALT,
148 	KBASE_TL_KBASE_CSFFW_FW_DISABLING,
149 	KBASE_TL_KBASE_CSFFW_FW_OFF,
150 	KBASE_TL_KBASE_CSFFW_TLSTREAM_OVERFLOW,
151 	KBASE_OBJ_MSG_COUNT,
152 };
153 
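/*
 * Note on the TRACEPOINT_DESC entries below: the third argument is a compact
 * argument-type descriptor and the fourth names the arguments in the same
 * order. Judging from the writer functions later in this file (this is an
 * inference, not an authoritative format description), each character after
 * '@' encodes one serialized field: 'p' a pointer-sized handle, 'I' a 32-bit
 * value, 'L' a 64-bit value and 's' a NUL-terminated string.
 */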
154 #define OBJ_TP_LIST \
155 	TRACEPOINT_DESC(KBASE_TL_NEW_CTX, \
156 		"object ctx is created", \
157 		"@pII", \
158 		"ctx,ctx_nr,tgid") \
159 	TRACEPOINT_DESC(KBASE_TL_NEW_GPU, \
160 		"object gpu is created", \
161 		"@pII", \
162 		"gpu,gpu_id,core_count") \
163 	TRACEPOINT_DESC(KBASE_TL_NEW_LPU, \
164 		"object lpu is created", \
165 		"@pII", \
166 		"lpu,lpu_nr,lpu_fn") \
167 	TRACEPOINT_DESC(KBASE_TL_NEW_ATOM, \
168 		"object atom is created", \
169 		"@pI", \
170 		"atom,atom_nr") \
171 	TRACEPOINT_DESC(KBASE_TL_NEW_AS, \
172 		"address space object is created", \
173 		"@pI", \
174 		"address_space,as_nr") \
175 	TRACEPOINT_DESC(KBASE_TL_DEL_CTX, \
176 		"context is destroyed", \
177 		"@p", \
178 		"ctx") \
179 	TRACEPOINT_DESC(KBASE_TL_DEL_ATOM, \
180 		"atom is destroyed", \
181 		"@p", \
182 		"atom") \
183 	TRACEPOINT_DESC(KBASE_TL_LIFELINK_LPU_GPU, \
184 		"lpu is deleted with gpu", \
185 		"@pp", \
186 		"lpu,gpu") \
187 	TRACEPOINT_DESC(KBASE_TL_LIFELINK_AS_GPU, \
188 		"address space is deleted with gpu", \
189 		"@pp", \
190 		"address_space,gpu") \
191 	TRACEPOINT_DESC(KBASE_TL_RET_CTX_LPU, \
192 		"context is retained by lpu", \
193 		"@pp", \
194 		"ctx,lpu") \
195 	TRACEPOINT_DESC(KBASE_TL_RET_ATOM_CTX, \
196 		"atom is retained by context", \
197 		"@pp", \
198 		"atom,ctx") \
199 	TRACEPOINT_DESC(KBASE_TL_RET_ATOM_LPU, \
200 		"atom is retained by lpu", \
201 		"@pps", \
202 		"atom,lpu,attrib_match_list") \
203 	TRACEPOINT_DESC(KBASE_TL_NRET_CTX_LPU, \
204 		"context is released by lpu", \
205 		"@pp", \
206 		"ctx,lpu") \
207 	TRACEPOINT_DESC(KBASE_TL_NRET_ATOM_CTX, \
208 		"atom is released by context", \
209 		"@pp", \
210 		"atom,ctx") \
211 	TRACEPOINT_DESC(KBASE_TL_NRET_ATOM_LPU, \
212 		"atom is released by lpu", \
213 		"@pp", \
214 		"atom,lpu") \
215 	TRACEPOINT_DESC(KBASE_TL_RET_AS_CTX, \
216 		"address space is retained by context", \
217 		"@pp", \
218 		"address_space,ctx") \
219 	TRACEPOINT_DESC(KBASE_TL_NRET_AS_CTX, \
220 		"address space is released by context", \
221 		"@pp", \
222 		"address_space,ctx") \
223 	TRACEPOINT_DESC(KBASE_TL_RET_ATOM_AS, \
224 		"atom is retained by address space", \
225 		"@pp", \
226 		"atom,address_space") \
227 	TRACEPOINT_DESC(KBASE_TL_NRET_ATOM_AS, \
228 		"atom is released by address space", \
229 		"@pp", \
230 		"atom,address_space") \
231 	TRACEPOINT_DESC(KBASE_TL_ATTRIB_ATOM_CONFIG, \
232 		"atom job slot attributes", \
233 		"@pLLI", \
234 		"atom,descriptor,affinity,config") \
235 	TRACEPOINT_DESC(KBASE_TL_JIT_USEDPAGES, \
236 		"used pages for jit", \
237 		"@LI", \
238 		"used_pages,j_id") \
239 	TRACEPOINT_DESC(KBASE_TL_ATTRIB_ATOM_JITALLOCINFO, \
240 		"Information about JIT allocations", \
241 		"@pLLLIIIII", \
242 		"atom,va_pgs,com_pgs,extent,j_id,bin_id,max_allocs,jit_flags,usg_id") \
243 	TRACEPOINT_DESC(KBASE_TL_ATTRIB_ATOM_JITFREEINFO, \
244 		"Information about JIT frees", \
245 		"@pI", \
246 		"atom,j_id") \
247 	TRACEPOINT_DESC(KBASE_TL_ATTRIB_AS_CONFIG, \
248 		"address space attributes", \
249 		"@pLLL", \
250 		"address_space,transtab,memattr,transcfg") \
251 	TRACEPOINT_DESC(KBASE_TL_EVENT_LPU_SOFTSTOP, \
252 		"softstop event on given lpu", \
253 		"@p", \
254 		"lpu") \
255 	TRACEPOINT_DESC(KBASE_TL_EVENT_ATOM_SOFTSTOP_EX, \
256 		"atom softstopped", \
257 		"@p", \
258 		"atom") \
259 	TRACEPOINT_DESC(KBASE_TL_EVENT_ATOM_SOFTSTOP_ISSUE, \
260 		"atom softstop issued", \
261 		"@p", \
262 		"atom") \
263 	TRACEPOINT_DESC(KBASE_TL_EVENT_ATOM_SOFTJOB_START, \
264 		"atom soft job has started", \
265 		"@p", \
266 		"atom") \
267 	TRACEPOINT_DESC(KBASE_TL_EVENT_ATOM_SOFTJOB_END, \
268 		"atom soft job has completed", \
269 		"@p", \
270 		"atom") \
271 	TRACEPOINT_DESC(KBASE_TL_ARBITER_GRANTED, \
272 		"Arbiter has granted gpu access", \
273 		"@p", \
274 		"gpu") \
275 	TRACEPOINT_DESC(KBASE_TL_ARBITER_STARTED, \
276 		"Driver is running again and able to process jobs", \
277 		"@p", \
278 		"gpu") \
279 	TRACEPOINT_DESC(KBASE_TL_ARBITER_STOP_REQUESTED, \
280 		"Arbiter has requested driver to stop using gpu", \
281 		"@p", \
282 		"gpu") \
283 	TRACEPOINT_DESC(KBASE_TL_ARBITER_STOPPED, \
284 		"Driver has stopped using gpu", \
285 		"@p", \
286 		"gpu") \
287 	TRACEPOINT_DESC(KBASE_TL_ARBITER_REQUESTED, \
288 		"Driver has requested the arbiter for gpu access", \
289 		"@p", \
290 		"gpu") \
291 	TRACEPOINT_DESC(KBASE_JD_GPU_SOFT_RESET, \
292 		"gpu soft reset", \
293 		"@p", \
294 		"gpu") \
295 	TRACEPOINT_DESC(KBASE_JD_TILER_HEAP_CHUNK_ALLOC, \
296 		"Tiler Heap Chunk Allocation", \
297 		"@ILL", \
298 		"ctx_nr,heap_id,chunk_va") \
299 	TRACEPOINT_DESC(KBASE_TL_JS_SCHED_START, \
300 		"Scheduling starts", \
301 		"@I", \
302 		"dummy") \
303 	TRACEPOINT_DESC(KBASE_TL_JS_SCHED_END, \
304 		"Scheduling ends", \
305 		"@I", \
306 		"dummy") \
307 	TRACEPOINT_DESC(KBASE_TL_JD_SUBMIT_ATOM_START, \
308 		"Submitting an atom starts", \
309 		"@p", \
310 		"atom") \
311 	TRACEPOINT_DESC(KBASE_TL_JD_SUBMIT_ATOM_END, \
312 		"Submitting an atom ends", \
313 		"@p", \
314 		"atom") \
315 	TRACEPOINT_DESC(KBASE_TL_JD_DONE_NO_LOCK_START, \
316 		"Within function kbase_jd_done_nolock", \
317 		"@p", \
318 		"atom") \
319 	TRACEPOINT_DESC(KBASE_TL_JD_DONE_NO_LOCK_END, \
320 		"Within function kbase_jd_done_nolock - end", \
321 		"@p", \
322 		"atom") \
323 	TRACEPOINT_DESC(KBASE_TL_JD_DONE_START, \
324 		"Start of kbase_jd_done", \
325 		"@p", \
326 		"atom") \
327 	TRACEPOINT_DESC(KBASE_TL_JD_DONE_END, \
328 		"End of kbase_jd_done", \
329 		"@p", \
330 		"atom") \
331 	TRACEPOINT_DESC(KBASE_TL_JD_ATOM_COMPLETE, \
332 		"Atom marked complete", \
333 		"@p", \
334 		"atom") \
335 	TRACEPOINT_DESC(KBASE_TL_RUN_ATOM_START, \
336 		"Running of atom starts", \
337 		"@pI", \
338 		"atom,atom_nr") \
339 	TRACEPOINT_DESC(KBASE_TL_RUN_ATOM_END, \
340 		"Running of atom ends", \
341 		"@pI", \
342 		"atom,atom_nr") \
343 	TRACEPOINT_DESC(KBASE_TL_ATTRIB_ATOM_PRIORITY, \
344 		"atom priority", \
345 		"@pI", \
346 		"atom,prio") \
347 	TRACEPOINT_DESC(KBASE_TL_ATTRIB_ATOM_STATE, \
348 		"atom state", \
349 		"@pI", \
350 		"atom,state") \
351 	TRACEPOINT_DESC(KBASE_TL_ATTRIB_ATOM_PRIORITIZED, \
352 		"atom caused priority change", \
353 		"@p", \
354 		"atom") \
355 	TRACEPOINT_DESC(KBASE_TL_ATTRIB_ATOM_JIT, \
356 		"jit done for atom", \
357 		"@pLLILILLL", \
358 		"atom,edit_addr,new_addr,jit_flags,mem_flags,j_id,com_pgs,extent,va_pgs") \
359 	TRACEPOINT_DESC(KBASE_TL_KBASE_NEW_DEVICE, \
360 		"New KBase Device", \
361 		"@IIIIIII", \
362 		"kbase_device_id,kbase_device_gpu_core_count,kbase_device_max_num_csgs,kbase_device_as_count,kbase_device_sb_entry_count,kbase_device_has_cross_stream_sync,kbase_device_supports_gpu_sleep") \
363 	TRACEPOINT_DESC(KBASE_TL_KBASE_GPUCMDQUEUE_KICK, \
364 		"Kernel receives a request to process new GPU queue instructions", \
365 		"@IL", \
366 		"kernel_ctx_id,buffer_gpu_addr") \
367 	TRACEPOINT_DESC(KBASE_TL_KBASE_DEVICE_PROGRAM_CSG, \
368 		"CSG is programmed to a slot", \
369 		"@IIIII", \
370 		"kbase_device_id,kernel_ctx_id,gpu_cmdq_grp_handle,kbase_device_csg_slot_index,kbase_device_csg_slot_resuming") \
371 	TRACEPOINT_DESC(KBASE_TL_KBASE_DEVICE_DEPROGRAM_CSG, \
372 		"CSG is deprogrammed from a slot", \
373 		"@II", \
374 		"kbase_device_id,kbase_device_csg_slot_index") \
375 	TRACEPOINT_DESC(KBASE_TL_KBASE_DEVICE_HALTING_CSG, \
376 		"CSG is halting", \
377 		"@III", \
378 		"kbase_device_id,kbase_device_csg_slot_index,kbase_device_csg_slot_suspending") \
379 	TRACEPOINT_DESC(KBASE_TL_KBASE_DEVICE_SUSPEND_CSG, \
380 		"CSG is suspended", \
381 		"@II", \
382 		"kbase_device_id,kbase_device_csg_slot_index") \
383 	TRACEPOINT_DESC(KBASE_TL_KBASE_DEVICE_CSG_IDLE, \
384 		"KBase device is notified that CSG is idle.", \
385 		"@II", \
386 		"kbase_device_id,kbase_device_csg_slot_index") \
387 	TRACEPOINT_DESC(KBASE_TL_KBASE_NEW_CTX, \
388 		"New KBase Context", \
389 		"@II", \
390 		"kernel_ctx_id,kbase_device_id") \
391 	TRACEPOINT_DESC(KBASE_TL_KBASE_DEL_CTX, \
392 		"Delete KBase Context", \
393 		"@I", \
394 		"kernel_ctx_id") \
395 	TRACEPOINT_DESC(KBASE_TL_KBASE_CTX_ASSIGN_AS, \
396 		"Address Space is assigned to a KBase context", \
397 		"@II", \
398 		"kernel_ctx_id,kbase_device_as_index") \
399 	TRACEPOINT_DESC(KBASE_TL_KBASE_CTX_UNASSIGN_AS, \
400 		"Address Space is unassigned from a KBase context", \
401 		"@I", \
402 		"kernel_ctx_id") \
403 	TRACEPOINT_DESC(KBASE_TL_KBASE_NEW_KCPUQUEUE, \
404 		"New KCPU Queue", \
405 		"@pIII", \
406 		"kcpu_queue,kcpu_queue_id,kernel_ctx_id,kcpuq_num_pending_cmds") \
407 	TRACEPOINT_DESC(KBASE_TL_KBASE_DEL_KCPUQUEUE, \
408 		"Delete KCPU Queue", \
409 		"@p", \
410 		"kcpu_queue") \
411 	TRACEPOINT_DESC(KBASE_TL_KBASE_KCPUQUEUE_ENQUEUE_FENCE_SIGNAL, \
412 		"KCPU Queue enqueues Signal on Fence", \
413 		"@pp", \
414 		"kcpu_queue,fence") \
415 	TRACEPOINT_DESC(KBASE_TL_KBASE_KCPUQUEUE_ENQUEUE_FENCE_WAIT, \
416 		"KCPU Queue enqueues Wait on Fence", \
417 		"@pp", \
418 		"kcpu_queue,fence") \
419 	TRACEPOINT_DESC(KBASE_TL_KBASE_KCPUQUEUE_ENQUEUE_CQS_WAIT, \
420 		"KCPU Queue enqueues Wait on Cross Queue Sync Object", \
421 		"@pLII", \
422 		"kcpu_queue,cqs_obj_gpu_addr,compare_value,inherit_error") \
423 	TRACEPOINT_DESC(KBASE_TL_KBASE_KCPUQUEUE_ENQUEUE_CQS_SET, \
424 		"KCPU Queue enqueues Set on Cross Queue Sync Object", \
425 		"@pL", \
426 		"kcpu_queue,cqs_obj_gpu_addr") \
427 	TRACEPOINT_DESC(KBASE_TL_KBASE_KCPUQUEUE_ENQUEUE_CQS_WAIT_OPERATION, \
428 		"KCPU Queue enqueues Wait Operation on Cross Queue Sync Object", \
429 		"@pLLIII", \
430 		"kcpu_queue,cqs_obj_gpu_addr,compare_value,condition,data_type,inherit_error") \
431 	TRACEPOINT_DESC(KBASE_TL_KBASE_KCPUQUEUE_ENQUEUE_CQS_SET_OPERATION, \
432 		"KCPU Queue enqueues Set Operation on Cross Queue Sync Object", \
433 		"@pLLII", \
434 		"kcpu_queue,cqs_obj_gpu_addr,value,operation,data_type") \
435 	TRACEPOINT_DESC(KBASE_TL_KBASE_KCPUQUEUE_ENQUEUE_MAP_IMPORT, \
436 		"KCPU Queue enqueues Map Import", \
437 		"@pL", \
438 		"kcpu_queue,map_import_buf_gpu_addr") \
439 	TRACEPOINT_DESC(KBASE_TL_KBASE_KCPUQUEUE_ENQUEUE_UNMAP_IMPORT, \
440 		"KCPU Queue enqueues Unmap Import", \
441 		"@pL", \
442 		"kcpu_queue,map_import_buf_gpu_addr") \
443 	TRACEPOINT_DESC(KBASE_TL_KBASE_KCPUQUEUE_ENQUEUE_UNMAP_IMPORT_FORCE, \
444 		"KCPU Queue enqueues Unmap Import ignoring reference count", \
445 		"@pL", \
446 		"kcpu_queue,map_import_buf_gpu_addr") \
447 	TRACEPOINT_DESC(KBASE_TL_KBASE_ARRAY_BEGIN_KCPUQUEUE_ENQUEUE_JIT_ALLOC, \
448 		"Begin array of KCPU Queue enqueues JIT Alloc", \
449 		"@p", \
450 		"kcpu_queue") \
451 	TRACEPOINT_DESC(KBASE_TL_KBASE_ARRAY_ITEM_KCPUQUEUE_ENQUEUE_JIT_ALLOC, \
452 		"Array item of KCPU Queue enqueues JIT Alloc", \
453 		"@pLLLLIIIII", \
454 		"kcpu_queue,jit_alloc_gpu_alloc_addr_dest,jit_alloc_va_pages,jit_alloc_commit_pages,jit_alloc_extent,jit_alloc_jit_id,jit_alloc_bin_id,jit_alloc_max_allocations,jit_alloc_flags,jit_alloc_usage_id") \
455 	TRACEPOINT_DESC(KBASE_TL_KBASE_ARRAY_END_KCPUQUEUE_ENQUEUE_JIT_ALLOC, \
456 		"End array of KCPU Queue enqueues JIT Alloc", \
457 		"@p", \
458 		"kcpu_queue") \
459 	TRACEPOINT_DESC(KBASE_TL_KBASE_ARRAY_BEGIN_KCPUQUEUE_ENQUEUE_JIT_FREE, \
460 		"Begin array of KCPU Queue enqueues JIT Free", \
461 		"@p", \
462 		"kcpu_queue") \
463 	TRACEPOINT_DESC(KBASE_TL_KBASE_ARRAY_ITEM_KCPUQUEUE_ENQUEUE_JIT_FREE, \
464 		"Array item of KCPU Queue enqueues JIT Free", \
465 		"@pI", \
466 		"kcpu_queue,jit_alloc_jit_id") \
467 	TRACEPOINT_DESC(KBASE_TL_KBASE_ARRAY_END_KCPUQUEUE_ENQUEUE_JIT_FREE, \
468 		"End array of KCPU Queue enqueues JIT Free", \
469 		"@p", \
470 		"kcpu_queue") \
471 	TRACEPOINT_DESC(KBASE_TL_KBASE_KCPUQUEUE_ENQUEUE_ERROR_BARRIER, \
472 		"KCPU Queue enqueues Error Barrier", \
473 		"@p", \
474 		"kcpu_queue") \
475 	TRACEPOINT_DESC(KBASE_TL_KBASE_KCPUQUEUE_ENQUEUE_GROUP_SUSPEND, \
476 		"KCPU Queue enqueues Group Suspend", \
477 		"@ppI", \
478 		"kcpu_queue,group_suspend_buf,gpu_cmdq_grp_handle") \
479 	TRACEPOINT_DESC(KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_FENCE_SIGNAL_START, \
480 		"KCPU Queue starts a Signal on Fence", \
481 		"@p", \
482 		"kcpu_queue") \
483 	TRACEPOINT_DESC(KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_FENCE_SIGNAL_END, \
484 		"KCPU Queue ends a Signal on Fence", \
485 		"@pI", \
486 		"kcpu_queue,execute_error") \
487 	TRACEPOINT_DESC(KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_FENCE_WAIT_START, \
488 		"KCPU Queue starts a Wait on Fence", \
489 		"@p", \
490 		"kcpu_queue") \
491 	TRACEPOINT_DESC(KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_FENCE_WAIT_END, \
492 		"KCPU Queue ends a Wait on Fence", \
493 		"@pI", \
494 		"kcpu_queue,execute_error") \
495 	TRACEPOINT_DESC(KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_CQS_WAIT_START, \
496 		"KCPU Queue starts a Wait on Cross Queue Sync Object", \
497 		"@p", \
498 		"kcpu_queue") \
499 	TRACEPOINT_DESC(KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_CQS_WAIT_END, \
500 		"KCPU Queue ends a Wait on Cross Queue Sync Object", \
501 		"@pI", \
502 		"kcpu_queue,execute_error") \
503 	TRACEPOINT_DESC(KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_CQS_SET, \
504 		"KCPU Queue executes a Set on Cross Queue Sync Object", \
505 		"@pI", \
506 		"kcpu_queue,execute_error") \
507 	TRACEPOINT_DESC(KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_CQS_WAIT_OPERATION_START, \
508 		"KCPU Queue starts a Wait Operation on Cross Queue Sync Object", \
509 		"@p", \
510 		"kcpu_queue") \
511 	TRACEPOINT_DESC(KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_CQS_WAIT_OPERATION_END, \
512 		"KCPU Queue ends a Wait Operation on Cross Queue Sync Object", \
513 		"@pI", \
514 		"kcpu_queue,execute_error") \
515 	TRACEPOINT_DESC(KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_CQS_SET_OPERATION, \
516 		"KCPU Queue executes a Set Operation on Cross Queue Sync Object", \
517 		"@pI", \
518 		"kcpu_queue,execute_error") \
519 	TRACEPOINT_DESC(KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_MAP_IMPORT_START, \
520 		"KCPU Queue starts a Map Import", \
521 		"@p", \
522 		"kcpu_queue") \
523 	TRACEPOINT_DESC(KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_MAP_IMPORT_END, \
524 		"KCPU Queue ends a Map Import", \
525 		"@pI", \
526 		"kcpu_queue,execute_error") \
527 	TRACEPOINT_DESC(KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_UNMAP_IMPORT_START, \
528 		"KCPU Queue starts an Unmap Import", \
529 		"@p", \
530 		"kcpu_queue") \
531 	TRACEPOINT_DESC(KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_UNMAP_IMPORT_END, \
532 		"KCPU Queue ends an Unmap Import", \
533 		"@pI", \
534 		"kcpu_queue,execute_error") \
535 	TRACEPOINT_DESC(KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_UNMAP_IMPORT_FORCE_START, \
536 		"KCPU Queue starts an Unmap Import ignoring reference count", \
537 		"@p", \
538 		"kcpu_queue") \
539 	TRACEPOINT_DESC(KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_UNMAP_IMPORT_FORCE_END, \
540 		"KCPU Queue ends an Unmap Import ignoring reference count", \
541 		"@pI", \
542 		"kcpu_queue,execute_error") \
543 	TRACEPOINT_DESC(KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_JIT_ALLOC_START, \
544 		"KCPU Queue starts an array of JIT Allocs", \
545 		"@p", \
546 		"kcpu_queue") \
547 	TRACEPOINT_DESC(KBASE_TL_KBASE_ARRAY_BEGIN_KCPUQUEUE_EXECUTE_JIT_ALLOC_END, \
548 		"Begin array of KCPU Queue ends an array of JIT Allocs", \
549 		"@p", \
550 		"kcpu_queue") \
551 	TRACEPOINT_DESC(KBASE_TL_KBASE_ARRAY_ITEM_KCPUQUEUE_EXECUTE_JIT_ALLOC_END, \
552 		"Array item of KCPU Queue ends an array of JIT Allocs", \
553 		"@pILL", \
554 		"kcpu_queue,execute_error,jit_alloc_gpu_alloc_addr,jit_alloc_mmu_flags") \
555 	TRACEPOINT_DESC(KBASE_TL_KBASE_ARRAY_END_KCPUQUEUE_EXECUTE_JIT_ALLOC_END, \
556 		"End array of KCPU Queue ends an array of JIT Allocs", \
557 		"@p", \
558 		"kcpu_queue") \
559 	TRACEPOINT_DESC(KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_JIT_FREE_START, \
560 		"KCPU Queue starts an array of JIT Frees", \
561 		"@p", \
562 		"kcpu_queue") \
563 	TRACEPOINT_DESC(KBASE_TL_KBASE_ARRAY_BEGIN_KCPUQUEUE_EXECUTE_JIT_FREE_END, \
564 		"Begin array of KCPU Queue ends an array of JIT Frees", \
565 		"@p", \
566 		"kcpu_queue") \
567 	TRACEPOINT_DESC(KBASE_TL_KBASE_ARRAY_ITEM_KCPUQUEUE_EXECUTE_JIT_FREE_END, \
568 		"Array item of KCPU Queue ends an array of JIT Frees", \
569 		"@pIL", \
570 		"kcpu_queue,execute_error,jit_free_pages_used") \
571 	TRACEPOINT_DESC(KBASE_TL_KBASE_ARRAY_END_KCPUQUEUE_EXECUTE_JIT_FREE_END, \
572 		"End array of KCPU Queue ends an array of JIT Frees", \
573 		"@p", \
574 		"kcpu_queue") \
575 	TRACEPOINT_DESC(KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_ERROR_BARRIER, \
576 		"KCPU Queue executes an Error Barrier", \
577 		"@p", \
578 		"kcpu_queue") \
579 	TRACEPOINT_DESC(KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_GROUP_SUSPEND_START, \
580 		"KCPU Queue starts a group suspend", \
581 		"@p", \
582 		"kcpu_queue") \
583 	TRACEPOINT_DESC(KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_GROUP_SUSPEND_END, \
584 		"KCPU Queue ends a group suspend", \
585 		"@pI", \
586 		"kcpu_queue,execute_error") \
587 	TRACEPOINT_DESC(KBASE_TL_KBASE_CSFFW_FW_RELOADING, \
588 		"CSF FW is being reloaded", \
589 		"@L", \
590 		"csffw_cycle") \
591 	TRACEPOINT_DESC(KBASE_TL_KBASE_CSFFW_FW_ENABLING, \
592 		"CSF FW is being enabled", \
593 		"@L", \
594 		"csffw_cycle") \
595 	TRACEPOINT_DESC(KBASE_TL_KBASE_CSFFW_FW_REQUEST_SLEEP, \
596 		"CSF FW sleep is requested", \
597 		"@L", \
598 		"csffw_cycle") \
599 	TRACEPOINT_DESC(KBASE_TL_KBASE_CSFFW_FW_REQUEST_WAKEUP, \
600 		"CSF FW wake up is requested", \
601 		"@L", \
602 		"csffw_cycle") \
603 	TRACEPOINT_DESC(KBASE_TL_KBASE_CSFFW_FW_REQUEST_HALT, \
604 		"CSF FW halt is requested", \
605 		"@L", \
606 		"csffw_cycle") \
607 	TRACEPOINT_DESC(KBASE_TL_KBASE_CSFFW_FW_DISABLING, \
608 		"CSF FW is being disabled", \
609 		"@L", \
610 		"csffw_cycle") \
611 	TRACEPOINT_DESC(KBASE_TL_KBASE_CSFFW_FW_OFF, \
612 		"CSF FW is off", \
613 		"@L", \
614 		"csffw_cycle") \
615 	TRACEPOINT_DESC(KBASE_TL_KBASE_CSFFW_TLSTREAM_OVERFLOW, \
616 		"An overflow has happened with the CSFFW Timeline stream", \
617 		"@LL", \
618 		"csffw_timestamp,csffw_cycle")
619 
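/*
 * The MIPE_HEADER_* macros below parameterize mali_kbase_mipe_gen_header.h;
 * judging from its use here, that header expands OBJ_TP_LIST into a packed
 * descriptor blob (__obj_desc_header) that describes the object-stream
 * tracepoints to the timeline consumer.
 */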
620 #define MIPE_HEADER_BLOB_VAR_NAME        __obj_desc_header
621 #define MIPE_HEADER_STREAM_ID            TL_STREAM_ID_KERNEL
622 #define MIPE_HEADER_PKT_CLASS            TL_PACKET_CLASS_OBJ
623 #define MIPE_HEADER_TRACEPOINT_LIST      OBJ_TP_LIST
624 #define MIPE_HEADER_TRACEPOINT_LIST_SIZE KBASE_OBJ_MSG_COUNT
625 
626 #include "mali_kbase_mipe_gen_header.h"
627 
628 const char   *obj_desc_header = (const char *) &__obj_desc_header;
629 const size_t  obj_desc_header_size = sizeof(__obj_desc_header);
630 
631 /* Message ids of trace events that are recorded in the aux stream. */
632 enum tl_msg_id_aux {
633 	KBASE_AUX_PM_STATE,
634 	KBASE_AUX_PAGEFAULT,
635 	KBASE_AUX_PAGESALLOC,
636 	KBASE_AUX_DEVFREQ_TARGET,
637 	KBASE_AUX_JIT_STATS,
638 	KBASE_AUX_TILER_HEAP_STATS,
639 	KBASE_AUX_EVENT_JOB_SLOT,
640 	KBASE_AUX_PROTECTED_ENTER_START,
641 	KBASE_AUX_PROTECTED_ENTER_END,
642 	KBASE_AUX_MMU_COMMAND,
643 	KBASE_AUX_PROTECTED_LEAVE_START,
644 	KBASE_AUX_PROTECTED_LEAVE_END,
645 	KBASE_AUX_MSG_COUNT,
646 };
647 
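/*
 * AUX_TP_LIST reuses the TRACEPOINT_DESC descriptor encoding used for
 * OBJ_TP_LIST above; these auxiliary events cover driver state such as power
 * management, page faults, devfreq targets and JIT/tiler-heap statistics.
 */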
648 #define AUX_TP_LIST \
649 	TRACEPOINT_DESC(KBASE_AUX_PM_STATE, \
650 		"PM state", \
651 		"@IL", \
652 		"core_type,core_state_bitset") \
653 	TRACEPOINT_DESC(KBASE_AUX_PAGEFAULT, \
654 		"Page fault", \
655 		"@IIL", \
656 		"ctx_nr,as_nr,page_cnt_change") \
657 	TRACEPOINT_DESC(KBASE_AUX_PAGESALLOC, \
658 		"Total alloc pages change", \
659 		"@IL", \
660 		"ctx_nr,page_cnt") \
661 	TRACEPOINT_DESC(KBASE_AUX_DEVFREQ_TARGET, \
662 		"New device frequency target", \
663 		"@L", \
664 		"target_freq") \
665 	TRACEPOINT_DESC(KBASE_AUX_JIT_STATS, \
666 		"per-bin JIT statistics", \
667 		"@IIIIII", \
668 		"ctx_nr,bid,max_allocs,allocs,va_pages,ph_pages") \
669 	TRACEPOINT_DESC(KBASE_AUX_TILER_HEAP_STATS, \
670 		"Tiler Heap statistics", \
671 		"@ILIIIIIII", \
672 		"ctx_nr,heap_id,va_pages,ph_pages,max_chunks,chunk_size,chunk_count,target_in_flight,nr_in_flight") \
673 	TRACEPOINT_DESC(KBASE_AUX_EVENT_JOB_SLOT, \
674 		"event on a given job slot", \
675 		"@pIII", \
676 		"ctx,slot_nr,atom_nr,event") \
677 	TRACEPOINT_DESC(KBASE_AUX_PROTECTED_ENTER_START, \
678 		"enter protected mode start", \
679 		"@p", \
680 		"gpu") \
681 	TRACEPOINT_DESC(KBASE_AUX_PROTECTED_ENTER_END, \
682 		"enter protected mode end", \
683 		"@p", \
684 		"gpu") \
685 	TRACEPOINT_DESC(KBASE_AUX_MMU_COMMAND, \
686 		"mmu commands with synchronicity info", \
687 		"@IIILI", \
688 		"kernel_ctx_id,mmu_cmd_id,mmu_synchronicity,mmu_lock_addr,mmu_lock_page_num") \
689 	TRACEPOINT_DESC(KBASE_AUX_PROTECTED_LEAVE_START, \
690 		"leave protected mode start", \
691 		"@p", \
692 		"gpu") \
693 	TRACEPOINT_DESC(KBASE_AUX_PROTECTED_LEAVE_END, \
694 		"leave protected mode end", \
695 		"@p", \
696 		"gpu")
697 
698 #define MIPE_HEADER_BLOB_VAR_NAME        __aux_desc_header
699 #define MIPE_HEADER_STREAM_ID            TL_STREAM_ID_KERNEL
700 #define MIPE_HEADER_PKT_CLASS            TL_PACKET_CLASS_AUX
701 #define MIPE_HEADER_TRACEPOINT_LIST      AUX_TP_LIST
702 #define MIPE_HEADER_TRACEPOINT_LIST_SIZE KBASE_AUX_MSG_COUNT
703 
704 #include "mali_kbase_mipe_gen_header.h"
705 
706 const char   *aux_desc_header = (const char *) &__aux_desc_header;
707 const size_t  aux_desc_header_size = sizeof(__aux_desc_header);
708 
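/*
 * Each __kbase_tlstream_* writer below follows the same pattern: compute the
 * message size (message id + 64-bit timestamp + serialized arguments),
 * acquire a message buffer from the stream, serialize the fields in order,
 * then release the buffer. A purely illustrative call, with hypothetical
 * argument values rather than ones taken from this file, would be:
 *
 *   __kbase_tlstream_tl_new_ctx(stream, ctx, ctx_nr, tgid);
 */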
709 void __kbase_tlstream_tl_new_ctx(
710 	struct kbase_tlstream *stream,
711 	const void *ctx,
712 	u32 ctx_nr,
713 	u32 tgid
714 )
715 {
716 	const u32 msg_id = KBASE_TL_NEW_CTX;
717 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
718 		+ sizeof(ctx)
719 		+ sizeof(ctx_nr)
720 		+ sizeof(tgid)
721 		;
722 	char *buffer;
723 	unsigned long acq_flags;
724 	size_t pos = 0;
725 
726 	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
727 
728 	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
729 	pos = kbasep_serialize_timestamp(buffer, pos);
730 	pos = kbasep_serialize_bytes(buffer,
731 		pos, &ctx, sizeof(ctx));
732 	pos = kbasep_serialize_bytes(buffer,
733 		pos, &ctx_nr, sizeof(ctx_nr));
734 	pos = kbasep_serialize_bytes(buffer,
735 		pos, &tgid, sizeof(tgid));
736 
737 	kbase_tlstream_msgbuf_release(stream, acq_flags);
738 }
739 
740 void __kbase_tlstream_tl_new_gpu(
741 	struct kbase_tlstream *stream,
742 	const void *gpu,
743 	u32 gpu_id,
744 	u32 core_count
745 )
746 {
747 	const u32 msg_id = KBASE_TL_NEW_GPU;
748 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
749 		+ sizeof(gpu)
750 		+ sizeof(gpu_id)
751 		+ sizeof(core_count)
752 		;
753 	char *buffer;
754 	unsigned long acq_flags;
755 	size_t pos = 0;
756 
757 	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
758 
759 	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
760 	pos = kbasep_serialize_timestamp(buffer, pos);
761 	pos = kbasep_serialize_bytes(buffer,
762 		pos, &gpu, sizeof(gpu));
763 	pos = kbasep_serialize_bytes(buffer,
764 		pos, &gpu_id, sizeof(gpu_id));
765 	pos = kbasep_serialize_bytes(buffer,
766 		pos, &core_count, sizeof(core_count));
767 
768 	kbase_tlstream_msgbuf_release(stream, acq_flags);
769 }
770 
771 void __kbase_tlstream_tl_new_lpu(
772 	struct kbase_tlstream *stream,
773 	const void *lpu,
774 	u32 lpu_nr,
775 	u32 lpu_fn
776 )
777 {
778 	const u32 msg_id = KBASE_TL_NEW_LPU;
779 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
780 		+ sizeof(lpu)
781 		+ sizeof(lpu_nr)
782 		+ sizeof(lpu_fn)
783 		;
784 	char *buffer;
785 	unsigned long acq_flags;
786 	size_t pos = 0;
787 
788 	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
789 
790 	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
791 	pos = kbasep_serialize_timestamp(buffer, pos);
792 	pos = kbasep_serialize_bytes(buffer,
793 		pos, &lpu, sizeof(lpu));
794 	pos = kbasep_serialize_bytes(buffer,
795 		pos, &lpu_nr, sizeof(lpu_nr));
796 	pos = kbasep_serialize_bytes(buffer,
797 		pos, &lpu_fn, sizeof(lpu_fn));
798 
799 	kbase_tlstream_msgbuf_release(stream, acq_flags);
800 }
801 
802 void __kbase_tlstream_tl_new_atom(
803 	struct kbase_tlstream *stream,
804 	const void *atom,
805 	u32 atom_nr
806 )
807 {
808 	const u32 msg_id = KBASE_TL_NEW_ATOM;
809 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
810 		+ sizeof(atom)
811 		+ sizeof(atom_nr)
812 		;
813 	char *buffer;
814 	unsigned long acq_flags;
815 	size_t pos = 0;
816 
817 	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
818 
819 	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
820 	pos = kbasep_serialize_timestamp(buffer, pos);
821 	pos = kbasep_serialize_bytes(buffer,
822 		pos, &atom, sizeof(atom));
823 	pos = kbasep_serialize_bytes(buffer,
824 		pos, &atom_nr, sizeof(atom_nr));
825 
826 	kbase_tlstream_msgbuf_release(stream, acq_flags);
827 }
828 
829 void __kbase_tlstream_tl_new_as(
830 	struct kbase_tlstream *stream,
831 	const void *address_space,
832 	u32 as_nr
833 )
834 {
835 	const u32 msg_id = KBASE_TL_NEW_AS;
836 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
837 		+ sizeof(address_space)
838 		+ sizeof(as_nr)
839 		;
840 	char *buffer;
841 	unsigned long acq_flags;
842 	size_t pos = 0;
843 
844 	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
845 
846 	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
847 	pos = kbasep_serialize_timestamp(buffer, pos);
848 	pos = kbasep_serialize_bytes(buffer,
849 		pos, &address_space, sizeof(address_space));
850 	pos = kbasep_serialize_bytes(buffer,
851 		pos, &as_nr, sizeof(as_nr));
852 
853 	kbase_tlstream_msgbuf_release(stream, acq_flags);
854 }
855 
856 void __kbase_tlstream_tl_del_ctx(
857 	struct kbase_tlstream *stream,
858 	const void *ctx
859 )
860 {
861 	const u32 msg_id = KBASE_TL_DEL_CTX;
862 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
863 		+ sizeof(ctx)
864 		;
865 	char *buffer;
866 	unsigned long acq_flags;
867 	size_t pos = 0;
868 
869 	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
870 
871 	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
872 	pos = kbasep_serialize_timestamp(buffer, pos);
873 	pos = kbasep_serialize_bytes(buffer,
874 		pos, &ctx, sizeof(ctx));
875 
876 	kbase_tlstream_msgbuf_release(stream, acq_flags);
877 }
878 
879 void __kbase_tlstream_tl_del_atom(
880 	struct kbase_tlstream *stream,
881 	const void *atom
882 )
883 {
884 	const u32 msg_id = KBASE_TL_DEL_ATOM;
885 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
886 		+ sizeof(atom)
887 		;
888 	char *buffer;
889 	unsigned long acq_flags;
890 	size_t pos = 0;
891 
892 	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
893 
894 	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
895 	pos = kbasep_serialize_timestamp(buffer, pos);
896 	pos = kbasep_serialize_bytes(buffer,
897 		pos, &atom, sizeof(atom));
898 
899 	kbase_tlstream_msgbuf_release(stream, acq_flags);
900 }
901 
902 void __kbase_tlstream_tl_lifelink_lpu_gpu(
903 	struct kbase_tlstream *stream,
904 	const void *lpu,
905 	const void *gpu
906 )
907 {
908 	const u32 msg_id = KBASE_TL_LIFELINK_LPU_GPU;
909 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
910 		+ sizeof(lpu)
911 		+ sizeof(gpu)
912 		;
913 	char *buffer;
914 	unsigned long acq_flags;
915 	size_t pos = 0;
916 
917 	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
918 
919 	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
920 	pos = kbasep_serialize_timestamp(buffer, pos);
921 	pos = kbasep_serialize_bytes(buffer,
922 		pos, &lpu, sizeof(lpu));
923 	pos = kbasep_serialize_bytes(buffer,
924 		pos, &gpu, sizeof(gpu));
925 
926 	kbase_tlstream_msgbuf_release(stream, acq_flags);
927 }
928 
929 void __kbase_tlstream_tl_lifelink_as_gpu(
930 	struct kbase_tlstream *stream,
931 	const void *address_space,
932 	const void *gpu
933 )
934 {
935 	const u32 msg_id = KBASE_TL_LIFELINK_AS_GPU;
936 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
937 		+ sizeof(address_space)
938 		+ sizeof(gpu)
939 		;
940 	char *buffer;
941 	unsigned long acq_flags;
942 	size_t pos = 0;
943 
944 	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
945 
946 	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
947 	pos = kbasep_serialize_timestamp(buffer, pos);
948 	pos = kbasep_serialize_bytes(buffer,
949 		pos, &address_space, sizeof(address_space));
950 	pos = kbasep_serialize_bytes(buffer,
951 		pos, &gpu, sizeof(gpu));
952 
953 	kbase_tlstream_msgbuf_release(stream, acq_flags);
954 }
955 
956 void __kbase_tlstream_tl_ret_ctx_lpu(
957 	struct kbase_tlstream *stream,
958 	const void *ctx,
959 	const void *lpu
960 )
961 {
962 	const u32 msg_id = KBASE_TL_RET_CTX_LPU;
963 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
964 		+ sizeof(ctx)
965 		+ sizeof(lpu)
966 		;
967 	char *buffer;
968 	unsigned long acq_flags;
969 	size_t pos = 0;
970 
971 	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
972 
973 	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
974 	pos = kbasep_serialize_timestamp(buffer, pos);
975 	pos = kbasep_serialize_bytes(buffer,
976 		pos, &ctx, sizeof(ctx));
977 	pos = kbasep_serialize_bytes(buffer,
978 		pos, &lpu, sizeof(lpu));
979 
980 	kbase_tlstream_msgbuf_release(stream, acq_flags);
981 }
982 
983 void __kbase_tlstream_tl_ret_atom_ctx(
984 	struct kbase_tlstream *stream,
985 	const void *atom,
986 	const void *ctx
987 )
988 {
989 	const u32 msg_id = KBASE_TL_RET_ATOM_CTX;
990 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
991 		+ sizeof(atom)
992 		+ sizeof(ctx)
993 		;
994 	char *buffer;
995 	unsigned long acq_flags;
996 	size_t pos = 0;
997 
998 	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
999 
1000 	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
1001 	pos = kbasep_serialize_timestamp(buffer, pos);
1002 	pos = kbasep_serialize_bytes(buffer,
1003 		pos, &atom, sizeof(atom));
1004 	pos = kbasep_serialize_bytes(buffer,
1005 		pos, &ctx, sizeof(ctx));
1006 
1007 	kbase_tlstream_msgbuf_release(stream, acq_flags);
1008 }
1009 
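/*
 * This writer also serializes a string: s2 below appears to reserve space
 * for a 32-bit length field, the string bytes bounded by strnlen(...,
 * STRLEN_MAX) and, given the extra sizeof(char), a terminating NUL.
 */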
1010 void __kbase_tlstream_tl_ret_atom_lpu(
1011 	struct kbase_tlstream *stream,
1012 	const void *atom,
1013 	const void *lpu,
1014 	const char *attrib_match_list
1015 )
1016 {
1017 	const u32 msg_id = KBASE_TL_RET_ATOM_LPU;
1018 	const size_t s2 = sizeof(u32) + sizeof(char)
1019 		+ strnlen(attrib_match_list, STRLEN_MAX);
1020 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
1021 		+ sizeof(atom)
1022 		+ sizeof(lpu)
1023 		+ s2
1024 		;
1025 	char *buffer;
1026 	unsigned long acq_flags;
1027 	size_t pos = 0;
1028 
1029 	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
1030 
1031 	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
1032 	pos = kbasep_serialize_timestamp(buffer, pos);
1033 	pos = kbasep_serialize_bytes(buffer,
1034 		pos, &atom, sizeof(atom));
1035 	pos = kbasep_serialize_bytes(buffer,
1036 		pos, &lpu, sizeof(lpu));
1037 	pos = kbasep_serialize_string(buffer,
1038 		pos, attrib_match_list, s2);
1039 
1040 	kbase_tlstream_msgbuf_release(stream, acq_flags);
1041 }
1042 
1043 void __kbase_tlstream_tl_nret_ctx_lpu(
1044 	struct kbase_tlstream *stream,
1045 	const void *ctx,
1046 	const void *lpu
1047 )
1048 {
1049 	const u32 msg_id = KBASE_TL_NRET_CTX_LPU;
1050 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
1051 		+ sizeof(ctx)
1052 		+ sizeof(lpu)
1053 		;
1054 	char *buffer;
1055 	unsigned long acq_flags;
1056 	size_t pos = 0;
1057 
1058 	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
1059 
1060 	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
1061 	pos = kbasep_serialize_timestamp(buffer, pos);
1062 	pos = kbasep_serialize_bytes(buffer,
1063 		pos, &ctx, sizeof(ctx));
1064 	pos = kbasep_serialize_bytes(buffer,
1065 		pos, &lpu, sizeof(lpu));
1066 
1067 	kbase_tlstream_msgbuf_release(stream, acq_flags);
1068 }
1069 
1070 void __kbase_tlstream_tl_nret_atom_ctx(
1071 	struct kbase_tlstream *stream,
1072 	const void *atom,
1073 	const void *ctx
1074 )
1075 {
1076 	const u32 msg_id = KBASE_TL_NRET_ATOM_CTX;
1077 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
1078 		+ sizeof(atom)
1079 		+ sizeof(ctx)
1080 		;
1081 	char *buffer;
1082 	unsigned long acq_flags;
1083 	size_t pos = 0;
1084 
1085 	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
1086 
1087 	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
1088 	pos = kbasep_serialize_timestamp(buffer, pos);
1089 	pos = kbasep_serialize_bytes(buffer,
1090 		pos, &atom, sizeof(atom));
1091 	pos = kbasep_serialize_bytes(buffer,
1092 		pos, &ctx, sizeof(ctx));
1093 
1094 	kbase_tlstream_msgbuf_release(stream, acq_flags);
1095 }
1096 
1097 void __kbase_tlstream_tl_nret_atom_lpu(
1098 	struct kbase_tlstream *stream,
1099 	const void *atom,
1100 	const void *lpu
1101 )
1102 {
1103 	const u32 msg_id = KBASE_TL_NRET_ATOM_LPU;
1104 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
1105 		+ sizeof(atom)
1106 		+ sizeof(lpu)
1107 		;
1108 	char *buffer;
1109 	unsigned long acq_flags;
1110 	size_t pos = 0;
1111 
1112 	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
1113 
1114 	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
1115 	pos = kbasep_serialize_timestamp(buffer, pos);
1116 	pos = kbasep_serialize_bytes(buffer,
1117 		pos, &atom, sizeof(atom));
1118 	pos = kbasep_serialize_bytes(buffer,
1119 		pos, &lpu, sizeof(lpu));
1120 
1121 	kbase_tlstream_msgbuf_release(stream, acq_flags);
1122 }
1123 
1124 void __kbase_tlstream_tl_ret_as_ctx(
1125 	struct kbase_tlstream *stream,
1126 	const void *address_space,
1127 	const void *ctx
1128 )
1129 {
1130 	const u32 msg_id = KBASE_TL_RET_AS_CTX;
1131 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
1132 		+ sizeof(address_space)
1133 		+ sizeof(ctx)
1134 		;
1135 	char *buffer;
1136 	unsigned long acq_flags;
1137 	size_t pos = 0;
1138 
1139 	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
1140 
1141 	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
1142 	pos = kbasep_serialize_timestamp(buffer, pos);
1143 	pos = kbasep_serialize_bytes(buffer,
1144 		pos, &address_space, sizeof(address_space));
1145 	pos = kbasep_serialize_bytes(buffer,
1146 		pos, &ctx, sizeof(ctx));
1147 
1148 	kbase_tlstream_msgbuf_release(stream, acq_flags);
1149 }
1150 
1151 void __kbase_tlstream_tl_nret_as_ctx(
1152 	struct kbase_tlstream *stream,
1153 	const void *address_space,
1154 	const void *ctx
1155 )
1156 {
1157 	const u32 msg_id = KBASE_TL_NRET_AS_CTX;
1158 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
1159 		+ sizeof(address_space)
1160 		+ sizeof(ctx)
1161 		;
1162 	char *buffer;
1163 	unsigned long acq_flags;
1164 	size_t pos = 0;
1165 
1166 	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
1167 
1168 	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
1169 	pos = kbasep_serialize_timestamp(buffer, pos);
1170 	pos = kbasep_serialize_bytes(buffer,
1171 		pos, &address_space, sizeof(address_space));
1172 	pos = kbasep_serialize_bytes(buffer,
1173 		pos, &ctx, sizeof(ctx));
1174 
1175 	kbase_tlstream_msgbuf_release(stream, acq_flags);
1176 }
1177 
1178 void __kbase_tlstream_tl_ret_atom_as(
1179 	struct kbase_tlstream *stream,
1180 	const void *atom,
1181 	const void *address_space
1182 )
1183 {
1184 	const u32 msg_id = KBASE_TL_RET_ATOM_AS;
1185 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
1186 		+ sizeof(atom)
1187 		+ sizeof(address_space)
1188 		;
1189 	char *buffer;
1190 	unsigned long acq_flags;
1191 	size_t pos = 0;
1192 
1193 	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
1194 
1195 	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
1196 	pos = kbasep_serialize_timestamp(buffer, pos);
1197 	pos = kbasep_serialize_bytes(buffer,
1198 		pos, &atom, sizeof(atom));
1199 	pos = kbasep_serialize_bytes(buffer,
1200 		pos, &address_space, sizeof(address_space));
1201 
1202 	kbase_tlstream_msgbuf_release(stream, acq_flags);
1203 }
1204 
1205 void __kbase_tlstream_tl_nret_atom_as(
1206 	struct kbase_tlstream *stream,
1207 	const void *atom,
1208 	const void *address_space
1209 )
1210 {
1211 	const u32 msg_id = KBASE_TL_NRET_ATOM_AS;
1212 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
1213 		+ sizeof(atom)
1214 		+ sizeof(address_space)
1215 		;
1216 	char *buffer;
1217 	unsigned long acq_flags;
1218 	size_t pos = 0;
1219 
1220 	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
1221 
1222 	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
1223 	pos = kbasep_serialize_timestamp(buffer, pos);
1224 	pos = kbasep_serialize_bytes(buffer,
1225 		pos, &atom, sizeof(atom));
1226 	pos = kbasep_serialize_bytes(buffer,
1227 		pos, &address_space, sizeof(address_space));
1228 
1229 	kbase_tlstream_msgbuf_release(stream, acq_flags);
1230 }
1231 
1232 void __kbase_tlstream_tl_attrib_atom_config(
1233 	struct kbase_tlstream *stream,
1234 	const void *atom,
1235 	u64 descriptor,
1236 	u64 affinity,
1237 	u32 config
1238 )
1239 {
1240 	const u32 msg_id = KBASE_TL_ATTRIB_ATOM_CONFIG;
1241 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
1242 		+ sizeof(atom)
1243 		+ sizeof(descriptor)
1244 		+ sizeof(affinity)
1245 		+ sizeof(config)
1246 		;
1247 	char *buffer;
1248 	unsigned long acq_flags;
1249 	size_t pos = 0;
1250 
1251 	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
1252 
1253 	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
1254 	pos = kbasep_serialize_timestamp(buffer, pos);
1255 	pos = kbasep_serialize_bytes(buffer,
1256 		pos, &atom, sizeof(atom));
1257 	pos = kbasep_serialize_bytes(buffer,
1258 		pos, &descriptor, sizeof(descriptor));
1259 	pos = kbasep_serialize_bytes(buffer,
1260 		pos, &affinity, sizeof(affinity));
1261 	pos = kbasep_serialize_bytes(buffer,
1262 		pos, &config, sizeof(config));
1263 
1264 	kbase_tlstream_msgbuf_release(stream, acq_flags);
1265 }
1266 
1267 void __kbase_tlstream_tl_jit_usedpages(
1268 	struct kbase_tlstream *stream,
1269 	u64 used_pages,
1270 	u32 j_id
1271 )
1272 {
1273 	const u32 msg_id = KBASE_TL_JIT_USEDPAGES;
1274 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
1275 		+ sizeof(used_pages)
1276 		+ sizeof(j_id)
1277 		;
1278 	char *buffer;
1279 	unsigned long acq_flags;
1280 	size_t pos = 0;
1281 
1282 	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
1283 
1284 	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
1285 	pos = kbasep_serialize_timestamp(buffer, pos);
1286 	pos = kbasep_serialize_bytes(buffer,
1287 		pos, &used_pages, sizeof(used_pages));
1288 	pos = kbasep_serialize_bytes(buffer,
1289 		pos, &j_id, sizeof(j_id));
1290 
1291 	kbase_tlstream_msgbuf_release(stream, acq_flags);
1292 }
1293 
1294 void __kbase_tlstream_tl_attrib_atom_jitallocinfo(
1295 	struct kbase_tlstream *stream,
1296 	const void *atom,
1297 	u64 va_pgs,
1298 	u64 com_pgs,
1299 	u64 extent,
1300 	u32 j_id,
1301 	u32 bin_id,
1302 	u32 max_allocs,
1303 	u32 jit_flags,
1304 	u32 usg_id
1305 )
1306 {
1307 	const u32 msg_id = KBASE_TL_ATTRIB_ATOM_JITALLOCINFO;
1308 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
1309 		+ sizeof(atom)
1310 		+ sizeof(va_pgs)
1311 		+ sizeof(com_pgs)
1312 		+ sizeof(extent)
1313 		+ sizeof(j_id)
1314 		+ sizeof(bin_id)
1315 		+ sizeof(max_allocs)
1316 		+ sizeof(jit_flags)
1317 		+ sizeof(usg_id)
1318 		;
1319 	char *buffer;
1320 	unsigned long acq_flags;
1321 	size_t pos = 0;
1322 
1323 	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
1324 
1325 	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
1326 	pos = kbasep_serialize_timestamp(buffer, pos);
1327 	pos = kbasep_serialize_bytes(buffer,
1328 		pos, &atom, sizeof(atom));
1329 	pos = kbasep_serialize_bytes(buffer,
1330 		pos, &va_pgs, sizeof(va_pgs));
1331 	pos = kbasep_serialize_bytes(buffer,
1332 		pos, &com_pgs, sizeof(com_pgs));
1333 	pos = kbasep_serialize_bytes(buffer,
1334 		pos, &extent, sizeof(extent));
1335 	pos = kbasep_serialize_bytes(buffer,
1336 		pos, &j_id, sizeof(j_id));
1337 	pos = kbasep_serialize_bytes(buffer,
1338 		pos, &bin_id, sizeof(bin_id));
1339 	pos = kbasep_serialize_bytes(buffer,
1340 		pos, &max_allocs, sizeof(max_allocs));
1341 	pos = kbasep_serialize_bytes(buffer,
1342 		pos, &jit_flags, sizeof(jit_flags));
1343 	pos = kbasep_serialize_bytes(buffer,
1344 		pos, &usg_id, sizeof(usg_id));
1345 
1346 	kbase_tlstream_msgbuf_release(stream, acq_flags);
1347 }
1348 
1349 void __kbase_tlstream_tl_attrib_atom_jitfreeinfo(
1350 	struct kbase_tlstream *stream,
1351 	const void *atom,
1352 	u32 j_id
1353 )
1354 {
1355 	const u32 msg_id = KBASE_TL_ATTRIB_ATOM_JITFREEINFO;
1356 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
1357 		+ sizeof(atom)
1358 		+ sizeof(j_id)
1359 		;
1360 	char *buffer;
1361 	unsigned long acq_flags;
1362 	size_t pos = 0;
1363 
1364 	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
1365 
1366 	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
1367 	pos = kbasep_serialize_timestamp(buffer, pos);
1368 	pos = kbasep_serialize_bytes(buffer,
1369 		pos, &atom, sizeof(atom));
1370 	pos = kbasep_serialize_bytes(buffer,
1371 		pos, &j_id, sizeof(j_id));
1372 
1373 	kbase_tlstream_msgbuf_release(stream, acq_flags);
1374 }
1375 
1376 void __kbase_tlstream_tl_attrib_as_config(
1377 	struct kbase_tlstream *stream,
1378 	const void *address_space,
1379 	u64 transtab,
1380 	u64 memattr,
1381 	u64 transcfg
1382 )
1383 {
1384 	const u32 msg_id = KBASE_TL_ATTRIB_AS_CONFIG;
1385 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
1386 		+ sizeof(address_space)
1387 		+ sizeof(transtab)
1388 		+ sizeof(memattr)
1389 		+ sizeof(transcfg)
1390 		;
1391 	char *buffer;
1392 	unsigned long acq_flags;
1393 	size_t pos = 0;
1394 
1395 	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
1396 
1397 	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
1398 	pos = kbasep_serialize_timestamp(buffer, pos);
1399 	pos = kbasep_serialize_bytes(buffer,
1400 		pos, &address_space, sizeof(address_space));
1401 	pos = kbasep_serialize_bytes(buffer,
1402 		pos, &transtab, sizeof(transtab));
1403 	pos = kbasep_serialize_bytes(buffer,
1404 		pos, &memattr, sizeof(memattr));
1405 	pos = kbasep_serialize_bytes(buffer,
1406 		pos, &transcfg, sizeof(transcfg));
1407 
1408 	kbase_tlstream_msgbuf_release(stream, acq_flags);
1409 }
1410 
1411 void __kbase_tlstream_tl_event_lpu_softstop(
1412 	struct kbase_tlstream *stream,
1413 	const void *lpu
1414 )
1415 {
1416 	const u32 msg_id = KBASE_TL_EVENT_LPU_SOFTSTOP;
1417 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
1418 		+ sizeof(lpu)
1419 		;
1420 	char *buffer;
1421 	unsigned long acq_flags;
1422 	size_t pos = 0;
1423 
1424 	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
1425 
1426 	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
1427 	pos = kbasep_serialize_timestamp(buffer, pos);
1428 	pos = kbasep_serialize_bytes(buffer,
1429 		pos, &lpu, sizeof(lpu));
1430 
1431 	kbase_tlstream_msgbuf_release(stream, acq_flags);
1432 }
1433 
1434 void __kbase_tlstream_tl_event_atom_softstop_ex(
1435 	struct kbase_tlstream *stream,
1436 	const void *atom
1437 )
1438 {
1439 	const u32 msg_id = KBASE_TL_EVENT_ATOM_SOFTSTOP_EX;
1440 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
1441 		+ sizeof(atom)
1442 		;
1443 	char *buffer;
1444 	unsigned long acq_flags;
1445 	size_t pos = 0;
1446 
1447 	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
1448 
1449 	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
1450 	pos = kbasep_serialize_timestamp(buffer, pos);
1451 	pos = kbasep_serialize_bytes(buffer,
1452 		pos, &atom, sizeof(atom));
1453 
1454 	kbase_tlstream_msgbuf_release(stream, acq_flags);
1455 }
1456 
1457 void __kbase_tlstream_tl_event_atom_softstop_issue(
1458 	struct kbase_tlstream *stream,
1459 	const void *atom
1460 )
1461 {
1462 	const u32 msg_id = KBASE_TL_EVENT_ATOM_SOFTSTOP_ISSUE;
1463 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
1464 		+ sizeof(atom)
1465 		;
1466 	char *buffer;
1467 	unsigned long acq_flags;
1468 	size_t pos = 0;
1469 
1470 	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
1471 
1472 	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
1473 	pos = kbasep_serialize_timestamp(buffer, pos);
1474 	pos = kbasep_serialize_bytes(buffer,
1475 		pos, &atom, sizeof(atom));
1476 
1477 	kbase_tlstream_msgbuf_release(stream, acq_flags);
1478 }
1479 
1480 void __kbase_tlstream_tl_event_atom_softjob_start(
1481 	struct kbase_tlstream *stream,
1482 	const void *atom
1483 )
1484 {
1485 	const u32 msg_id = KBASE_TL_EVENT_ATOM_SOFTJOB_START;
1486 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
1487 		+ sizeof(atom)
1488 		;
1489 	char *buffer;
1490 	unsigned long acq_flags;
1491 	size_t pos = 0;
1492 
1493 	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
1494 
1495 	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
1496 	pos = kbasep_serialize_timestamp(buffer, pos);
1497 	pos = kbasep_serialize_bytes(buffer,
1498 		pos, &atom, sizeof(atom));
1499 
1500 	kbase_tlstream_msgbuf_release(stream, acq_flags);
1501 }
1502 
1503 void __kbase_tlstream_tl_event_atom_softjob_end(
1504 	struct kbase_tlstream *stream,
1505 	const void *atom
1506 )
1507 {
1508 	const u32 msg_id = KBASE_TL_EVENT_ATOM_SOFTJOB_END;
1509 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
1510 		+ sizeof(atom)
1511 		;
1512 	char *buffer;
1513 	unsigned long acq_flags;
1514 	size_t pos = 0;
1515 
1516 	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
1517 
1518 	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
1519 	pos = kbasep_serialize_timestamp(buffer, pos);
1520 	pos = kbasep_serialize_bytes(buffer,
1521 		pos, &atom, sizeof(atom));
1522 
1523 	kbase_tlstream_msgbuf_release(stream, acq_flags);
1524 }
1525 
1526 void __kbase_tlstream_tl_arbiter_granted(
1527 	struct kbase_tlstream *stream,
1528 	const void *gpu
1529 )
1530 {
1531 	const u32 msg_id = KBASE_TL_ARBITER_GRANTED;
1532 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
1533 		+ sizeof(gpu)
1534 		;
1535 	char *buffer;
1536 	unsigned long acq_flags;
1537 	size_t pos = 0;
1538 
1539 	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
1540 
1541 	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
1542 	pos = kbasep_serialize_timestamp(buffer, pos);
1543 	pos = kbasep_serialize_bytes(buffer,
1544 		pos, &gpu, sizeof(gpu));
1545 
1546 	kbase_tlstream_msgbuf_release(stream, acq_flags);
1547 }
1548 
1549 void __kbase_tlstream_tl_arbiter_started(
1550 	struct kbase_tlstream *stream,
1551 	const void *gpu
1552 )
1553 {
1554 	const u32 msg_id = KBASE_TL_ARBITER_STARTED;
1555 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
1556 		+ sizeof(gpu)
1557 		;
1558 	char *buffer;
1559 	unsigned long acq_flags;
1560 	size_t pos = 0;
1561 
1562 	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
1563 
1564 	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
1565 	pos = kbasep_serialize_timestamp(buffer, pos);
1566 	pos = kbasep_serialize_bytes(buffer,
1567 		pos, &gpu, sizeof(gpu));
1568 
1569 	kbase_tlstream_msgbuf_release(stream, acq_flags);
1570 }
1571 
1572 void __kbase_tlstream_tl_arbiter_stop_requested(
1573 	struct kbase_tlstream *stream,
1574 	const void *gpu
1575 )
1576 {
1577 	const u32 msg_id = KBASE_TL_ARBITER_STOP_REQUESTED;
1578 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
1579 		+ sizeof(gpu)
1580 		;
1581 	char *buffer;
1582 	unsigned long acq_flags;
1583 	size_t pos = 0;
1584 
1585 	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
1586 
1587 	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
1588 	pos = kbasep_serialize_timestamp(buffer, pos);
1589 	pos = kbasep_serialize_bytes(buffer,
1590 		pos, &gpu, sizeof(gpu));
1591 
1592 	kbase_tlstream_msgbuf_release(stream, acq_flags);
1593 }
1594 
1595 void __kbase_tlstream_tl_arbiter_stopped(
1596 	struct kbase_tlstream *stream,
1597 	const void *gpu
1598 )
1599 {
1600 	const u32 msg_id = KBASE_TL_ARBITER_STOPPED;
1601 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
1602 		+ sizeof(gpu)
1603 		;
1604 	char *buffer;
1605 	unsigned long acq_flags;
1606 	size_t pos = 0;
1607 
1608 	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
1609 
1610 	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
1611 	pos = kbasep_serialize_timestamp(buffer, pos);
1612 	pos = kbasep_serialize_bytes(buffer,
1613 		pos, &gpu, sizeof(gpu));
1614 
1615 	kbase_tlstream_msgbuf_release(stream, acq_flags);
1616 }
1617 
1618 void __kbase_tlstream_tl_arbiter_requested(
1619 	struct kbase_tlstream *stream,
1620 	const void *gpu
1621 )
1622 {
1623 	const u32 msg_id = KBASE_TL_ARBITER_REQUESTED;
1624 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
1625 		+ sizeof(gpu)
1626 		;
1627 	char *buffer;
1628 	unsigned long acq_flags;
1629 	size_t pos = 0;
1630 
1631 	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
1632 
1633 	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
1634 	pos = kbasep_serialize_timestamp(buffer, pos);
1635 	pos = kbasep_serialize_bytes(buffer,
1636 		pos, &gpu, sizeof(gpu));
1637 
1638 	kbase_tlstream_msgbuf_release(stream, acq_flags);
1639 }
1640 
1641 void __kbase_tlstream_jd_gpu_soft_reset(
1642 	struct kbase_tlstream *stream,
1643 	const void *gpu
1644 )
1645 {
1646 	const u32 msg_id = KBASE_JD_GPU_SOFT_RESET;
1647 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
1648 		+ sizeof(gpu)
1649 		;
1650 	char *buffer;
1651 	unsigned long acq_flags;
1652 	size_t pos = 0;
1653 
1654 	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
1655 
1656 	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
1657 	pos = kbasep_serialize_timestamp(buffer, pos);
1658 	pos = kbasep_serialize_bytes(buffer,
1659 		pos, &gpu, sizeof(gpu));
1660 
1661 	kbase_tlstream_msgbuf_release(stream, acq_flags);
1662 }
1663 
1664 void __kbase_tlstream_jd_tiler_heap_chunk_alloc(
1665 	struct kbase_tlstream *stream,
1666 	u32 ctx_nr,
1667 	u64 heap_id,
1668 	u64 chunk_va
1669 )
1670 {
1671 	const u32 msg_id = KBASE_JD_TILER_HEAP_CHUNK_ALLOC;
1672 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
1673 		+ sizeof(ctx_nr)
1674 		+ sizeof(heap_id)
1675 		+ sizeof(chunk_va)
1676 		;
1677 	char *buffer;
1678 	unsigned long acq_flags;
1679 	size_t pos = 0;
1680 
1681 	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
1682 
1683 	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
1684 	pos = kbasep_serialize_timestamp(buffer, pos);
1685 	pos = kbasep_serialize_bytes(buffer,
1686 		pos, &ctx_nr, sizeof(ctx_nr));
1687 	pos = kbasep_serialize_bytes(buffer,
1688 		pos, &heap_id, sizeof(heap_id));
1689 	pos = kbasep_serialize_bytes(buffer,
1690 		pos, &chunk_va, sizeof(chunk_va));
1691 
1692 	kbase_tlstream_msgbuf_release(stream, acq_flags);
1693 }
1694 
1695 void __kbase_tlstream_tl_js_sched_start(
1696 	struct kbase_tlstream *stream,
1697 	u32 dummy
1698 )
1699 {
1700 	const u32 msg_id = KBASE_TL_JS_SCHED_START;
1701 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
1702 		+ sizeof(dummy)
1703 		;
1704 	char *buffer;
1705 	unsigned long acq_flags;
1706 	size_t pos = 0;
1707 
1708 	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
1709 
1710 	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
1711 	pos = kbasep_serialize_timestamp(buffer, pos);
1712 	pos = kbasep_serialize_bytes(buffer,
1713 		pos, &dummy, sizeof(dummy));
1714 
1715 	kbase_tlstream_msgbuf_release(stream, acq_flags);
1716 }
1717 
1718 void __kbase_tlstream_tl_js_sched_end(
1719 	struct kbase_tlstream *stream,
1720 	u32 dummy
1721 )
1722 {
1723 	const u32 msg_id = KBASE_TL_JS_SCHED_END;
1724 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
1725 		+ sizeof(dummy)
1726 		;
1727 	char *buffer;
1728 	unsigned long acq_flags;
1729 	size_t pos = 0;
1730 
1731 	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
1732 
1733 	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
1734 	pos = kbasep_serialize_timestamp(buffer, pos);
1735 	pos = kbasep_serialize_bytes(buffer,
1736 		pos, &dummy, sizeof(dummy));
1737 
1738 	kbase_tlstream_msgbuf_release(stream, acq_flags);
1739 }
1740 
1741 void __kbase_tlstream_tl_jd_submit_atom_start(
1742 	struct kbase_tlstream *stream,
1743 	const void *atom
1744 )
1745 {
1746 	const u32 msg_id = KBASE_TL_JD_SUBMIT_ATOM_START;
1747 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
1748 		+ sizeof(atom)
1749 		;
1750 	char *buffer;
1751 	unsigned long acq_flags;
1752 	size_t pos = 0;
1753 
1754 	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
1755 
1756 	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
1757 	pos = kbasep_serialize_timestamp(buffer, pos);
1758 	pos = kbasep_serialize_bytes(buffer,
1759 		pos, &atom, sizeof(atom));
1760 
1761 	kbase_tlstream_msgbuf_release(stream, acq_flags);
1762 }
1763 
1764 void __kbase_tlstream_tl_jd_submit_atom_end(
1765 	struct kbase_tlstream *stream,
1766 	const void *atom
1767 )
1768 {
1769 	const u32 msg_id = KBASE_TL_JD_SUBMIT_ATOM_END;
1770 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
1771 		+ sizeof(atom)
1772 		;
1773 	char *buffer;
1774 	unsigned long acq_flags;
1775 	size_t pos = 0;
1776 
1777 	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
1778 
1779 	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
1780 	pos = kbasep_serialize_timestamp(buffer, pos);
1781 	pos = kbasep_serialize_bytes(buffer,
1782 		pos, &atom, sizeof(atom));
1783 
1784 	kbase_tlstream_msgbuf_release(stream, acq_flags);
1785 }
1786 
1787 void __kbase_tlstream_tl_jd_done_no_lock_start(
1788 	struct kbase_tlstream *stream,
1789 	const void *atom
1790 )
1791 {
1792 	const u32 msg_id = KBASE_TL_JD_DONE_NO_LOCK_START;
1793 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
1794 		+ sizeof(atom)
1795 		;
1796 	char *buffer;
1797 	unsigned long acq_flags;
1798 	size_t pos = 0;
1799 
1800 	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
1801 
1802 	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
1803 	pos = kbasep_serialize_timestamp(buffer, pos);
1804 	pos = kbasep_serialize_bytes(buffer,
1805 		pos, &atom, sizeof(atom));
1806 
1807 	kbase_tlstream_msgbuf_release(stream, acq_flags);
1808 }
1809 
1810 void __kbase_tlstream_tl_jd_done_no_lock_end(
1811 	struct kbase_tlstream *stream,
1812 	const void *atom
1813 )
1814 {
1815 	const u32 msg_id = KBASE_TL_JD_DONE_NO_LOCK_END;
1816 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
1817 		+ sizeof(atom)
1818 		;
1819 	char *buffer;
1820 	unsigned long acq_flags;
1821 	size_t pos = 0;
1822 
1823 	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
1824 
1825 	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
1826 	pos = kbasep_serialize_timestamp(buffer, pos);
1827 	pos = kbasep_serialize_bytes(buffer,
1828 		pos, &atom, sizeof(atom));
1829 
1830 	kbase_tlstream_msgbuf_release(stream, acq_flags);
1831 }
1832 
1833 void __kbase_tlstream_tl_jd_done_start(
1834 	struct kbase_tlstream *stream,
1835 	const void *atom
1836 )
1837 {
1838 	const u32 msg_id = KBASE_TL_JD_DONE_START;
1839 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
1840 		+ sizeof(atom)
1841 		;
1842 	char *buffer;
1843 	unsigned long acq_flags;
1844 	size_t pos = 0;
1845 
1846 	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
1847 
1848 	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
1849 	pos = kbasep_serialize_timestamp(buffer, pos);
1850 	pos = kbasep_serialize_bytes(buffer,
1851 		pos, &atom, sizeof(atom));
1852 
1853 	kbase_tlstream_msgbuf_release(stream, acq_flags);
1854 }
1855 
1856 void __kbase_tlstream_tl_jd_done_end(
1857 	struct kbase_tlstream *stream,
1858 	const void *atom
1859 )
1860 {
1861 	const u32 msg_id = KBASE_TL_JD_DONE_END;
1862 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
1863 		+ sizeof(atom)
1864 		;
1865 	char *buffer;
1866 	unsigned long acq_flags;
1867 	size_t pos = 0;
1868 
1869 	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
1870 
1871 	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
1872 	pos = kbasep_serialize_timestamp(buffer, pos);
1873 	pos = kbasep_serialize_bytes(buffer,
1874 		pos, &atom, sizeof(atom));
1875 
1876 	kbase_tlstream_msgbuf_release(stream, acq_flags);
1877 }
1878 
1879 void __kbase_tlstream_tl_jd_atom_complete(
1880 	struct kbase_tlstream *stream,
1881 	const void *atom
1882 )
1883 {
1884 	const u32 msg_id = KBASE_TL_JD_ATOM_COMPLETE;
1885 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
1886 		+ sizeof(atom)
1887 		;
1888 	char *buffer;
1889 	unsigned long acq_flags;
1890 	size_t pos = 0;
1891 
1892 	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
1893 
1894 	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
1895 	pos = kbasep_serialize_timestamp(buffer, pos);
1896 	pos = kbasep_serialize_bytes(buffer,
1897 		pos, &atom, sizeof(atom));
1898 
1899 	kbase_tlstream_msgbuf_release(stream, acq_flags);
1900 }
1901 
1902 void __kbase_tlstream_tl_run_atom_start(
1903 	struct kbase_tlstream *stream,
1904 	const void *atom,
1905 	u32 atom_nr
1906 )
1907 {
1908 	const u32 msg_id = KBASE_TL_RUN_ATOM_START;
1909 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
1910 		+ sizeof(atom)
1911 		+ sizeof(atom_nr)
1912 		;
1913 	char *buffer;
1914 	unsigned long acq_flags;
1915 	size_t pos = 0;
1916 
1917 	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
1918 
1919 	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
1920 	pos = kbasep_serialize_timestamp(buffer, pos);
1921 	pos = kbasep_serialize_bytes(buffer,
1922 		pos, &atom, sizeof(atom));
1923 	pos = kbasep_serialize_bytes(buffer,
1924 		pos, &atom_nr, sizeof(atom_nr));
1925 
1926 	kbase_tlstream_msgbuf_release(stream, acq_flags);
1927 }
1928 
1929 void __kbase_tlstream_tl_run_atom_end(
1930 	struct kbase_tlstream *stream,
1931 	const void *atom,
1932 	u32 atom_nr
1933 )
1934 {
1935 	const u32 msg_id = KBASE_TL_RUN_ATOM_END;
1936 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
1937 		+ sizeof(atom)
1938 		+ sizeof(atom_nr)
1939 		;
1940 	char *buffer;
1941 	unsigned long acq_flags;
1942 	size_t pos = 0;
1943 
1944 	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
1945 
1946 	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
1947 	pos = kbasep_serialize_timestamp(buffer, pos);
1948 	pos = kbasep_serialize_bytes(buffer,
1949 		pos, &atom, sizeof(atom));
1950 	pos = kbasep_serialize_bytes(buffer,
1951 		pos, &atom_nr, sizeof(atom_nr));
1952 
1953 	kbase_tlstream_msgbuf_release(stream, acq_flags);
1954 }
1955 
1956 void __kbase_tlstream_tl_attrib_atom_priority(
1957 	struct kbase_tlstream *stream,
1958 	const void *atom,
1959 	u32 prio
1960 )
1961 {
1962 	const u32 msg_id = KBASE_TL_ATTRIB_ATOM_PRIORITY;
1963 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
1964 		+ sizeof(atom)
1965 		+ sizeof(prio)
1966 		;
1967 	char *buffer;
1968 	unsigned long acq_flags;
1969 	size_t pos = 0;
1970 
1971 	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
1972 
1973 	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
1974 	pos = kbasep_serialize_timestamp(buffer, pos);
1975 	pos = kbasep_serialize_bytes(buffer,
1976 		pos, &atom, sizeof(atom));
1977 	pos = kbasep_serialize_bytes(buffer,
1978 		pos, &prio, sizeof(prio));
1979 
1980 	kbase_tlstream_msgbuf_release(stream, acq_flags);
1981 }
1982 
1983 void __kbase_tlstream_tl_attrib_atom_state(
1984 	struct kbase_tlstream *stream,
1985 	const void *atom,
1986 	u32 state
1987 )
1988 {
1989 	const u32 msg_id = KBASE_TL_ATTRIB_ATOM_STATE;
1990 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
1991 		+ sizeof(atom)
1992 		+ sizeof(state)
1993 		;
1994 	char *buffer;
1995 	unsigned long acq_flags;
1996 	size_t pos = 0;
1997 
1998 	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
1999 
2000 	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
2001 	pos = kbasep_serialize_timestamp(buffer, pos);
2002 	pos = kbasep_serialize_bytes(buffer,
2003 		pos, &atom, sizeof(atom));
2004 	pos = kbasep_serialize_bytes(buffer,
2005 		pos, &state, sizeof(state));
2006 
2007 	kbase_tlstream_msgbuf_release(stream, acq_flags);
2008 }
2009 
2010 void __kbase_tlstream_tl_attrib_atom_prioritized(
2011 	struct kbase_tlstream *stream,
2012 	const void *atom
2013 )
2014 {
2015 	const u32 msg_id = KBASE_TL_ATTRIB_ATOM_PRIORITIZED;
2016 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
2017 		+ sizeof(atom)
2018 		;
2019 	char *buffer;
2020 	unsigned long acq_flags;
2021 	size_t pos = 0;
2022 
2023 	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
2024 
2025 	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
2026 	pos = kbasep_serialize_timestamp(buffer, pos);
2027 	pos = kbasep_serialize_bytes(buffer,
2028 		pos, &atom, sizeof(atom));
2029 
2030 	kbase_tlstream_msgbuf_release(stream, acq_flags);
2031 }
2032 
2033 void __kbase_tlstream_tl_attrib_atom_jit(
2034 	struct kbase_tlstream *stream,
2035 	const void *atom,
2036 	u64 edit_addr,
2037 	u64 new_addr,
2038 	u32 jit_flags,
2039 	u64 mem_flags,
2040 	u32 j_id,
2041 	u64 com_pgs,
2042 	u64 extent,
2043 	u64 va_pgs
2044 )
2045 {
2046 	const u32 msg_id = KBASE_TL_ATTRIB_ATOM_JIT;
2047 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
2048 		+ sizeof(atom)
2049 		+ sizeof(edit_addr)
2050 		+ sizeof(new_addr)
2051 		+ sizeof(jit_flags)
2052 		+ sizeof(mem_flags)
2053 		+ sizeof(j_id)
2054 		+ sizeof(com_pgs)
2055 		+ sizeof(extent)
2056 		+ sizeof(va_pgs)
2057 		;
2058 	char *buffer;
2059 	unsigned long acq_flags;
2060 	size_t pos = 0;
2061 
2062 	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
2063 
2064 	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
2065 	pos = kbasep_serialize_timestamp(buffer, pos);
2066 	pos = kbasep_serialize_bytes(buffer,
2067 		pos, &atom, sizeof(atom));
2068 	pos = kbasep_serialize_bytes(buffer,
2069 		pos, &edit_addr, sizeof(edit_addr));
2070 	pos = kbasep_serialize_bytes(buffer,
2071 		pos, &new_addr, sizeof(new_addr));
2072 	pos = kbasep_serialize_bytes(buffer,
2073 		pos, &jit_flags, sizeof(jit_flags));
2074 	pos = kbasep_serialize_bytes(buffer,
2075 		pos, &mem_flags, sizeof(mem_flags));
2076 	pos = kbasep_serialize_bytes(buffer,
2077 		pos, &j_id, sizeof(j_id));
2078 	pos = kbasep_serialize_bytes(buffer,
2079 		pos, &com_pgs, sizeof(com_pgs));
2080 	pos = kbasep_serialize_bytes(buffer,
2081 		pos, &extent, sizeof(extent));
2082 	pos = kbasep_serialize_bytes(buffer,
2083 		pos, &va_pgs, sizeof(va_pgs));
2084 
2085 	kbase_tlstream_msgbuf_release(stream, acq_flags);
2086 }
2087 
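/*
 * The writers from this point on emit the KBASE_TL_KBASE_* events (device,
 * CSG slot, kernel context and KCPU queue activity). They use the same
 * acquire/serialize/release pattern as the writers above; only the message
 * id and the argument list change.
 */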
2088 void __kbase_tlstream_tl_kbase_new_device(
2089 	struct kbase_tlstream *stream,
2090 	u32 kbase_device_id,
2091 	u32 kbase_device_gpu_core_count,
2092 	u32 kbase_device_max_num_csgs,
2093 	u32 kbase_device_as_count,
2094 	u32 kbase_device_sb_entry_count,
2095 	u32 kbase_device_has_cross_stream_sync,
2096 	u32 kbase_device_supports_gpu_sleep
2097 )
2098 {
2099 	const u32 msg_id = KBASE_TL_KBASE_NEW_DEVICE;
2100 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
2101 		+ sizeof(kbase_device_id)
2102 		+ sizeof(kbase_device_gpu_core_count)
2103 		+ sizeof(kbase_device_max_num_csgs)
2104 		+ sizeof(kbase_device_as_count)
2105 		+ sizeof(kbase_device_sb_entry_count)
2106 		+ sizeof(kbase_device_has_cross_stream_sync)
2107 		+ sizeof(kbase_device_supports_gpu_sleep)
2108 		;
2109 	char *buffer;
2110 	unsigned long acq_flags;
2111 	size_t pos = 0;
2112 
2113 	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
2114 
2115 	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
2116 	pos = kbasep_serialize_timestamp(buffer, pos);
2117 	pos = kbasep_serialize_bytes(buffer,
2118 		pos, &kbase_device_id, sizeof(kbase_device_id));
2119 	pos = kbasep_serialize_bytes(buffer,
2120 		pos, &kbase_device_gpu_core_count, sizeof(kbase_device_gpu_core_count));
2121 	pos = kbasep_serialize_bytes(buffer,
2122 		pos, &kbase_device_max_num_csgs, sizeof(kbase_device_max_num_csgs));
2123 	pos = kbasep_serialize_bytes(buffer,
2124 		pos, &kbase_device_as_count, sizeof(kbase_device_as_count));
2125 	pos = kbasep_serialize_bytes(buffer,
2126 		pos, &kbase_device_sb_entry_count, sizeof(kbase_device_sb_entry_count));
2127 	pos = kbasep_serialize_bytes(buffer,
2128 		pos, &kbase_device_has_cross_stream_sync, sizeof(kbase_device_has_cross_stream_sync));
2129 	pos = kbasep_serialize_bytes(buffer,
2130 		pos, &kbase_device_supports_gpu_sleep, sizeof(kbase_device_supports_gpu_sleep));
2131 
2132 	kbase_tlstream_msgbuf_release(stream, acq_flags);
2133 }
2134 
2135 void __kbase_tlstream_tl_kbase_gpucmdqueue_kick(
2136 	struct kbase_tlstream *stream,
2137 	u32 kernel_ctx_id,
2138 	u64 buffer_gpu_addr
2139 )
2140 {
2141 	const u32 msg_id = KBASE_TL_KBASE_GPUCMDQUEUE_KICK;
2142 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
2143 		+ sizeof(kernel_ctx_id)
2144 		+ sizeof(buffer_gpu_addr)
2145 		;
2146 	char *buffer;
2147 	unsigned long acq_flags;
2148 	size_t pos = 0;
2149 
2150 	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
2151 
2152 	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
2153 	pos = kbasep_serialize_timestamp(buffer, pos);
2154 	pos = kbasep_serialize_bytes(buffer,
2155 		pos, &kernel_ctx_id, sizeof(kernel_ctx_id));
2156 	pos = kbasep_serialize_bytes(buffer,
2157 		pos, &buffer_gpu_addr, sizeof(buffer_gpu_addr));
2158 
2159 	kbase_tlstream_msgbuf_release(stream, acq_flags);
2160 }
2161 
2162 void __kbase_tlstream_tl_kbase_device_program_csg(
2163 	struct kbase_tlstream *stream,
2164 	u32 kbase_device_id,
2165 	u32 kernel_ctx_id,
2166 	u32 gpu_cmdq_grp_handle,
2167 	u32 kbase_device_csg_slot_index,
2168 	u32 kbase_device_csg_slot_resuming
2169 )
2170 {
2171 	const u32 msg_id = KBASE_TL_KBASE_DEVICE_PROGRAM_CSG;
2172 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
2173 		+ sizeof(kbase_device_id)
2174 		+ sizeof(kernel_ctx_id)
2175 		+ sizeof(gpu_cmdq_grp_handle)
2176 		+ sizeof(kbase_device_csg_slot_index)
2177 		+ sizeof(kbase_device_csg_slot_resuming)
2178 		;
2179 	char *buffer;
2180 	unsigned long acq_flags;
2181 	size_t pos = 0;
2182 
2183 	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
2184 
2185 	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
2186 	pos = kbasep_serialize_timestamp(buffer, pos);
2187 	pos = kbasep_serialize_bytes(buffer,
2188 		pos, &kbase_device_id, sizeof(kbase_device_id));
2189 	pos = kbasep_serialize_bytes(buffer,
2190 		pos, &kernel_ctx_id, sizeof(kernel_ctx_id));
2191 	pos = kbasep_serialize_bytes(buffer,
2192 		pos, &gpu_cmdq_grp_handle, sizeof(gpu_cmdq_grp_handle));
2193 	pos = kbasep_serialize_bytes(buffer,
2194 		pos, &kbase_device_csg_slot_index, sizeof(kbase_device_csg_slot_index));
2195 	pos = kbasep_serialize_bytes(buffer,
2196 		pos, &kbase_device_csg_slot_resuming, sizeof(kbase_device_csg_slot_resuming));
2197 
2198 	kbase_tlstream_msgbuf_release(stream, acq_flags);
2199 }
2200 
2201 void __kbase_tlstream_tl_kbase_device_deprogram_csg(
2202 	struct kbase_tlstream *stream,
2203 	u32 kbase_device_id,
2204 	u32 kbase_device_csg_slot_index
2205 )
2206 {
2207 	const u32 msg_id = KBASE_TL_KBASE_DEVICE_DEPROGRAM_CSG;
2208 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
2209 		+ sizeof(kbase_device_id)
2210 		+ sizeof(kbase_device_csg_slot_index)
2211 		;
2212 	char *buffer;
2213 	unsigned long acq_flags;
2214 	size_t pos = 0;
2215 
2216 	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
2217 
2218 	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
2219 	pos = kbasep_serialize_timestamp(buffer, pos);
2220 	pos = kbasep_serialize_bytes(buffer,
2221 		pos, &kbase_device_id, sizeof(kbase_device_id));
2222 	pos = kbasep_serialize_bytes(buffer,
2223 		pos, &kbase_device_csg_slot_index, sizeof(kbase_device_csg_slot_index));
2224 
2225 	kbase_tlstream_msgbuf_release(stream, acq_flags);
2226 }
2227 
2228 void __kbase_tlstream_tl_kbase_device_halting_csg(
2229 	struct kbase_tlstream *stream,
2230 	u32 kbase_device_id,
2231 	u32 kbase_device_csg_slot_index,
2232 	u32 kbase_device_csg_slot_suspending
2233 )
2234 {
2235 	const u32 msg_id = KBASE_TL_KBASE_DEVICE_HALTING_CSG;
2236 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
2237 		+ sizeof(kbase_device_id)
2238 		+ sizeof(kbase_device_csg_slot_index)
2239 		+ sizeof(kbase_device_csg_slot_suspending)
2240 		;
2241 	char *buffer;
2242 	unsigned long acq_flags;
2243 	size_t pos = 0;
2244 
2245 	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
2246 
2247 	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
2248 	pos = kbasep_serialize_timestamp(buffer, pos);
2249 	pos = kbasep_serialize_bytes(buffer,
2250 		pos, &kbase_device_id, sizeof(kbase_device_id));
2251 	pos = kbasep_serialize_bytes(buffer,
2252 		pos, &kbase_device_csg_slot_index, sizeof(kbase_device_csg_slot_index));
2253 	pos = kbasep_serialize_bytes(buffer,
2254 		pos, &kbase_device_csg_slot_suspending, sizeof(kbase_device_csg_slot_suspending));
2255 
2256 	kbase_tlstream_msgbuf_release(stream, acq_flags);
2257 }
2258 
2259 void __kbase_tlstream_tl_kbase_device_suspend_csg(
2260 	struct kbase_tlstream *stream,
2261 	u32 kbase_device_id,
2262 	u32 kbase_device_csg_slot_index
2263 )
2264 {
2265 	const u32 msg_id = KBASE_TL_KBASE_DEVICE_SUSPEND_CSG;
2266 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
2267 		+ sizeof(kbase_device_id)
2268 		+ sizeof(kbase_device_csg_slot_index)
2269 		;
2270 	char *buffer;
2271 	unsigned long acq_flags;
2272 	size_t pos = 0;
2273 
2274 	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
2275 
2276 	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
2277 	pos = kbasep_serialize_timestamp(buffer, pos);
2278 	pos = kbasep_serialize_bytes(buffer,
2279 		pos, &kbase_device_id, sizeof(kbase_device_id));
2280 	pos = kbasep_serialize_bytes(buffer,
2281 		pos, &kbase_device_csg_slot_index, sizeof(kbase_device_csg_slot_index));
2282 
2283 	kbase_tlstream_msgbuf_release(stream, acq_flags);
2284 }
2285 
2286 void __kbase_tlstream_tl_kbase_device_csg_idle(
2287 	struct kbase_tlstream *stream,
2288 	u32 kbase_device_id,
2289 	u32 kbase_device_csg_slot_index
2290 )
2291 {
2292 	const u32 msg_id = KBASE_TL_KBASE_DEVICE_CSG_IDLE;
2293 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
2294 		+ sizeof(kbase_device_id)
2295 		+ sizeof(kbase_device_csg_slot_index)
2296 		;
2297 	char *buffer;
2298 	unsigned long acq_flags;
2299 	size_t pos = 0;
2300 
2301 	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
2302 
2303 	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
2304 	pos = kbasep_serialize_timestamp(buffer, pos);
2305 	pos = kbasep_serialize_bytes(buffer,
2306 		pos, &kbase_device_id, sizeof(kbase_device_id));
2307 	pos = kbasep_serialize_bytes(buffer,
2308 		pos, &kbase_device_csg_slot_index, sizeof(kbase_device_csg_slot_index));
2309 
2310 	kbase_tlstream_msgbuf_release(stream, acq_flags);
2311 }
2312 
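/*
 * Illustrative call sketch (an assumption, not taken from this file):
 * callers normally reach these writers through the KBASE_TLSTREAM_* wrapper
 * macros declared alongside the prototypes in mali_kbase_tracepoints.h,
 * rather than calling them directly. A direct call would look like:
 *
 *	__kbase_tlstream_tl_kbase_new_ctx(stream, kernel_ctx_id,
 *					  kbase_device_id);
 */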
2313 void __kbase_tlstream_tl_kbase_new_ctx(
2314 	struct kbase_tlstream *stream,
2315 	u32 kernel_ctx_id,
2316 	u32 kbase_device_id
2317 )
2318 {
2319 	const u32 msg_id = KBASE_TL_KBASE_NEW_CTX;
2320 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
2321 		+ sizeof(kernel_ctx_id)
2322 		+ sizeof(kbase_device_id)
2323 		;
2324 	char *buffer;
2325 	unsigned long acq_flags;
2326 	size_t pos = 0;
2327 
2328 	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
2329 
2330 	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
2331 	pos = kbasep_serialize_timestamp(buffer, pos);
2332 	pos = kbasep_serialize_bytes(buffer,
2333 		pos, &kernel_ctx_id, sizeof(kernel_ctx_id));
2334 	pos = kbasep_serialize_bytes(buffer,
2335 		pos, &kbase_device_id, sizeof(kbase_device_id));
2336 
2337 	kbase_tlstream_msgbuf_release(stream, acq_flags);
2338 }
2339 
2340 void __kbase_tlstream_tl_kbase_del_ctx(
2341 	struct kbase_tlstream *stream,
2342 	u32 kernel_ctx_id
2343 )
2344 {
2345 	const u32 msg_id = KBASE_TL_KBASE_DEL_CTX;
2346 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
2347 		+ sizeof(kernel_ctx_id)
2348 		;
2349 	char *buffer;
2350 	unsigned long acq_flags;
2351 	size_t pos = 0;
2352 
2353 	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
2354 
2355 	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
2356 	pos = kbasep_serialize_timestamp(buffer, pos);
2357 	pos = kbasep_serialize_bytes(buffer,
2358 		pos, &kernel_ctx_id, sizeof(kernel_ctx_id));
2359 
2360 	kbase_tlstream_msgbuf_release(stream, acq_flags);
2361 }
2362 
2363 void __kbase_tlstream_tl_kbase_ctx_assign_as(
2364 	struct kbase_tlstream *stream,
2365 	u32 kernel_ctx_id,
2366 	u32 kbase_device_as_index
2367 )
2368 {
2369 	const u32 msg_id = KBASE_TL_KBASE_CTX_ASSIGN_AS;
2370 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
2371 		+ sizeof(kernel_ctx_id)
2372 		+ sizeof(kbase_device_as_index)
2373 		;
2374 	char *buffer;
2375 	unsigned long acq_flags;
2376 	size_t pos = 0;
2377 
2378 	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
2379 
2380 	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
2381 	pos = kbasep_serialize_timestamp(buffer, pos);
2382 	pos = kbasep_serialize_bytes(buffer,
2383 		pos, &kernel_ctx_id, sizeof(kernel_ctx_id));
2384 	pos = kbasep_serialize_bytes(buffer,
2385 		pos, &kbase_device_as_index, sizeof(kbase_device_as_index));
2386 
2387 	kbase_tlstream_msgbuf_release(stream, acq_flags);
2388 }
2389 
2390 void __kbase_tlstream_tl_kbase_ctx_unassign_as(
2391 	struct kbase_tlstream *stream,
2392 	u32 kernel_ctx_id
2393 )
2394 {
2395 	const u32 msg_id = KBASE_TL_KBASE_CTX_UNASSIGN_AS;
2396 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
2397 		+ sizeof(kernel_ctx_id)
2398 		;
2399 	char *buffer;
2400 	unsigned long acq_flags;
2401 	size_t pos = 0;
2402 
2403 	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
2404 
2405 	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
2406 	pos = kbasep_serialize_timestamp(buffer, pos);
2407 	pos = kbasep_serialize_bytes(buffer,
2408 		pos, &kernel_ctx_id, sizeof(kernel_ctx_id));
2409 
2410 	kbase_tlstream_msgbuf_release(stream, acq_flags);
2411 }
2412 
2413 void __kbase_tlstream_tl_kbase_new_kcpuqueue(
2414 	struct kbase_tlstream *stream,
2415 	const void *kcpu_queue,
2416 	u32 kcpu_queue_id,
2417 	u32 kernel_ctx_id,
2418 	u32 kcpuq_num_pending_cmds
2419 )
2420 {
2421 	const u32 msg_id = KBASE_TL_KBASE_NEW_KCPUQUEUE;
2422 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
2423 		+ sizeof(kcpu_queue)
2424 		+ sizeof(kcpu_queue_id)
2425 		+ sizeof(kernel_ctx_id)
2426 		+ sizeof(kcpuq_num_pending_cmds)
2427 		;
2428 	char *buffer;
2429 	unsigned long acq_flags;
2430 	size_t pos = 0;
2431 
2432 	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
2433 
2434 	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
2435 	pos = kbasep_serialize_timestamp(buffer, pos);
2436 	pos = kbasep_serialize_bytes(buffer,
2437 		pos, &kcpu_queue, sizeof(kcpu_queue));
2438 	pos = kbasep_serialize_bytes(buffer,
2439 		pos, &kcpu_queue_id, sizeof(kcpu_queue_id));
2440 	pos = kbasep_serialize_bytes(buffer,
2441 		pos, &kernel_ctx_id, sizeof(kernel_ctx_id));
2442 	pos = kbasep_serialize_bytes(buffer,
2443 		pos, &kcpuq_num_pending_cmds, sizeof(kcpuq_num_pending_cmds));
2444 
2445 	kbase_tlstream_msgbuf_release(stream, acq_flags);
2446 }
2447 
2448 void __kbase_tlstream_tl_kbase_del_kcpuqueue(
2449 	struct kbase_tlstream *stream,
2450 	const void *kcpu_queue
2451 )
2452 {
2453 	const u32 msg_id = KBASE_TL_KBASE_DEL_KCPUQUEUE;
2454 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
2455 		+ sizeof(kcpu_queue)
2456 		;
2457 	char *buffer;
2458 	unsigned long acq_flags;
2459 	size_t pos = 0;
2460 
2461 	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
2462 
2463 	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
2464 	pos = kbasep_serialize_timestamp(buffer, pos);
2465 	pos = kbasep_serialize_bytes(buffer,
2466 		pos, &kcpu_queue, sizeof(kcpu_queue));
2467 
2468 	kbase_tlstream_msgbuf_release(stream, acq_flags);
2469 }
2470 
2471 void __kbase_tlstream_tl_kbase_kcpuqueue_enqueue_fence_signal(
2472 	struct kbase_tlstream *stream,
2473 	const void *kcpu_queue,
2474 	const void *fence
2475 )
2476 {
2477 	const u32 msg_id = KBASE_TL_KBASE_KCPUQUEUE_ENQUEUE_FENCE_SIGNAL;
2478 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
2479 		+ sizeof(kcpu_queue)
2480 		+ sizeof(fence)
2481 		;
2482 	char *buffer;
2483 	unsigned long acq_flags;
2484 	size_t pos = 0;
2485 
2486 	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
2487 
2488 	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
2489 	pos = kbasep_serialize_timestamp(buffer, pos);
2490 	pos = kbasep_serialize_bytes(buffer,
2491 		pos, &kcpu_queue, sizeof(kcpu_queue));
2492 	pos = kbasep_serialize_bytes(buffer,
2493 		pos, &fence, sizeof(fence));
2494 
2495 	kbase_tlstream_msgbuf_release(stream, acq_flags);
2496 }
2497 
2498 void __kbase_tlstream_tl_kbase_kcpuqueue_enqueue_fence_wait(
2499 	struct kbase_tlstream *stream,
2500 	const void *kcpu_queue,
2501 	const void *fence
2502 )
2503 {
2504 	const u32 msg_id = KBASE_TL_KBASE_KCPUQUEUE_ENQUEUE_FENCE_WAIT;
2505 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
2506 		+ sizeof(kcpu_queue)
2507 		+ sizeof(fence)
2508 		;
2509 	char *buffer;
2510 	unsigned long acq_flags;
2511 	size_t pos = 0;
2512 
2513 	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
2514 
2515 	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
2516 	pos = kbasep_serialize_timestamp(buffer, pos);
2517 	pos = kbasep_serialize_bytes(buffer,
2518 		pos, &kcpu_queue, sizeof(kcpu_queue));
2519 	pos = kbasep_serialize_bytes(buffer,
2520 		pos, &fence, sizeof(fence));
2521 
2522 	kbase_tlstream_msgbuf_release(stream, acq_flags);
2523 }
2524 
2525 void __kbase_tlstream_tl_kbase_kcpuqueue_enqueue_cqs_wait(
2526 	struct kbase_tlstream *stream,
2527 	const void *kcpu_queue,
2528 	u64 cqs_obj_gpu_addr,
2529 	u32 compare_value,
2530 	u32 inherit_error
2531 )
2532 {
2533 	const u32 msg_id = KBASE_TL_KBASE_KCPUQUEUE_ENQUEUE_CQS_WAIT;
2534 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
2535 		+ sizeof(kcpu_queue)
2536 		+ sizeof(cqs_obj_gpu_addr)
2537 		+ sizeof(compare_value)
2538 		+ sizeof(inherit_error)
2539 		;
2540 	char *buffer;
2541 	unsigned long acq_flags;
2542 	size_t pos = 0;
2543 
2544 	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
2545 
2546 	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
2547 	pos = kbasep_serialize_timestamp(buffer, pos);
2548 	pos = kbasep_serialize_bytes(buffer,
2549 		pos, &kcpu_queue, sizeof(kcpu_queue));
2550 	pos = kbasep_serialize_bytes(buffer,
2551 		pos, &cqs_obj_gpu_addr, sizeof(cqs_obj_gpu_addr));
2552 	pos = kbasep_serialize_bytes(buffer,
2553 		pos, &compare_value, sizeof(compare_value));
2554 	pos = kbasep_serialize_bytes(buffer,
2555 		pos, &inherit_error, sizeof(inherit_error));
2556 
2557 	kbase_tlstream_msgbuf_release(stream, acq_flags);
2558 }
2559 
2560 void __kbase_tlstream_tl_kbase_kcpuqueue_enqueue_cqs_set(
2561 	struct kbase_tlstream *stream,
2562 	const void *kcpu_queue,
2563 	u64 cqs_obj_gpu_addr
2564 )
2565 {
2566 	const u32 msg_id = KBASE_TL_KBASE_KCPUQUEUE_ENQUEUE_CQS_SET;
2567 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
2568 		+ sizeof(kcpu_queue)
2569 		+ sizeof(cqs_obj_gpu_addr)
2570 		;
2571 	char *buffer;
2572 	unsigned long acq_flags;
2573 	size_t pos = 0;
2574 
2575 	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
2576 
2577 	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
2578 	pos = kbasep_serialize_timestamp(buffer, pos);
2579 	pos = kbasep_serialize_bytes(buffer,
2580 		pos, &kcpu_queue, sizeof(kcpu_queue));
2581 	pos = kbasep_serialize_bytes(buffer,
2582 		pos, &cqs_obj_gpu_addr, sizeof(cqs_obj_gpu_addr));
2583 
2584 	kbase_tlstream_msgbuf_release(stream, acq_flags);
2585 }
2586 
2587 void __kbase_tlstream_tl_kbase_kcpuqueue_enqueue_cqs_wait_operation(
2588 	struct kbase_tlstream *stream,
2589 	const void *kcpu_queue,
2590 	u64 cqs_obj_gpu_addr,
2591 	u64 compare_value,
2592 	u32 condition,
2593 	u32 data_type,
2594 	u32 inherit_error
2595 )
2596 {
2597 	const u32 msg_id = KBASE_TL_KBASE_KCPUQUEUE_ENQUEUE_CQS_WAIT_OPERATION;
2598 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
2599 		+ sizeof(kcpu_queue)
2600 		+ sizeof(cqs_obj_gpu_addr)
2601 		+ sizeof(compare_value)
2602 		+ sizeof(condition)
2603 		+ sizeof(data_type)
2604 		+ sizeof(inherit_error)
2605 		;
2606 	char *buffer;
2607 	unsigned long acq_flags;
2608 	size_t pos = 0;
2609 
2610 	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
2611 
2612 	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
2613 	pos = kbasep_serialize_timestamp(buffer, pos);
2614 	pos = kbasep_serialize_bytes(buffer,
2615 		pos, &kcpu_queue, sizeof(kcpu_queue));
2616 	pos = kbasep_serialize_bytes(buffer,
2617 		pos, &cqs_obj_gpu_addr, sizeof(cqs_obj_gpu_addr));
2618 	pos = kbasep_serialize_bytes(buffer,
2619 		pos, &compare_value, sizeof(compare_value));
2620 	pos = kbasep_serialize_bytes(buffer,
2621 		pos, &condition, sizeof(condition));
2622 	pos = kbasep_serialize_bytes(buffer,
2623 		pos, &data_type, sizeof(data_type));
2624 	pos = kbasep_serialize_bytes(buffer,
2625 		pos, &inherit_error, sizeof(inherit_error));
2626 
2627 	kbase_tlstream_msgbuf_release(stream, acq_flags);
2628 }
2629 
2630 void __kbase_tlstream_tl_kbase_kcpuqueue_enqueue_cqs_set_operation(
2631 	struct kbase_tlstream *stream,
2632 	const void *kcpu_queue,
2633 	u64 cqs_obj_gpu_addr,
2634 	u64 value,
2635 	u32 operation,
2636 	u32 data_type
2637 )
2638 {
2639 	const u32 msg_id = KBASE_TL_KBASE_KCPUQUEUE_ENQUEUE_CQS_SET_OPERATION;
2640 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
2641 		+ sizeof(kcpu_queue)
2642 		+ sizeof(cqs_obj_gpu_addr)
2643 		+ sizeof(value)
2644 		+ sizeof(operation)
2645 		+ sizeof(data_type)
2646 		;
2647 	char *buffer;
2648 	unsigned long acq_flags;
2649 	size_t pos = 0;
2650 
2651 	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
2652 
2653 	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
2654 	pos = kbasep_serialize_timestamp(buffer, pos);
2655 	pos = kbasep_serialize_bytes(buffer,
2656 		pos, &kcpu_queue, sizeof(kcpu_queue));
2657 	pos = kbasep_serialize_bytes(buffer,
2658 		pos, &cqs_obj_gpu_addr, sizeof(cqs_obj_gpu_addr));
2659 	pos = kbasep_serialize_bytes(buffer,
2660 		pos, &value, sizeof(value));
2661 	pos = kbasep_serialize_bytes(buffer,
2662 		pos, &operation, sizeof(operation));
2663 	pos = kbasep_serialize_bytes(buffer,
2664 		pos, &data_type, sizeof(data_type));
2665 
2666 	kbase_tlstream_msgbuf_release(stream, acq_flags);
2667 }
2668 
2669 void __kbase_tlstream_tl_kbase_kcpuqueue_enqueue_map_import(
2670 	struct kbase_tlstream *stream,
2671 	const void *kcpu_queue,
2672 	u64 map_import_buf_gpu_addr
2673 )
2674 {
2675 	const u32 msg_id = KBASE_TL_KBASE_KCPUQUEUE_ENQUEUE_MAP_IMPORT;
2676 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
2677 		+ sizeof(kcpu_queue)
2678 		+ sizeof(map_import_buf_gpu_addr)
2679 		;
2680 	char *buffer;
2681 	unsigned long acq_flags;
2682 	size_t pos = 0;
2683 
2684 	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
2685 
2686 	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
2687 	pos = kbasep_serialize_timestamp(buffer, pos);
2688 	pos = kbasep_serialize_bytes(buffer,
2689 		pos, &kcpu_queue, sizeof(kcpu_queue));
2690 	pos = kbasep_serialize_bytes(buffer,
2691 		pos, &map_import_buf_gpu_addr, sizeof(map_import_buf_gpu_addr));
2692 
2693 	kbase_tlstream_msgbuf_release(stream, acq_flags);
2694 }
2695 
2696 void __kbase_tlstream_tl_kbase_kcpuqueue_enqueue_unmap_import(
2697 	struct kbase_tlstream *stream,
2698 	const void *kcpu_queue,
2699 	u64 map_import_buf_gpu_addr
2700 )
2701 {
2702 	const u32 msg_id = KBASE_TL_KBASE_KCPUQUEUE_ENQUEUE_UNMAP_IMPORT;
2703 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
2704 		+ sizeof(kcpu_queue)
2705 		+ sizeof(map_import_buf_gpu_addr)
2706 		;
2707 	char *buffer;
2708 	unsigned long acq_flags;
2709 	size_t pos = 0;
2710 
2711 	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
2712 
2713 	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
2714 	pos = kbasep_serialize_timestamp(buffer, pos);
2715 	pos = kbasep_serialize_bytes(buffer,
2716 		pos, &kcpu_queue, sizeof(kcpu_queue));
2717 	pos = kbasep_serialize_bytes(buffer,
2718 		pos, &map_import_buf_gpu_addr, sizeof(map_import_buf_gpu_addr));
2719 
2720 	kbase_tlstream_msgbuf_release(stream, acq_flags);
2721 }
2722 
2723 void __kbase_tlstream_tl_kbase_kcpuqueue_enqueue_unmap_import_force(
2724 	struct kbase_tlstream *stream,
2725 	const void *kcpu_queue,
2726 	u64 map_import_buf_gpu_addr
2727 )
2728 {
2729 	const u32 msg_id = KBASE_TL_KBASE_KCPUQUEUE_ENQUEUE_UNMAP_IMPORT_FORCE;
2730 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
2731 		+ sizeof(kcpu_queue)
2732 		+ sizeof(map_import_buf_gpu_addr)
2733 		;
2734 	char *buffer;
2735 	unsigned long acq_flags;
2736 	size_t pos = 0;
2737 
2738 	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
2739 
2740 	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
2741 	pos = kbasep_serialize_timestamp(buffer, pos);
2742 	pos = kbasep_serialize_bytes(buffer,
2743 		pos, &kcpu_queue, sizeof(kcpu_queue));
2744 	pos = kbasep_serialize_bytes(buffer,
2745 		pos, &map_import_buf_gpu_addr, sizeof(map_import_buf_gpu_addr));
2746 
2747 	kbase_tlstream_msgbuf_release(stream, acq_flags);
2748 }
2749 
2750 void __kbase_tlstream_tl_kbase_array_begin_kcpuqueue_enqueue_jit_alloc(
2751 	struct kbase_tlstream *stream,
2752 	const void *kcpu_queue
2753 )
2754 {
2755 	const u32 msg_id = KBASE_TL_KBASE_ARRAY_BEGIN_KCPUQUEUE_ENQUEUE_JIT_ALLOC;
2756 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
2757 		+ sizeof(kcpu_queue)
2758 		;
2759 	char *buffer;
2760 	unsigned long acq_flags;
2761 	size_t pos = 0;
2762 
2763 	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
2764 
2765 	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
2766 	pos = kbasep_serialize_timestamp(buffer, pos);
2767 	pos = kbasep_serialize_bytes(buffer,
2768 		pos, &kcpu_queue, sizeof(kcpu_queue));
2769 
2770 	kbase_tlstream_msgbuf_release(stream, acq_flags);
2771 }
2772 
2773 void __kbase_tlstream_tl_kbase_array_item_kcpuqueue_enqueue_jit_alloc(
2774 	struct kbase_tlstream *stream,
2775 	const void *kcpu_queue,
2776 	u64 jit_alloc_gpu_alloc_addr_dest,
2777 	u64 jit_alloc_va_pages,
2778 	u64 jit_alloc_commit_pages,
2779 	u64 jit_alloc_extent,
2780 	u32 jit_alloc_jit_id,
2781 	u32 jit_alloc_bin_id,
2782 	u32 jit_alloc_max_allocations,
2783 	u32 jit_alloc_flags,
2784 	u32 jit_alloc_usage_id
2785 )
2786 {
2787 	const u32 msg_id = KBASE_TL_KBASE_ARRAY_ITEM_KCPUQUEUE_ENQUEUE_JIT_ALLOC;
2788 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
2789 		+ sizeof(kcpu_queue)
2790 		+ sizeof(jit_alloc_gpu_alloc_addr_dest)
2791 		+ sizeof(jit_alloc_va_pages)
2792 		+ sizeof(jit_alloc_commit_pages)
2793 		+ sizeof(jit_alloc_extent)
2794 		+ sizeof(jit_alloc_jit_id)
2795 		+ sizeof(jit_alloc_bin_id)
2796 		+ sizeof(jit_alloc_max_allocations)
2797 		+ sizeof(jit_alloc_flags)
2798 		+ sizeof(jit_alloc_usage_id)
2799 		;
2800 	char *buffer;
2801 	unsigned long acq_flags;
2802 	size_t pos = 0;
2803 
2804 	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
2805 
2806 	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
2807 	pos = kbasep_serialize_timestamp(buffer, pos);
2808 	pos = kbasep_serialize_bytes(buffer,
2809 		pos, &kcpu_queue, sizeof(kcpu_queue));
2810 	pos = kbasep_serialize_bytes(buffer,
2811 		pos, &jit_alloc_gpu_alloc_addr_dest, sizeof(jit_alloc_gpu_alloc_addr_dest));
2812 	pos = kbasep_serialize_bytes(buffer,
2813 		pos, &jit_alloc_va_pages, sizeof(jit_alloc_va_pages));
2814 	pos = kbasep_serialize_bytes(buffer,
2815 		pos, &jit_alloc_commit_pages, sizeof(jit_alloc_commit_pages));
2816 	pos = kbasep_serialize_bytes(buffer,
2817 		pos, &jit_alloc_extent, sizeof(jit_alloc_extent));
2818 	pos = kbasep_serialize_bytes(buffer,
2819 		pos, &jit_alloc_jit_id, sizeof(jit_alloc_jit_id));
2820 	pos = kbasep_serialize_bytes(buffer,
2821 		pos, &jit_alloc_bin_id, sizeof(jit_alloc_bin_id));
2822 	pos = kbasep_serialize_bytes(buffer,
2823 		pos, &jit_alloc_max_allocations, sizeof(jit_alloc_max_allocations));
2824 	pos = kbasep_serialize_bytes(buffer,
2825 		pos, &jit_alloc_flags, sizeof(jit_alloc_flags));
2826 	pos = kbasep_serialize_bytes(buffer,
2827 		pos, &jit_alloc_usage_id, sizeof(jit_alloc_usage_id));
2828 
2829 	kbase_tlstream_msgbuf_release(stream, acq_flags);
2830 }
2831 
2832 void __kbase_tlstream_tl_kbase_array_end_kcpuqueue_enqueue_jit_alloc(
2833 	struct kbase_tlstream *stream,
2834 	const void *kcpu_queue
2835 )
2836 {
2837 	const u32 msg_id = KBASE_TL_KBASE_ARRAY_END_KCPUQUEUE_ENQUEUE_JIT_ALLOC;
2838 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
2839 		+ sizeof(kcpu_queue)
2840 		;
2841 	char *buffer;
2842 	unsigned long acq_flags;
2843 	size_t pos = 0;
2844 
2845 	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
2846 
2847 	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
2848 	pos = kbasep_serialize_timestamp(buffer, pos);
2849 	pos = kbasep_serialize_bytes(buffer,
2850 		pos, &kcpu_queue, sizeof(kcpu_queue));
2851 
2852 	kbase_tlstream_msgbuf_release(stream, acq_flags);
2853 }
2854 
2855 void __kbase_tlstream_tl_kbase_array_begin_kcpuqueue_enqueue_jit_free(
2856 	struct kbase_tlstream *stream,
2857 	const void *kcpu_queue
2858 )
2859 {
2860 	const u32 msg_id = KBASE_TL_KBASE_ARRAY_BEGIN_KCPUQUEUE_ENQUEUE_JIT_FREE;
2861 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
2862 		+ sizeof(kcpu_queue)
2863 		;
2864 	char *buffer;
2865 	unsigned long acq_flags;
2866 	size_t pos = 0;
2867 
2868 	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
2869 
2870 	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
2871 	pos = kbasep_serialize_timestamp(buffer, pos);
2872 	pos = kbasep_serialize_bytes(buffer,
2873 		pos, &kcpu_queue, sizeof(kcpu_queue));
2874 
2875 	kbase_tlstream_msgbuf_release(stream, acq_flags);
2876 }
2877 
2878 void __kbase_tlstream_tl_kbase_array_item_kcpuqueue_enqueue_jit_free(
2879 	struct kbase_tlstream *stream,
2880 	const void *kcpu_queue,
2881 	u32 jit_alloc_jit_id
2882 )
2883 {
2884 	const u32 msg_id = KBASE_TL_KBASE_ARRAY_ITEM_KCPUQUEUE_ENQUEUE_JIT_FREE;
2885 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
2886 		+ sizeof(kcpu_queue)
2887 		+ sizeof(jit_alloc_jit_id)
2888 		;
2889 	char *buffer;
2890 	unsigned long acq_flags;
2891 	size_t pos = 0;
2892 
2893 	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
2894 
2895 	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
2896 	pos = kbasep_serialize_timestamp(buffer, pos);
2897 	pos = kbasep_serialize_bytes(buffer,
2898 		pos, &kcpu_queue, sizeof(kcpu_queue));
2899 	pos = kbasep_serialize_bytes(buffer,
2900 		pos, &jit_alloc_jit_id, sizeof(jit_alloc_jit_id));
2901 
2902 	kbase_tlstream_msgbuf_release(stream, acq_flags);
2903 }
2904 
2905 void __kbase_tlstream_tl_kbase_array_end_kcpuqueue_enqueue_jit_free(
2906 	struct kbase_tlstream *stream,
2907 	const void *kcpu_queue
2908 )
2909 {
2910 	const u32 msg_id = KBASE_TL_KBASE_ARRAY_END_KCPUQUEUE_ENQUEUE_JIT_FREE;
2911 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
2912 		+ sizeof(kcpu_queue)
2913 		;
2914 	char *buffer;
2915 	unsigned long acq_flags;
2916 	size_t pos = 0;
2917 
2918 	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
2919 
2920 	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
2921 	pos = kbasep_serialize_timestamp(buffer, pos);
2922 	pos = kbasep_serialize_bytes(buffer,
2923 		pos, &kcpu_queue, sizeof(kcpu_queue));
2924 
2925 	kbase_tlstream_msgbuf_release(stream, acq_flags);
2926 }
2927 
2928 void __kbase_tlstream_tl_kbase_kcpuqueue_enqueue_error_barrier(
2929 	struct kbase_tlstream *stream,
2930 	const void *kcpu_queue
2931 )
2932 {
2933 	const u32 msg_id = KBASE_TL_KBASE_KCPUQUEUE_ENQUEUE_ERROR_BARRIER;
2934 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
2935 		+ sizeof(kcpu_queue)
2936 		;
2937 	char *buffer;
2938 	unsigned long acq_flags;
2939 	size_t pos = 0;
2940 
2941 	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
2942 
2943 	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
2944 	pos = kbasep_serialize_timestamp(buffer, pos);
2945 	pos = kbasep_serialize_bytes(buffer,
2946 		pos, &kcpu_queue, sizeof(kcpu_queue));
2947 
2948 	kbase_tlstream_msgbuf_release(stream, acq_flags);
2949 }
2950 
2951 void __kbase_tlstream_tl_kbase_kcpuqueue_enqueue_group_suspend(
2952 	struct kbase_tlstream *stream,
2953 	const void *kcpu_queue,
2954 	const void *group_suspend_buf,
2955 	u32 gpu_cmdq_grp_handle
2956 )
2957 {
2958 	const u32 msg_id = KBASE_TL_KBASE_KCPUQUEUE_ENQUEUE_GROUP_SUSPEND;
2959 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
2960 		+ sizeof(kcpu_queue)
2961 		+ sizeof(group_suspend_buf)
2962 		+ sizeof(gpu_cmdq_grp_handle)
2963 		;
2964 	char *buffer;
2965 	unsigned long acq_flags;
2966 	size_t pos = 0;
2967 
2968 	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
2969 
2970 	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
2971 	pos = kbasep_serialize_timestamp(buffer, pos);
2972 	pos = kbasep_serialize_bytes(buffer,
2973 		pos, &kcpu_queue, sizeof(kcpu_queue));
2974 	pos = kbasep_serialize_bytes(buffer,
2975 		pos, &group_suspend_buf, sizeof(group_suspend_buf));
2976 	pos = kbasep_serialize_bytes(buffer,
2977 		pos, &gpu_cmdq_grp_handle, sizeof(gpu_cmdq_grp_handle));
2978 
2979 	kbase_tlstream_msgbuf_release(stream, acq_flags);
2980 }
2981 
2982 void __kbase_tlstream_tl_kbase_kcpuqueue_execute_fence_signal_start(
2983 	struct kbase_tlstream *stream,
2984 	const void *kcpu_queue
2985 )
2986 {
2987 	const u32 msg_id = KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_FENCE_SIGNAL_START;
2988 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
2989 		+ sizeof(kcpu_queue)
2990 		;
2991 	char *buffer;
2992 	unsigned long acq_flags;
2993 	size_t pos = 0;
2994 
2995 	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
2996 
2997 	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
2998 	pos = kbasep_serialize_timestamp(buffer, pos);
2999 	pos = kbasep_serialize_bytes(buffer,
3000 		pos, &kcpu_queue, sizeof(kcpu_queue));
3001 
3002 	kbase_tlstream_msgbuf_release(stream, acq_flags);
3003 }
3004 
3005 void __kbase_tlstream_tl_kbase_kcpuqueue_execute_fence_signal_end(
3006 	struct kbase_tlstream *stream,
3007 	const void *kcpu_queue,
3008 	u32 execute_error
3009 )
3010 {
3011 	const u32 msg_id = KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_FENCE_SIGNAL_END;
3012 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
3013 		+ sizeof(kcpu_queue)
3014 		+ sizeof(execute_error)
3015 		;
3016 	char *buffer;
3017 	unsigned long acq_flags;
3018 	size_t pos = 0;
3019 
3020 	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
3021 
3022 	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
3023 	pos = kbasep_serialize_timestamp(buffer, pos);
3024 	pos = kbasep_serialize_bytes(buffer,
3025 		pos, &kcpu_queue, sizeof(kcpu_queue));
3026 	pos = kbasep_serialize_bytes(buffer,
3027 		pos, &execute_error, sizeof(execute_error));
3028 
3029 	kbase_tlstream_msgbuf_release(stream, acq_flags);
3030 }
3031 
3032 void __kbase_tlstream_tl_kbase_kcpuqueue_execute_fence_wait_start(
3033 	struct kbase_tlstream *stream,
3034 	const void *kcpu_queue
3035 )
3036 {
3037 	const u32 msg_id = KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_FENCE_WAIT_START;
3038 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
3039 		+ sizeof(kcpu_queue)
3040 		;
3041 	char *buffer;
3042 	unsigned long acq_flags;
3043 	size_t pos = 0;
3044 
3045 	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
3046 
3047 	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
3048 	pos = kbasep_serialize_timestamp(buffer, pos);
3049 	pos = kbasep_serialize_bytes(buffer,
3050 		pos, &kcpu_queue, sizeof(kcpu_queue));
3051 
3052 	kbase_tlstream_msgbuf_release(stream, acq_flags);
3053 }
3054 
3055 void __kbase_tlstream_tl_kbase_kcpuqueue_execute_fence_wait_end(
3056 	struct kbase_tlstream *stream,
3057 	const void *kcpu_queue,
3058 	u32 execute_error
3059 )
3060 {
3061 	const u32 msg_id = KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_FENCE_WAIT_END;
3062 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
3063 		+ sizeof(kcpu_queue)
3064 		+ sizeof(execute_error)
3065 		;
3066 	char *buffer;
3067 	unsigned long acq_flags;
3068 	size_t pos = 0;
3069 
3070 	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
3071 
3072 	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
3073 	pos = kbasep_serialize_timestamp(buffer, pos);
3074 	pos = kbasep_serialize_bytes(buffer,
3075 		pos, &kcpu_queue, sizeof(kcpu_queue));
3076 	pos = kbasep_serialize_bytes(buffer,
3077 		pos, &execute_error, sizeof(execute_error));
3078 
3079 	kbase_tlstream_msgbuf_release(stream, acq_flags);
3080 }
3081 
3082 void __kbase_tlstream_tl_kbase_kcpuqueue_execute_cqs_wait_start(
3083 	struct kbase_tlstream *stream,
3084 	const void *kcpu_queue
3085 )
3086 {
3087 	const u32 msg_id = KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_CQS_WAIT_START;
3088 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
3089 		+ sizeof(kcpu_queue)
3090 		;
3091 	char *buffer;
3092 	unsigned long acq_flags;
3093 	size_t pos = 0;
3094 
3095 	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
3096 
3097 	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
3098 	pos = kbasep_serialize_timestamp(buffer, pos);
3099 	pos = kbasep_serialize_bytes(buffer,
3100 		pos, &kcpu_queue, sizeof(kcpu_queue));
3101 
3102 	kbase_tlstream_msgbuf_release(stream, acq_flags);
3103 }
3104 
3105 void __kbase_tlstream_tl_kbase_kcpuqueue_execute_cqs_wait_end(
3106 	struct kbase_tlstream *stream,
3107 	const void *kcpu_queue,
3108 	u32 execute_error
3109 )
3110 {
3111 	const u32 msg_id = KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_CQS_WAIT_END;
3112 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
3113 		+ sizeof(kcpu_queue)
3114 		+ sizeof(execute_error)
3115 		;
3116 	char *buffer;
3117 	unsigned long acq_flags;
3118 	size_t pos = 0;
3119 
3120 	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
3121 
3122 	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
3123 	pos = kbasep_serialize_timestamp(buffer, pos);
3124 	pos = kbasep_serialize_bytes(buffer,
3125 		pos, &kcpu_queue, sizeof(kcpu_queue));
3126 	pos = kbasep_serialize_bytes(buffer,
3127 		pos, &execute_error, sizeof(execute_error));
3128 
3129 	kbase_tlstream_msgbuf_release(stream, acq_flags);
3130 }
3131 
3132 void __kbase_tlstream_tl_kbase_kcpuqueue_execute_cqs_set(
3133 	struct kbase_tlstream *stream,
3134 	const void *kcpu_queue,
3135 	u32 execute_error
3136 )
3137 {
3138 	const u32 msg_id = KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_CQS_SET;
3139 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
3140 		+ sizeof(kcpu_queue)
3141 		+ sizeof(execute_error)
3142 		;
3143 	char *buffer;
3144 	unsigned long acq_flags;
3145 	size_t pos = 0;
3146 
3147 	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
3148 
3149 	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
3150 	pos = kbasep_serialize_timestamp(buffer, pos);
3151 	pos = kbasep_serialize_bytes(buffer,
3152 		pos, &kcpu_queue, sizeof(kcpu_queue));
3153 	pos = kbasep_serialize_bytes(buffer,
3154 		pos, &execute_error, sizeof(execute_error));
3155 
3156 	kbase_tlstream_msgbuf_release(stream, acq_flags);
3157 }
3158 
3159 void __kbase_tlstream_tl_kbase_kcpuqueue_execute_cqs_wait_operation_start(
3160 	struct kbase_tlstream *stream,
3161 	const void *kcpu_queue
3162 )
3163 {
3164 	const u32 msg_id = KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_CQS_WAIT_OPERATION_START;
3165 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
3166 		+ sizeof(kcpu_queue)
3167 		;
3168 	char *buffer;
3169 	unsigned long acq_flags;
3170 	size_t pos = 0;
3171 
3172 	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
3173 
3174 	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
3175 	pos = kbasep_serialize_timestamp(buffer, pos);
3176 	pos = kbasep_serialize_bytes(buffer,
3177 		pos, &kcpu_queue, sizeof(kcpu_queue));
3178 
3179 	kbase_tlstream_msgbuf_release(stream, acq_flags);
3180 }
3181 
3182 void __kbase_tlstream_tl_kbase_kcpuqueue_execute_cqs_wait_operation_end(
3183 	struct kbase_tlstream *stream,
3184 	const void *kcpu_queue,
3185 	u32 execute_error
3186 )
3187 {
3188 	const u32 msg_id = KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_CQS_WAIT_OPERATION_END;
3189 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
3190 		+ sizeof(kcpu_queue)
3191 		+ sizeof(execute_error)
3192 		;
3193 	char *buffer;
3194 	unsigned long acq_flags;
3195 	size_t pos = 0;
3196 
3197 	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
3198 
3199 	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
3200 	pos = kbasep_serialize_timestamp(buffer, pos);
3201 	pos = kbasep_serialize_bytes(buffer,
3202 		pos, &kcpu_queue, sizeof(kcpu_queue));
3203 	pos = kbasep_serialize_bytes(buffer,
3204 		pos, &execute_error, sizeof(execute_error));
3205 
3206 	kbase_tlstream_msgbuf_release(stream, acq_flags);
3207 }
3208 
3209 void __kbase_tlstream_tl_kbase_kcpuqueue_execute_cqs_set_operation(
3210 	struct kbase_tlstream *stream,
3211 	const void *kcpu_queue,
3212 	u32 execute_error
3213 )
3214 {
3215 	const u32 msg_id = KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_CQS_SET_OPERATION;
3216 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
3217 		+ sizeof(kcpu_queue)
3218 		+ sizeof(execute_error)
3219 		;
3220 	char *buffer;
3221 	unsigned long acq_flags;
3222 	size_t pos = 0;
3223 
3224 	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
3225 
3226 	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
3227 	pos = kbasep_serialize_timestamp(buffer, pos);
3228 	pos = kbasep_serialize_bytes(buffer,
3229 		pos, &kcpu_queue, sizeof(kcpu_queue));
3230 	pos = kbasep_serialize_bytes(buffer,
3231 		pos, &execute_error, sizeof(execute_error));
3232 
3233 	kbase_tlstream_msgbuf_release(stream, acq_flags);
3234 }
3235 
3236 void __kbase_tlstream_tl_kbase_kcpuqueue_execute_map_import_start(
3237 	struct kbase_tlstream *stream,
3238 	const void *kcpu_queue
3239 )
3240 {
3241 	const u32 msg_id = KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_MAP_IMPORT_START;
3242 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
3243 		+ sizeof(kcpu_queue)
3244 		;
3245 	char *buffer;
3246 	unsigned long acq_flags;
3247 	size_t pos = 0;
3248 
3249 	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
3250 
3251 	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
3252 	pos = kbasep_serialize_timestamp(buffer, pos);
3253 	pos = kbasep_serialize_bytes(buffer,
3254 		pos, &kcpu_queue, sizeof(kcpu_queue));
3255 
3256 	kbase_tlstream_msgbuf_release(stream, acq_flags);
3257 }
3258 
3259 void __kbase_tlstream_tl_kbase_kcpuqueue_execute_map_import_end(
3260 	struct kbase_tlstream *stream,
3261 	const void *kcpu_queue,
3262 	u32 execute_error
3263 )
3264 {
3265 	const u32 msg_id = KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_MAP_IMPORT_END;
3266 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
3267 		+ sizeof(kcpu_queue)
3268 		+ sizeof(execute_error)
3269 		;
3270 	char *buffer;
3271 	unsigned long acq_flags;
3272 	size_t pos = 0;
3273 
3274 	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
3275 
3276 	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
3277 	pos = kbasep_serialize_timestamp(buffer, pos);
3278 	pos = kbasep_serialize_bytes(buffer,
3279 		pos, &kcpu_queue, sizeof(kcpu_queue));
3280 	pos = kbasep_serialize_bytes(buffer,
3281 		pos, &execute_error, sizeof(execute_error));
3282 
3283 	kbase_tlstream_msgbuf_release(stream, acq_flags);
3284 }
3285 
3286 void __kbase_tlstream_tl_kbase_kcpuqueue_execute_unmap_import_start(
3287 	struct kbase_tlstream *stream,
3288 	const void *kcpu_queue
3289 )
3290 {
3291 	const u32 msg_id = KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_UNMAP_IMPORT_START;
3292 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
3293 		+ sizeof(kcpu_queue)
3294 		;
3295 	char *buffer;
3296 	unsigned long acq_flags;
3297 	size_t pos = 0;
3298 
3299 	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
3300 
3301 	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
3302 	pos = kbasep_serialize_timestamp(buffer, pos);
3303 	pos = kbasep_serialize_bytes(buffer,
3304 		pos, &kcpu_queue, sizeof(kcpu_queue));
3305 
3306 	kbase_tlstream_msgbuf_release(stream, acq_flags);
3307 }
3308 
3309 void __kbase_tlstream_tl_kbase_kcpuqueue_execute_unmap_import_end(
3310 	struct kbase_tlstream *stream,
3311 	const void *kcpu_queue,
3312 	u32 execute_error
3313 )
3314 {
3315 	const u32 msg_id = KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_UNMAP_IMPORT_END;
3316 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
3317 		+ sizeof(kcpu_queue)
3318 		+ sizeof(execute_error)
3319 		;
3320 	char *buffer;
3321 	unsigned long acq_flags;
3322 	size_t pos = 0;
3323 
3324 	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
3325 
3326 	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
3327 	pos = kbasep_serialize_timestamp(buffer, pos);
3328 	pos = kbasep_serialize_bytes(buffer,
3329 		pos, &kcpu_queue, sizeof(kcpu_queue));
3330 	pos = kbasep_serialize_bytes(buffer,
3331 		pos, &execute_error, sizeof(execute_error));
3332 
3333 	kbase_tlstream_msgbuf_release(stream, acq_flags);
3334 }
3335 
3336 void __kbase_tlstream_tl_kbase_kcpuqueue_execute_unmap_import_force_start(
3337 	struct kbase_tlstream *stream,
3338 	const void *kcpu_queue
3339 )
3340 {
3341 	const u32 msg_id = KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_UNMAP_IMPORT_FORCE_START;
3342 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
3343 		+ sizeof(kcpu_queue)
3344 		;
3345 	char *buffer;
3346 	unsigned long acq_flags;
3347 	size_t pos = 0;
3348 
3349 	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
3350 
3351 	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
3352 	pos = kbasep_serialize_timestamp(buffer, pos);
3353 	pos = kbasep_serialize_bytes(buffer,
3354 		pos, &kcpu_queue, sizeof(kcpu_queue));
3355 
3356 	kbase_tlstream_msgbuf_release(stream, acq_flags);
3357 }
3358 
3359 void __kbase_tlstream_tl_kbase_kcpuqueue_execute_unmap_import_force_end(
3360 	struct kbase_tlstream *stream,
3361 	const void *kcpu_queue,
3362 	u32 execute_error
3363 )
3364 {
3365 	const u32 msg_id = KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_UNMAP_IMPORT_FORCE_END;
3366 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
3367 		+ sizeof(kcpu_queue)
3368 		+ sizeof(execute_error)
3369 		;
3370 	char *buffer;
3371 	unsigned long acq_flags;
3372 	size_t pos = 0;
3373 
3374 	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
3375 
3376 	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
3377 	pos = kbasep_serialize_timestamp(buffer, pos);
3378 	pos = kbasep_serialize_bytes(buffer,
3379 		pos, &kcpu_queue, sizeof(kcpu_queue));
3380 	pos = kbasep_serialize_bytes(buffer,
3381 		pos, &execute_error, sizeof(execute_error));
3382 
3383 	kbase_tlstream_msgbuf_release(stream, acq_flags);
3384 }
3385 
3386 void __kbase_tlstream_tl_kbase_kcpuqueue_execute_jit_alloc_start(
3387 	struct kbase_tlstream *stream,
3388 	const void *kcpu_queue
3389 )
3390 {
3391 	const u32 msg_id = KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_JIT_ALLOC_START;
3392 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
3393 		+ sizeof(kcpu_queue)
3394 		;
3395 	char *buffer;
3396 	unsigned long acq_flags;
3397 	size_t pos = 0;
3398 
3399 	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
3400 
3401 	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
3402 	pos = kbasep_serialize_timestamp(buffer, pos);
3403 	pos = kbasep_serialize_bytes(buffer,
3404 		pos, &kcpu_queue, sizeof(kcpu_queue));
3405 
3406 	kbase_tlstream_msgbuf_release(stream, acq_flags);
3407 }
3408 
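/*
 * JIT allocation results are traced as an array: one ARRAY_BEGIN event,
 * one ARRAY_ITEM event per allocation carrying its execute_error, GPU
 * allocation address and MMU flags, and a closing ARRAY_END event. The
 * JIT free path further below uses the same begin/item/end structure.
 */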
3409 void __kbase_tlstream_tl_kbase_array_begin_kcpuqueue_execute_jit_alloc_end(
3410 	struct kbase_tlstream *stream,
3411 	const void *kcpu_queue
3412 )
3413 {
3414 	const u32 msg_id = KBASE_TL_KBASE_ARRAY_BEGIN_KCPUQUEUE_EXECUTE_JIT_ALLOC_END;
3415 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
3416 		+ sizeof(kcpu_queue)
3417 		;
3418 	char *buffer;
3419 	unsigned long acq_flags;
3420 	size_t pos = 0;
3421 
3422 	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
3423 
3424 	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
3425 	pos = kbasep_serialize_timestamp(buffer, pos);
3426 	pos = kbasep_serialize_bytes(buffer,
3427 		pos, &kcpu_queue, sizeof(kcpu_queue));
3428 
3429 	kbase_tlstream_msgbuf_release(stream, acq_flags);
3430 }
3431 
3432 void __kbase_tlstream_tl_kbase_array_item_kcpuqueue_execute_jit_alloc_end(
3433 	struct kbase_tlstream *stream,
3434 	const void *kcpu_queue,
3435 	u32 execute_error,
3436 	u64 jit_alloc_gpu_alloc_addr,
3437 	u64 jit_alloc_mmu_flags
3438 )
3439 {
3440 	const u32 msg_id = KBASE_TL_KBASE_ARRAY_ITEM_KCPUQUEUE_EXECUTE_JIT_ALLOC_END;
3441 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
3442 		+ sizeof(kcpu_queue)
3443 		+ sizeof(execute_error)
3444 		+ sizeof(jit_alloc_gpu_alloc_addr)
3445 		+ sizeof(jit_alloc_mmu_flags)
3446 		;
3447 	char *buffer;
3448 	unsigned long acq_flags;
3449 	size_t pos = 0;
3450 
3451 	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
3452 
3453 	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
3454 	pos = kbasep_serialize_timestamp(buffer, pos);
3455 	pos = kbasep_serialize_bytes(buffer,
3456 		pos, &kcpu_queue, sizeof(kcpu_queue));
3457 	pos = kbasep_serialize_bytes(buffer,
3458 		pos, &execute_error, sizeof(execute_error));
3459 	pos = kbasep_serialize_bytes(buffer,
3460 		pos, &jit_alloc_gpu_alloc_addr, sizeof(jit_alloc_gpu_alloc_addr));
3461 	pos = kbasep_serialize_bytes(buffer,
3462 		pos, &jit_alloc_mmu_flags, sizeof(jit_alloc_mmu_flags));
3463 
3464 	kbase_tlstream_msgbuf_release(stream, acq_flags);
3465 }
3466 
3467 void __kbase_tlstream_tl_kbase_array_end_kcpuqueue_execute_jit_alloc_end(
3468 	struct kbase_tlstream *stream,
3469 	const void *kcpu_queue
3470 )
3471 {
3472 	const u32 msg_id = KBASE_TL_KBASE_ARRAY_END_KCPUQUEUE_EXECUTE_JIT_ALLOC_END;
3473 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
3474 		+ sizeof(kcpu_queue)
3475 		;
3476 	char *buffer;
3477 	unsigned long acq_flags;
3478 	size_t pos = 0;
3479 
3480 	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
3481 
3482 	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
3483 	pos = kbasep_serialize_timestamp(buffer, pos);
3484 	pos = kbasep_serialize_bytes(buffer,
3485 		pos, &kcpu_queue, sizeof(kcpu_queue));
3486 
3487 	kbase_tlstream_msgbuf_release(stream, acq_flags);
3488 }
3489 
3490 void __kbase_tlstream_tl_kbase_kcpuqueue_execute_jit_free_start(
3491 	struct kbase_tlstream *stream,
3492 	const void *kcpu_queue
3493 )
3494 {
3495 	const u32 msg_id = KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_JIT_FREE_START;
3496 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
3497 		+ sizeof(kcpu_queue)
3498 		;
3499 	char *buffer;
3500 	unsigned long acq_flags;
3501 	size_t pos = 0;
3502 
3503 	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
3504 
3505 	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
3506 	pos = kbasep_serialize_timestamp(buffer, pos);
3507 	pos = kbasep_serialize_bytes(buffer,
3508 		pos, &kcpu_queue, sizeof(kcpu_queue));
3509 
3510 	kbase_tlstream_msgbuf_release(stream, acq_flags);
3511 }
3512 
3513 void __kbase_tlstream_tl_kbase_array_begin_kcpuqueue_execute_jit_free_end(
3514 	struct kbase_tlstream *stream,
3515 	const void *kcpu_queue
3516 )
3517 {
3518 	const u32 msg_id = KBASE_TL_KBASE_ARRAY_BEGIN_KCPUQUEUE_EXECUTE_JIT_FREE_END;
3519 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
3520 		+ sizeof(kcpu_queue)
3521 		;
3522 	char *buffer;
3523 	unsigned long acq_flags;
3524 	size_t pos = 0;
3525 
3526 	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
3527 
3528 	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
3529 	pos = kbasep_serialize_timestamp(buffer, pos);
3530 	pos = kbasep_serialize_bytes(buffer,
3531 		pos, &kcpu_queue, sizeof(kcpu_queue));
3532 
3533 	kbase_tlstream_msgbuf_release(stream, acq_flags);
3534 }
3535 
3536 void __kbase_tlstream_tl_kbase_array_item_kcpuqueue_execute_jit_free_end(
3537 	struct kbase_tlstream *stream,
3538 	const void *kcpu_queue,
3539 	u32 execute_error,
3540 	u64 jit_free_pages_used
3541 )
3542 {
3543 	const u32 msg_id = KBASE_TL_KBASE_ARRAY_ITEM_KCPUQUEUE_EXECUTE_JIT_FREE_END;
3544 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
3545 		+ sizeof(kcpu_queue)
3546 		+ sizeof(execute_error)
3547 		+ sizeof(jit_free_pages_used)
3548 		;
3549 	char *buffer;
3550 	unsigned long acq_flags;
3551 	size_t pos = 0;
3552 
3553 	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
3554 
3555 	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
3556 	pos = kbasep_serialize_timestamp(buffer, pos);
3557 	pos = kbasep_serialize_bytes(buffer,
3558 		pos, &kcpu_queue, sizeof(kcpu_queue));
3559 	pos = kbasep_serialize_bytes(buffer,
3560 		pos, &execute_error, sizeof(execute_error));
3561 	pos = kbasep_serialize_bytes(buffer,
3562 		pos, &jit_free_pages_used, sizeof(jit_free_pages_used));
3563 
3564 	kbase_tlstream_msgbuf_release(stream, acq_flags);
3565 }
3566 
3567 void __kbase_tlstream_tl_kbase_array_end_kcpuqueue_execute_jit_free_end(
3568 	struct kbase_tlstream *stream,
3569 	const void *kcpu_queue
3570 )
3571 {
3572 	const u32 msg_id = KBASE_TL_KBASE_ARRAY_END_KCPUQUEUE_EXECUTE_JIT_FREE_END;
3573 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
3574 		+ sizeof(kcpu_queue)
3575 		;
3576 	char *buffer;
3577 	unsigned long acq_flags;
3578 	size_t pos = 0;
3579 
3580 	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
3581 
3582 	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
3583 	pos = kbasep_serialize_timestamp(buffer, pos);
3584 	pos = kbasep_serialize_bytes(buffer,
3585 		pos, &kcpu_queue, sizeof(kcpu_queue));
3586 
3587 	kbase_tlstream_msgbuf_release(stream, acq_flags);
3588 }
3589 
3590 void __kbase_tlstream_tl_kbase_kcpuqueue_execute_error_barrier(
3591 	struct kbase_tlstream *stream,
3592 	const void *kcpu_queue
3593 )
3594 {
3595 	const u32 msg_id = KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_ERROR_BARRIER;
3596 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
3597 		+ sizeof(kcpu_queue)
3598 		;
3599 	char *buffer;
3600 	unsigned long acq_flags;
3601 	size_t pos = 0;
3602 
3603 	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
3604 
3605 	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
3606 	pos = kbasep_serialize_timestamp(buffer, pos);
3607 	pos = kbasep_serialize_bytes(buffer,
3608 		pos, &kcpu_queue, sizeof(kcpu_queue));
3609 
3610 	kbase_tlstream_msgbuf_release(stream, acq_flags);
3611 }
3612 
3613 void __kbase_tlstream_tl_kbase_kcpuqueue_execute_group_suspend_start(
3614 	struct kbase_tlstream *stream,
3615 	const void *kcpu_queue
3616 )
3617 {
3618 	const u32 msg_id = KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_GROUP_SUSPEND_START;
3619 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
3620 		+ sizeof(kcpu_queue)
3621 		;
3622 	char *buffer;
3623 	unsigned long acq_flags;
3624 	size_t pos = 0;
3625 
3626 	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
3627 
3628 	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
3629 	pos = kbasep_serialize_timestamp(buffer, pos);
3630 	pos = kbasep_serialize_bytes(buffer,
3631 		pos, &kcpu_queue, sizeof(kcpu_queue));
3632 
3633 	kbase_tlstream_msgbuf_release(stream, acq_flags);
3634 }
3635 
3636 void __kbase_tlstream_tl_kbase_kcpuqueue_execute_group_suspend_end(
3637 	struct kbase_tlstream *stream,
3638 	const void *kcpu_queue,
3639 	u32 execute_error
3640 )
3641 {
3642 	const u32 msg_id = KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_GROUP_SUSPEND_END;
3643 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
3644 		+ sizeof(kcpu_queue)
3645 		+ sizeof(execute_error)
3646 		;
3647 	char *buffer;
3648 	unsigned long acq_flags;
3649 	size_t pos = 0;
3650 
3651 	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
3652 
3653 	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
3654 	pos = kbasep_serialize_timestamp(buffer, pos);
3655 	pos = kbasep_serialize_bytes(buffer,
3656 		pos, &kcpu_queue, sizeof(kcpu_queue));
3657 	pos = kbasep_serialize_bytes(buffer,
3658 		pos, &execute_error, sizeof(execute_error));
3659 
3660 	kbase_tlstream_msgbuf_release(stream, acq_flags);
3661 }
3662 
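/*
 * The __kbase_tlstream_tl_kbase_csffw_* writers below record CSF firmware
 * state transitions. Each carries a u64 csffw_cycle value (presumably the
 * firmware cycle counter at the time of the event); the tlstream overflow
 * event additionally carries a firmware-side timestamp.
 */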
3663 void __kbase_tlstream_tl_kbase_csffw_fw_reloading(
3664 	struct kbase_tlstream *stream,
3665 	u64 csffw_cycle
3666 )
3667 {
3668 	const u32 msg_id = KBASE_TL_KBASE_CSFFW_FW_RELOADING;
3669 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
3670 		+ sizeof(csffw_cycle)
3671 		;
3672 	char *buffer;
3673 	unsigned long acq_flags;
3674 	size_t pos = 0;
3675 
3676 	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
3677 
3678 	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
3679 	pos = kbasep_serialize_timestamp(buffer, pos);
3680 	pos = kbasep_serialize_bytes(buffer,
3681 		pos, &csffw_cycle, sizeof(csffw_cycle));
3682 
3683 	kbase_tlstream_msgbuf_release(stream, acq_flags);
3684 }
3685 
3686 void __kbase_tlstream_tl_kbase_csffw_fw_enabling(
3687 	struct kbase_tlstream *stream,
3688 	u64 csffw_cycle
3689 )
3690 {
3691 	const u32 msg_id = KBASE_TL_KBASE_CSFFW_FW_ENABLING;
3692 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
3693 		+ sizeof(csffw_cycle)
3694 		;
3695 	char *buffer;
3696 	unsigned long acq_flags;
3697 	size_t pos = 0;
3698 
3699 	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
3700 
3701 	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
3702 	pos = kbasep_serialize_timestamp(buffer, pos);
3703 	pos = kbasep_serialize_bytes(buffer,
3704 		pos, &csffw_cycle, sizeof(csffw_cycle));
3705 
3706 	kbase_tlstream_msgbuf_release(stream, acq_flags);
3707 }
3708 
3709 void __kbase_tlstream_tl_kbase_csffw_fw_request_sleep(
3710 	struct kbase_tlstream *stream,
3711 	u64 csffw_cycle
3712 )
3713 {
3714 	const u32 msg_id = KBASE_TL_KBASE_CSFFW_FW_REQUEST_SLEEP;
3715 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
3716 		+ sizeof(csffw_cycle)
3717 		;
3718 	char *buffer;
3719 	unsigned long acq_flags;
3720 	size_t pos = 0;
3721 
3722 	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
3723 
3724 	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
3725 	pos = kbasep_serialize_timestamp(buffer, pos);
3726 	pos = kbasep_serialize_bytes(buffer,
3727 		pos, &csffw_cycle, sizeof(csffw_cycle));
3728 
3729 	kbase_tlstream_msgbuf_release(stream, acq_flags);
3730 }
3731 
3732 void __kbase_tlstream_tl_kbase_csffw_fw_request_wakeup(
3733 	struct kbase_tlstream *stream,
3734 	u64 csffw_cycle
3735 )
3736 {
3737 	const u32 msg_id = KBASE_TL_KBASE_CSFFW_FW_REQUEST_WAKEUP;
3738 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
3739 		+ sizeof(csffw_cycle)
3740 		;
3741 	char *buffer;
3742 	unsigned long acq_flags;
3743 	size_t pos = 0;
3744 
3745 	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
3746 
3747 	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
3748 	pos = kbasep_serialize_timestamp(buffer, pos);
3749 	pos = kbasep_serialize_bytes(buffer,
3750 		pos, &csffw_cycle, sizeof(csffw_cycle));
3751 
3752 	kbase_tlstream_msgbuf_release(stream, acq_flags);
3753 }
3754 
3755 void __kbase_tlstream_tl_kbase_csffw_fw_request_halt(
3756 	struct kbase_tlstream *stream,
3757 	u64 csffw_cycle
3758 )
3759 {
3760 	const u32 msg_id = KBASE_TL_KBASE_CSFFW_FW_REQUEST_HALT;
3761 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
3762 		+ sizeof(csffw_cycle)
3763 		;
3764 	char *buffer;
3765 	unsigned long acq_flags;
3766 	size_t pos = 0;
3767 
3768 	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
3769 
3770 	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
3771 	pos = kbasep_serialize_timestamp(buffer, pos);
3772 	pos = kbasep_serialize_bytes(buffer,
3773 		pos, &csffw_cycle, sizeof(csffw_cycle));
3774 
3775 	kbase_tlstream_msgbuf_release(stream, acq_flags);
3776 }
3777 
3778 void __kbase_tlstream_tl_kbase_csffw_fw_disabling(
3779 	struct kbase_tlstream *stream,
3780 	u64 csffw_cycle
3781 )
3782 {
3783 	const u32 msg_id = KBASE_TL_KBASE_CSFFW_FW_DISABLING;
3784 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
3785 		+ sizeof(csffw_cycle)
3786 		;
3787 	char *buffer;
3788 	unsigned long acq_flags;
3789 	size_t pos = 0;
3790 
3791 	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
3792 
3793 	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
3794 	pos = kbasep_serialize_timestamp(buffer, pos);
3795 	pos = kbasep_serialize_bytes(buffer,
3796 		pos, &csffw_cycle, sizeof(csffw_cycle));
3797 
3798 	kbase_tlstream_msgbuf_release(stream, acq_flags);
3799 }
3800 
3801 void __kbase_tlstream_tl_kbase_csffw_fw_off(
3802 	struct kbase_tlstream *stream,
3803 	u64 csffw_cycle
3804 )
3805 {
3806 	const u32 msg_id = KBASE_TL_KBASE_CSFFW_FW_OFF;
3807 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
3808 		+ sizeof(csffw_cycle)
3809 		;
3810 	char *buffer;
3811 	unsigned long acq_flags;
3812 	size_t pos = 0;
3813 
3814 	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
3815 
3816 	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
3817 	pos = kbasep_serialize_timestamp(buffer, pos);
3818 	pos = kbasep_serialize_bytes(buffer,
3819 		pos, &csffw_cycle, sizeof(csffw_cycle));
3820 
3821 	kbase_tlstream_msgbuf_release(stream, acq_flags);
3822 }
3823 
3824 void __kbase_tlstream_tl_kbase_csffw_tlstream_overflow(
3825 	struct kbase_tlstream *stream,
3826 	u64 csffw_timestamp,
3827 	u64 csffw_cycle
3828 )
3829 {
3830 	const u32 msg_id = KBASE_TL_KBASE_CSFFW_TLSTREAM_OVERFLOW;
3831 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
3832 		+ sizeof(csffw_timestamp)
3833 		+ sizeof(csffw_cycle)
3834 		;
3835 	char *buffer;
3836 	unsigned long acq_flags;
3837 	size_t pos = 0;
3838 
3839 	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
3840 
3841 	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
3842 	pos = kbasep_serialize_timestamp(buffer, pos);
3843 	pos = kbasep_serialize_bytes(buffer,
3844 		pos, &csffw_timestamp, sizeof(csffw_timestamp));
3845 	pos = kbasep_serialize_bytes(buffer,
3846 		pos, &csffw_cycle, sizeof(csffw_cycle));
3847 
3848 	kbase_tlstream_msgbuf_release(stream, acq_flags);
3849 }
3850 
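/*
 * The __kbase_tlstream_aux_* writers below emit auxiliary KBASE_AUX_* events:
 * power-management core state, page faults, page allocation counts, devfreq
 * target frequency, JIT and tiler-heap statistics, job slot events,
 * protected mode entry/exit and MMU commands. They follow the same
 * acquire/serialize/release pattern as the writers above.
 */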
3851 void __kbase_tlstream_aux_pm_state(
3852 	struct kbase_tlstream *stream,
3853 	u32 core_type,
3854 	u64 core_state_bitset
3855 )
3856 {
3857 	const u32 msg_id = KBASE_AUX_PM_STATE;
3858 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
3859 		+ sizeof(core_type)
3860 		+ sizeof(core_state_bitset)
3861 		;
3862 	char *buffer;
3863 	unsigned long acq_flags;
3864 	size_t pos = 0;
3865 
3866 	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
3867 
3868 	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
3869 	pos = kbasep_serialize_timestamp(buffer, pos);
3870 	pos = kbasep_serialize_bytes(buffer,
3871 		pos, &core_type, sizeof(core_type));
3872 	pos = kbasep_serialize_bytes(buffer,
3873 		pos, &core_state_bitset, sizeof(core_state_bitset));
3874 
3875 	kbase_tlstream_msgbuf_release(stream, acq_flags);
3876 }
3877 
3878 void __kbase_tlstream_aux_pagefault(
3879 	struct kbase_tlstream *stream,
3880 	u32 ctx_nr,
3881 	u32 as_nr,
3882 	u64 page_cnt_change
3883 )
3884 {
3885 	const u32 msg_id = KBASE_AUX_PAGEFAULT;
3886 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
3887 		+ sizeof(ctx_nr)
3888 		+ sizeof(as_nr)
3889 		+ sizeof(page_cnt_change)
3890 		;
3891 	char *buffer;
3892 	unsigned long acq_flags;
3893 	size_t pos = 0;
3894 
3895 	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
3896 
3897 	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
3898 	pos = kbasep_serialize_timestamp(buffer, pos);
3899 	pos = kbasep_serialize_bytes(buffer,
3900 		pos, &ctx_nr, sizeof(ctx_nr));
3901 	pos = kbasep_serialize_bytes(buffer,
3902 		pos, &as_nr, sizeof(as_nr));
3903 	pos = kbasep_serialize_bytes(buffer,
3904 		pos, &page_cnt_change, sizeof(page_cnt_change));
3905 
3906 	kbase_tlstream_msgbuf_release(stream, acq_flags);
3907 }
3908 
3909 void __kbase_tlstream_aux_pagesalloc(
3910 	struct kbase_tlstream *stream,
3911 	u32 ctx_nr,
3912 	u64 page_cnt
3913 )
3914 {
3915 	const u32 msg_id = KBASE_AUX_PAGESALLOC;
3916 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
3917 		+ sizeof(ctx_nr)
3918 		+ sizeof(page_cnt)
3919 		;
3920 	char *buffer;
3921 	unsigned long acq_flags;
3922 	size_t pos = 0;
3923 
3924 	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
3925 
3926 	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
3927 	pos = kbasep_serialize_timestamp(buffer, pos);
3928 	pos = kbasep_serialize_bytes(buffer,
3929 		pos, &ctx_nr, sizeof(ctx_nr));
3930 	pos = kbasep_serialize_bytes(buffer,
3931 		pos, &page_cnt, sizeof(page_cnt));
3932 
3933 	kbase_tlstream_msgbuf_release(stream, acq_flags);
3934 }
3935 
3936 void __kbase_tlstream_aux_devfreq_target(
3937 	struct kbase_tlstream *stream,
3938 	u64 target_freq
3939 )
3940 {
3941 	const u32 msg_id = KBASE_AUX_DEVFREQ_TARGET;
3942 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
3943 		+ sizeof(target_freq)
3944 		;
3945 	char *buffer;
3946 	unsigned long acq_flags;
3947 	size_t pos = 0;
3948 
3949 	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
3950 
3951 	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
3952 	pos = kbasep_serialize_timestamp(buffer, pos);
3953 	pos = kbasep_serialize_bytes(buffer,
3954 		pos, &target_freq, sizeof(target_freq));
3955 
3956 	kbase_tlstream_msgbuf_release(stream, acq_flags);
3957 }
3958 
3959 void __kbase_tlstream_aux_jit_stats(
3960 	struct kbase_tlstream *stream,
3961 	u32 ctx_nr,
3962 	u32 bid,
3963 	u32 max_allocs,
3964 	u32 allocs,
3965 	u32 va_pages,
3966 	u32 ph_pages
3967 )
3968 {
3969 	const u32 msg_id = KBASE_AUX_JIT_STATS;
3970 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
3971 		+ sizeof(ctx_nr)
3972 		+ sizeof(bid)
3973 		+ sizeof(max_allocs)
3974 		+ sizeof(allocs)
3975 		+ sizeof(va_pages)
3976 		+ sizeof(ph_pages)
3977 		;
3978 	char *buffer;
3979 	unsigned long acq_flags;
3980 	size_t pos = 0;
3981 
3982 	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
3983 
3984 	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
3985 	pos = kbasep_serialize_timestamp(buffer, pos);
3986 	pos = kbasep_serialize_bytes(buffer,
3987 		pos, &ctx_nr, sizeof(ctx_nr));
3988 	pos = kbasep_serialize_bytes(buffer,
3989 		pos, &bid, sizeof(bid));
3990 	pos = kbasep_serialize_bytes(buffer,
3991 		pos, &max_allocs, sizeof(max_allocs));
3992 	pos = kbasep_serialize_bytes(buffer,
3993 		pos, &allocs, sizeof(allocs));
3994 	pos = kbasep_serialize_bytes(buffer,
3995 		pos, &va_pages, sizeof(va_pages));
3996 	pos = kbasep_serialize_bytes(buffer,
3997 		pos, &ph_pages, sizeof(ph_pages));
3998 
3999 	kbase_tlstream_msgbuf_release(stream, acq_flags);
4000 }
4001 
4002 void __kbase_tlstream_aux_tiler_heap_stats(
4003 	struct kbase_tlstream *stream,
4004 	u32 ctx_nr,
4005 	u64 heap_id,
4006 	u32 va_pages,
4007 	u32 ph_pages,
4008 	u32 max_chunks,
4009 	u32 chunk_size,
4010 	u32 chunk_count,
4011 	u32 target_in_flight,
4012 	u32 nr_in_flight
4013 )
4014 {
4015 	const u32 msg_id = KBASE_AUX_TILER_HEAP_STATS;
4016 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
4017 		+ sizeof(ctx_nr)
4018 		+ sizeof(heap_id)
4019 		+ sizeof(va_pages)
4020 		+ sizeof(ph_pages)
4021 		+ sizeof(max_chunks)
4022 		+ sizeof(chunk_size)
4023 		+ sizeof(chunk_count)
4024 		+ sizeof(target_in_flight)
4025 		+ sizeof(nr_in_flight)
4026 		;
4027 	char *buffer;
4028 	unsigned long acq_flags;
4029 	size_t pos = 0;
4030 
4031 	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
4032 
4033 	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
4034 	pos = kbasep_serialize_timestamp(buffer, pos);
4035 	pos = kbasep_serialize_bytes(buffer,
4036 		pos, &ctx_nr, sizeof(ctx_nr));
4037 	pos = kbasep_serialize_bytes(buffer,
4038 		pos, &heap_id, sizeof(heap_id));
4039 	pos = kbasep_serialize_bytes(buffer,
4040 		pos, &va_pages, sizeof(va_pages));
4041 	pos = kbasep_serialize_bytes(buffer,
4042 		pos, &ph_pages, sizeof(ph_pages));
4043 	pos = kbasep_serialize_bytes(buffer,
4044 		pos, &max_chunks, sizeof(max_chunks));
4045 	pos = kbasep_serialize_bytes(buffer,
4046 		pos, &chunk_size, sizeof(chunk_size));
4047 	pos = kbasep_serialize_bytes(buffer,
4048 		pos, &chunk_count, sizeof(chunk_count));
4049 	pos = kbasep_serialize_bytes(buffer,
4050 		pos, &target_in_flight, sizeof(target_in_flight));
4051 	pos = kbasep_serialize_bytes(buffer,
4052 		pos, &nr_in_flight, sizeof(nr_in_flight));
4053 
4054 	kbase_tlstream_msgbuf_release(stream, acq_flags);
4055 }
4056 
4057 void __kbase_tlstream_aux_event_job_slot(
4058 	struct kbase_tlstream *stream,
4059 	const void *ctx,
4060 	u32 slot_nr,
4061 	u32 atom_nr,
4062 	u32 event
4063 )
4064 {
4065 	const u32 msg_id = KBASE_AUX_EVENT_JOB_SLOT;
4066 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
4067 		+ sizeof(ctx)
4068 		+ sizeof(slot_nr)
4069 		+ sizeof(atom_nr)
4070 		+ sizeof(event)
4071 		;
4072 	char *buffer;
4073 	unsigned long acq_flags;
4074 	size_t pos = 0;
4075 
4076 	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
4077 
4078 	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
4079 	pos = kbasep_serialize_timestamp(buffer, pos);
4080 	pos = kbasep_serialize_bytes(buffer,
4081 		pos, &ctx, sizeof(ctx));
4082 	pos = kbasep_serialize_bytes(buffer,
4083 		pos, &slot_nr, sizeof(slot_nr));
4084 	pos = kbasep_serialize_bytes(buffer,
4085 		pos, &atom_nr, sizeof(atom_nr));
4086 	pos = kbasep_serialize_bytes(buffer,
4087 		pos, &event, sizeof(event));
4088 
4089 	kbase_tlstream_msgbuf_release(stream, acq_flags);
4090 }
4091 
4092 void __kbase_tlstream_aux_protected_enter_start(
4093 	struct kbase_tlstream *stream,
4094 	const void *gpu
4095 )
4096 {
4097 	const u32 msg_id = KBASE_AUX_PROTECTED_ENTER_START;
4098 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
4099 		+ sizeof(gpu)
4100 		;
4101 	char *buffer;
4102 	unsigned long acq_flags;
4103 	size_t pos = 0;
4104 
4105 	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
4106 
4107 	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
4108 	pos = kbasep_serialize_timestamp(buffer, pos);
4109 	pos = kbasep_serialize_bytes(buffer,
4110 		pos, &gpu, sizeof(gpu));
4111 
4112 	kbase_tlstream_msgbuf_release(stream, acq_flags);
4113 }
4114 
4115 void __kbase_tlstream_aux_protected_enter_end(
4116 	struct kbase_tlstream *stream,
4117 	const void *gpu
4118 )
4119 {
4120 	const u32 msg_id = KBASE_AUX_PROTECTED_ENTER_END;
4121 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
4122 		+ sizeof(gpu)
4123 		;
4124 	char *buffer;
4125 	unsigned long acq_flags;
4126 	size_t pos = 0;
4127 
4128 	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
4129 
4130 	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
4131 	pos = kbasep_serialize_timestamp(buffer, pos);
4132 	pos = kbasep_serialize_bytes(buffer,
4133 		pos, &gpu, sizeof(gpu));
4134 
4135 	kbase_tlstream_msgbuf_release(stream, acq_flags);
4136 }
4137 
4138 void __kbase_tlstream_aux_mmu_command(
4139 	struct kbase_tlstream *stream,
4140 	u32 kernel_ctx_id,
4141 	u32 mmu_cmd_id,
4142 	u32 mmu_synchronicity,
4143 	u64 mmu_lock_addr,
4144 	u32 mmu_lock_page_num
4145 )
4146 {
4147 	const u32 msg_id = KBASE_AUX_MMU_COMMAND;
4148 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
4149 		+ sizeof(kernel_ctx_id)
4150 		+ sizeof(mmu_cmd_id)
4151 		+ sizeof(mmu_synchronicity)
4152 		+ sizeof(mmu_lock_addr)
4153 		+ sizeof(mmu_lock_page_num)
4154 		;
4155 	char *buffer;
4156 	unsigned long acq_flags;
4157 	size_t pos = 0;
4158 
4159 	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
4160 
4161 	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
4162 	pos = kbasep_serialize_timestamp(buffer, pos);
4163 	pos = kbasep_serialize_bytes(buffer,
4164 		pos, &kernel_ctx_id, sizeof(kernel_ctx_id));
4165 	pos = kbasep_serialize_bytes(buffer,
4166 		pos, &mmu_cmd_id, sizeof(mmu_cmd_id));
4167 	pos = kbasep_serialize_bytes(buffer,
4168 		pos, &mmu_synchronicity, sizeof(mmu_synchronicity));
4169 	pos = kbasep_serialize_bytes(buffer,
4170 		pos, &mmu_lock_addr, sizeof(mmu_lock_addr));
4171 	pos = kbasep_serialize_bytes(buffer,
4172 		pos, &mmu_lock_page_num, sizeof(mmu_lock_page_num));
4173 
4174 	kbase_tlstream_msgbuf_release(stream, acq_flags);
4175 }
4176 
4177 void __kbase_tlstream_aux_protected_leave_start(
4178 	struct kbase_tlstream *stream,
4179 	const void *gpu
4180 )
4181 {
4182 	const u32 msg_id = KBASE_AUX_PROTECTED_LEAVE_START;
4183 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
4184 		+ sizeof(gpu)
4185 		;
4186 	char *buffer;
4187 	unsigned long acq_flags;
4188 	size_t pos = 0;
4189 
4190 	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
4191 
4192 	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
4193 	pos = kbasep_serialize_timestamp(buffer, pos);
4194 	pos = kbasep_serialize_bytes(buffer,
4195 		pos, &gpu, sizeof(gpu));
4196 
4197 	kbase_tlstream_msgbuf_release(stream, acq_flags);
4198 }
4199 
4200 void __kbase_tlstream_aux_protected_leave_end(
4201 	struct kbase_tlstream *stream,
4202 	const void *gpu
4203 )
4204 {
4205 	const u32 msg_id = KBASE_AUX_PROTECTED_LEAVE_END;
4206 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
4207 		+ sizeof(gpu)
4208 		;
4209 	char *buffer;
4210 	unsigned long acq_flags;
4211 	size_t pos = 0;
4212 
4213 	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
4214 
4215 	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
4216 	pos = kbasep_serialize_timestamp(buffer, pos);
4217 	pos = kbasep_serialize_bytes(buffer,
4218 		pos, &gpu, sizeof(gpu));
4219 
4220 	kbase_tlstream_msgbuf_release(stream, acq_flags);
4221 }
4222 
4223 /* clang-format on */
4224