/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
 *
 * (C) COPYRIGHT 2020-2023 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU license.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 */

#ifndef _UAPI_KBASE_CSF_IOCTL_H_
#define _UAPI_KBASE_CSF_IOCTL_H_

#include <asm-generic/ioctl.h>
#include <linux/types.h>

/*
 * 1.0:
 * - CSF IOCTL header separated from JM
 * 1.1:
 * - Add a new priority level BASE_QUEUE_GROUP_PRIORITY_REALTIME
 * - Add ioctl 54: This controls the priority setting.
 * 1.2:
 * - Add new CSF GPU_FEATURES register into the property structure
 *   returned by KBASE_IOCTL_GET_GPUPROPS
 * 1.3:
 * - Add __u32 group_uid member to
 *   &struct kbase_ioctl_cs_queue_group_create.out
 * 1.4:
 * - Replace padding in kbase_ioctl_cs_get_glb_iface with
 *   instr_features member of same size
 * 1.5:
 * - Add ioctl 40: kbase_ioctl_cs_queue_register_ex, this is a new
 *   queue registration call with extended format for supporting CS
 *   trace configurations with CSF trace_command.
 * 1.6:
 * - Added new HW performance counters interface to all GPUs.
 * 1.7:
 * - Added reserved field to QUEUE_GROUP_CREATE ioctl for future use
 * 1.8:
 * - Removed Kernel legacy HWC interface
 * 1.9:
 * - Reorganization of GPU-VA memory zones, including addition of
 *   FIXED_VA zone and auto-initialization of EXEC_VA zone.
 * - Added new Base memory allocation interface
 * 1.10:
 * - First release of new HW performance counters interface.
 * 1.11:
 * - Dummy model (no mali) backend will now clear HWC values after each sample
 * 1.12:
 * - Added support for incremental rendering flag in CSG create call
 * 1.13:
 * - Added ioctl to query a register of USER page.
 * 1.14:
 * - Added support for passing down the buffer descriptor VA in tiler heap init
 * 1.15:
 * - Enable new sync_wait GE condition
 * 1.16:
 * - Remove legacy definitions:
 *   - base_jit_alloc_info_10_2
 *   - base_jit_alloc_info_11_5
 *   - kbase_ioctl_mem_jit_init_10_2
 *   - kbase_ioctl_mem_jit_init_11_5
 * 1.17:
 * - Fix kinstr_prfcnt issues:
 *   - Missing implicit sample for CMD_STOP when HWCNT buffer is full.
 *   - Race condition when stopping periodic sampling.
 *   - prfcnt_block_metadata::block_idx gaps.
 *   - PRFCNT_CONTROL_CMD_SAMPLE_ASYNC is removed.
 * 1.18:
 * - Relax the requirement to create a mapping with BASE_MEM_MAP_TRACKING_HANDLE
 *   before allocating GPU memory for the context.
 * - CPU mappings of USER_BUFFER imported memory handles must be cached.
 */

#define BASE_UK_VERSION_MAJOR 1
#define BASE_UK_VERSION_MINOR 18

/**
 * struct kbase_ioctl_version_check - Check version compatibility between
 * kernel and userspace
 *
 * @major: Major version number
 * @minor: Minor version number
 */
struct kbase_ioctl_version_check {
	__u16 major;
	__u16 minor;
};

#define KBASE_IOCTL_VERSION_CHECK_RESERVED \
	_IOWR(KBASE_IOCTL_TYPE, 0, struct kbase_ioctl_version_check)
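
/*
 * Example: negotiating the interface version with the driver. A minimal,
 * hypothetical userspace sketch (kbase_fd and the <sys/ioctl.h> call are
 * assumptions, not part of this header). The live handshake uses
 * KBASE_IOCTL_VERSION_CHECK (command 52, defined further below); command 0
 * is reserved.
 *
 *	struct kbase_ioctl_version_check vc = {
 *		.major = BASE_UK_VERSION_MAJOR,
 *		.minor = BASE_UK_VERSION_MINOR,
 *	};
 *	if (ioctl(kbase_fd, KBASE_IOCTL_VERSION_CHECK, &vc) == 0)
 *		; // vc now holds the version the kernel agreed to
 */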

/**
 * struct kbase_ioctl_cs_queue_register - Register a GPU command queue with the
 *                                        base back-end
 *
 * @buffer_gpu_addr: GPU address of the buffer backing the queue
 * @buffer_size: Size of the buffer in bytes
 * @priority: Priority of the queue within a group when run within a process
 * @padding: Currently unused, must be zero
 *
 * Note: There is an identical sub-section in kbase_ioctl_cs_queue_register_ex.
 *       Any change of this struct should also be mirrored to the latter.
 */
struct kbase_ioctl_cs_queue_register {
	__u64 buffer_gpu_addr;
	__u32 buffer_size;
	__u8 priority;
	__u8 padding[3];
};

#define KBASE_IOCTL_CS_QUEUE_REGISTER \
	_IOW(KBASE_IOCTL_TYPE, 36, struct kbase_ioctl_cs_queue_register)
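
/*
 * Example: registering a ring buffer as a GPU command queue. A hedged
 * sketch; ring_va and RING_SIZE are illustrative names and the buffer is
 * assumed to be GPU-mapped already.
 *
 *	struct kbase_ioctl_cs_queue_register reg = {
 *		.buffer_gpu_addr = ring_va, // GPU VA backing the queue
 *		.buffer_size = RING_SIZE,
 *		.priority = 1,
 *	};
 *	ioctl(kbase_fd, KBASE_IOCTL_CS_QUEUE_REGISTER, &reg);
 */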

/**
 * struct kbase_ioctl_cs_queue_kick - Kick the GPU command queue group scheduler
 *                                    to notify that a queue has been updated
 *
 * @buffer_gpu_addr: GPU address of the buffer backing the queue
 */
struct kbase_ioctl_cs_queue_kick {
	__u64 buffer_gpu_addr;
};

#define KBASE_IOCTL_CS_QUEUE_KICK \
	_IOW(KBASE_IOCTL_TYPE, 37, struct kbase_ioctl_cs_queue_kick)
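
/*
 * Example: notifying the scheduler that new work was written to the ring.
 * Hypothetical sketch; ring_va identifies the queue registered above.
 *
 *	struct kbase_ioctl_cs_queue_kick kick = { .buffer_gpu_addr = ring_va };
 *	ioctl(kbase_fd, KBASE_IOCTL_CS_QUEUE_KICK, &kick);
 */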

/**
 * union kbase_ioctl_cs_queue_bind - Bind a GPU command queue to a group
 *
 * @in:                 Input parameters
 * @in.buffer_gpu_addr: GPU address of the buffer backing the queue
 * @in.group_handle:    Handle of the group to which the queue should be bound
 * @in.csi_index:       Index of the CSF interface the queue should be bound to
 * @in.padding:         Currently unused, must be zero
 * @out:                Output parameters
 * @out.mmap_handle:    Handle to be used for creating the mapping of CS
 *                      input/output pages
 */
union kbase_ioctl_cs_queue_bind {
	struct {
		__u64 buffer_gpu_addr;
		__u8 group_handle;
		__u8 csi_index;
		__u8 padding[6];
	} in;
	struct {
		__u64 mmap_handle;
	} out;
};

#define KBASE_IOCTL_CS_QUEUE_BIND \
	_IOWR(KBASE_IOCTL_TYPE, 39, union kbase_ioctl_cs_queue_bind)
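
/*
 * Example: binding the registered queue to CSI 0 of a group and mapping
 * the CS input/output pages through the returned handle. A sketch under
 * assumptions: group_handle comes from a GROUP_CREATE call and the two-page
 * mapping size is illustrative, not specified by this header.
 *
 *	union kbase_ioctl_cs_queue_bind bind = {
 *		.in = { .buffer_gpu_addr = ring_va,
 *			.group_handle = group_handle,
 *			.csi_index = 0 },
 *	};
 *	if (ioctl(kbase_fd, KBASE_IOCTL_CS_QUEUE_BIND, &bind) == 0)
 *		user_io = mmap(NULL, 2 * page_size, PROT_READ | PROT_WRITE,
 *			       MAP_SHARED, kbase_fd, bind.out.mmap_handle);
 */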

/**
 * struct kbase_ioctl_cs_queue_register_ex - Register a GPU command queue with the
 *                                           base back-end in extended format,
 *                                           involving trace buffer configuration
 *
 * @buffer_gpu_addr: GPU address of the buffer backing the queue
 * @buffer_size: Size of the buffer in bytes
 * @priority: Priority of the queue within a group when run within a process
 * @padding: Currently unused, must be zero
 * @ex_offset_var_addr: GPU address of the trace buffer write offset variable
 * @ex_buffer_base: Trace buffer GPU base address for the queue
 * @ex_buffer_size: Size of the trace buffer in bytes
 * @ex_event_size: Trace event write size, in log2 designation
 * @ex_event_state: Trace event states configuration
 * @ex_padding: Currently unused, must be zero
 *
 * Note: There is an identical sub-section at the start of this struct to that
 *       of @ref kbase_ioctl_cs_queue_register. Any change of this sub-section
 *       must also be mirrored to the latter. Following the said sub-section,
 *       the remaining fields form the extension, marked with ex_*.
 */
struct kbase_ioctl_cs_queue_register_ex {
	__u64 buffer_gpu_addr;
	__u32 buffer_size;
	__u8 priority;
	__u8 padding[3];
	__u64 ex_offset_var_addr;
	__u64 ex_buffer_base;
	__u32 ex_buffer_size;
	__u8 ex_event_size;
	__u8 ex_event_state;
	__u8 ex_padding[2];
};

#define KBASE_IOCTL_CS_QUEUE_REGISTER_EX \
	_IOW(KBASE_IOCTL_TYPE, 40, struct kbase_ioctl_cs_queue_register_ex)
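
/*
 * Example: extended registration with a CS trace buffer attached. A hedged
 * sketch; trace_va, offset_va and the 4 KiB buffer size are illustrative
 * values. The first four fields must mirror the plain registration layout.
 *
 *	struct kbase_ioctl_cs_queue_register_ex reg = {
 *		.buffer_gpu_addr = ring_va,
 *		.buffer_size = RING_SIZE,
 *		.priority = 1,
 *		.ex_offset_var_addr = offset_va, // trace write offset variable
 *		.ex_buffer_base = trace_va,
 *		.ex_buffer_size = 4096,
 *		.ex_event_size = 4, // log2, i.e. 16-byte trace events
 *	};
 *	ioctl(kbase_fd, KBASE_IOCTL_CS_QUEUE_REGISTER_EX, &reg);
 */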

/**
 * struct kbase_ioctl_cs_queue_terminate - Terminate a GPU command queue
 *
 * @buffer_gpu_addr: GPU address of the buffer backing the queue
 */
struct kbase_ioctl_cs_queue_terminate {
	__u64 buffer_gpu_addr;
};

#define KBASE_IOCTL_CS_QUEUE_TERMINATE \
	_IOW(KBASE_IOCTL_TYPE, 41, struct kbase_ioctl_cs_queue_terminate)

/**
 * union kbase_ioctl_cs_queue_group_create_1_6 - Create a GPU command queue
 *                                               group
 * @in:               Input parameters
 * @in.tiler_mask:    Mask of tiler endpoints the group is allowed to use.
 * @in.fragment_mask: Mask of fragment endpoints the group is allowed to use.
 * @in.compute_mask:  Mask of compute endpoints the group is allowed to use.
 * @in.cs_min:        Minimum number of CSs required.
 * @in.priority:      Queue group's priority within a process.
 * @in.tiler_max:     Maximum number of tiler endpoints the group is allowed
 *                    to use.
 * @in.fragment_max:  Maximum number of fragment endpoints the group is
 *                    allowed to use.
 * @in.compute_max:   Maximum number of compute endpoints the group is allowed
 *                    to use.
 * @in.padding:       Currently unused, must be zero
 * @out:              Output parameters
 * @out.group_handle: Handle of a newly created queue group.
 * @out.padding:      Currently unused, must be zero
 * @out.group_uid:    UID of the queue group available to base.
 */
union kbase_ioctl_cs_queue_group_create_1_6 {
	struct {
		__u64 tiler_mask;
		__u64 fragment_mask;
		__u64 compute_mask;
		__u8 cs_min;
		__u8 priority;
		__u8 tiler_max;
		__u8 fragment_max;
		__u8 compute_max;
		__u8 padding[3];
	} in;
	struct {
		__u8 group_handle;
		__u8 padding[3];
		__u32 group_uid;
	} out;
};

#define KBASE_IOCTL_CS_QUEUE_GROUP_CREATE_1_6                                  \
	_IOWR(KBASE_IOCTL_TYPE, 42, union kbase_ioctl_cs_queue_group_create_1_6)

/**
 * union kbase_ioctl_cs_queue_group_create - Create a GPU command queue group
 * @in:               Input parameters
 * @in.tiler_mask:    Mask of tiler endpoints the group is allowed to use.
 * @in.fragment_mask: Mask of fragment endpoints the group is allowed to use.
 * @in.compute_mask:  Mask of compute endpoints the group is allowed to use.
 * @in.cs_min:        Minimum number of CSs required.
 * @in.priority:      Queue group's priority within a process.
 * @in.tiler_max:     Maximum number of tiler endpoints the group is allowed
 *                    to use.
 * @in.fragment_max:  Maximum number of fragment endpoints the group is
 *                    allowed to use.
 * @in.compute_max:   Maximum number of compute endpoints the group is allowed
 *                    to use.
 * @in.csi_handlers:  Flags to signal that the application intends to use CSI
 *                    exception handlers in some linear buffers to deal with
 *                    the given exception types.
 * @in.padding:       Currently unused, must be zero
 * @out:              Output parameters
 * @out.group_handle: Handle of a newly created queue group.
 * @out.padding:      Currently unused, must be zero
 * @out.group_uid:    UID of the queue group available to base.
 */
union kbase_ioctl_cs_queue_group_create {
	struct {
		__u64 tiler_mask;
		__u64 fragment_mask;
		__u64 compute_mask;
		__u8 cs_min;
		__u8 priority;
		__u8 tiler_max;
		__u8 fragment_max;
		__u8 compute_max;
		__u8 csi_handlers;
		__u8 padding[2];
		/**
		 * @in.dvs_buf: buffer for deferred vertex shader
		 */
		__u64 dvs_buf;
	} in;
	struct {
		__u8 group_handle;
		__u8 padding[3];
		__u32 group_uid;
	} out;
};

#define KBASE_IOCTL_CS_QUEUE_GROUP_CREATE                                      \
	_IOWR(KBASE_IOCTL_TYPE, 58, union kbase_ioctl_cs_queue_group_create)
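
/*
 * Example: creating a group that may use every endpoint, with at least one
 * CS. A hedged sketch; the all-ones masks and max counts are illustrative
 * and should in practice reflect the endpoints the GPU reports.
 *
 *	union kbase_ioctl_cs_queue_group_create create = {
 *		.in = { .tiler_mask = ~0ULL,
 *			.fragment_mask = ~0ULL,
 *			.compute_mask = ~0ULL,
 *			.cs_min = 1,
 *			.priority = 1,
 *			.tiler_max = 0xFF,
 *			.fragment_max = 0xFF,
 *			.compute_max = 0xFF },
 *	};
 *	if (ioctl(kbase_fd, KBASE_IOCTL_CS_QUEUE_GROUP_CREATE, &create) == 0)
 *		group_handle = create.out.group_handle;
 */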

/**
 * struct kbase_ioctl_cs_queue_group_term - Terminate a GPU command queue group
 *
 * @group_handle: Handle of the queue group to be terminated
 * @padding: Padding to round up to a multiple of 8 bytes, must be zero
 */
struct kbase_ioctl_cs_queue_group_term {
	__u8 group_handle;
	__u8 padding[7];
};

#define KBASE_IOCTL_CS_QUEUE_GROUP_TERMINATE \
	_IOW(KBASE_IOCTL_TYPE, 43, struct kbase_ioctl_cs_queue_group_term)

#define KBASE_IOCTL_CS_EVENT_SIGNAL \
	_IO(KBASE_IOCTL_TYPE, 44)

typedef __u8 base_kcpu_queue_id; /* We support up to 256 active KCPU queues */

/**
 * struct kbase_ioctl_kcpu_queue_new - Create a KCPU command queue
 *
 * @id: ID of the new command queue returned by the kernel
 * @padding: Padding to round up to a multiple of 8 bytes, must be zero
 */
struct kbase_ioctl_kcpu_queue_new {
	base_kcpu_queue_id id;
	__u8 padding[7];
};

#define KBASE_IOCTL_KCPU_QUEUE_CREATE \
	_IOR(KBASE_IOCTL_TYPE, 45, struct kbase_ioctl_kcpu_queue_new)
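
/*
 * Example: creating a KCPU queue and reading back its id. Minimal sketch;
 * this is an _IOR command, so the kernel only writes the struct.
 *
 *	struct kbase_ioctl_kcpu_queue_new q = { 0 };
 *	if (ioctl(kbase_fd, KBASE_IOCTL_KCPU_QUEUE_CREATE, &q) == 0)
 *		kcpu_id = q.id; // one of up to 256 active queues
 */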

/**
 * struct kbase_ioctl_kcpu_queue_delete - Destroy a KCPU command queue
 *
 * @id: ID of the command queue to be destroyed
 * @padding: Padding to round up to a multiple of 8 bytes, must be zero
 */
struct kbase_ioctl_kcpu_queue_delete {
	base_kcpu_queue_id id;
	__u8 padding[7];
};

#define KBASE_IOCTL_KCPU_QUEUE_DELETE \
	_IOW(KBASE_IOCTL_TYPE, 46, struct kbase_ioctl_kcpu_queue_delete)

/**
 * struct kbase_ioctl_kcpu_queue_enqueue - Enqueue commands into the KCPU queue
 *
 * @addr: Memory address of an array of struct base_kcpu_queue_command
 * @nr_commands: Number of commands in the array
 * @id: kcpu queue identifier, returned by KBASE_IOCTL_KCPU_QUEUE_CREATE ioctl
 * @padding: Padding to round up to a multiple of 8 bytes, must be zero
 */
struct kbase_ioctl_kcpu_queue_enqueue {
	__u64 addr;
	__u32 nr_commands;
	base_kcpu_queue_id id;
	__u8 padding[3];
};

#define KBASE_IOCTL_KCPU_QUEUE_ENQUEUE \
	_IOW(KBASE_IOCTL_TYPE, 47, struct kbase_ioctl_kcpu_queue_enqueue)
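
/*
 * Example: submitting a batch of commands to a KCPU queue. A hedged sketch;
 * cmds is assumed to be an array of struct base_kcpu_queue_command (defined
 * in the base CSF headers, not here) and kcpu_id comes from QUEUE_CREATE.
 *
 *	struct kbase_ioctl_kcpu_queue_enqueue enq = {
 *		.addr = (__u64)(uintptr_t)cmds, // user pointer passed as u64
 *		.nr_commands = n_cmds,
 *		.id = kcpu_id,
 *	};
 *	ioctl(kbase_fd, KBASE_IOCTL_KCPU_QUEUE_ENQUEUE, &enq);
 */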

/**
 * union kbase_ioctl_cs_tiler_heap_init - Initialize chunked tiler memory heap
 * @in:                Input parameters
 * @in.chunk_size:     Size of each chunk.
 * @in.initial_chunks: Initial number of chunks that heap will be created with.
 * @in.max_chunks:     Maximum number of chunks that the heap is allowed to use.
 * @in.target_in_flight: Number of render-passes that the driver should attempt to
 *                     keep in flight for which allocation of new chunks is
 *                     allowed.
 * @in.group_id:       Group ID to be used for physical allocations.
 * @in.padding:        Padding
 * @in.buf_desc_va:    Buffer descriptor GPU VA for tiler heap reclaims.
 * @out:               Output parameters
 * @out.gpu_heap_va:   GPU VA (virtual address) of Heap context that was set up
 *                     for the heap.
 * @out.first_chunk_va: GPU VA of the first chunk allocated for the heap,
 *                     actually points to the header of heap chunk and not to
 *                     the low address of free memory in the chunk.
 */
union kbase_ioctl_cs_tiler_heap_init {
	struct {
		__u32 chunk_size;
		__u32 initial_chunks;
		__u32 max_chunks;
		__u16 target_in_flight;
		__u8 group_id;
		__u8 padding;
		__u64 buf_desc_va;
	} in;
	struct {
		__u64 gpu_heap_va;
		__u64 first_chunk_va;
	} out;
};

#define KBASE_IOCTL_CS_TILER_HEAP_INIT \
	_IOWR(KBASE_IOCTL_TYPE, 48, union kbase_ioctl_cs_tiler_heap_init)
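
/*
 * Example: creating a chunked tiler heap. A sketch under assumptions: the
 * 2 MiB chunk size, chunk counts and in-flight target are illustrative
 * tuning values, not requirements of this interface.
 *
 *	union kbase_ioctl_cs_tiler_heap_init heap = {
 *		.in = { .chunk_size = 2 << 20, // 2 MiB chunks
 *			.initial_chunks = 5,
 *			.max_chunks = 200,
 *			.target_in_flight = 2 },
 *	};
 *	if (ioctl(kbase_fd, KBASE_IOCTL_CS_TILER_HEAP_INIT, &heap) == 0)
 *		heap_ctx_va = heap.out.gpu_heap_va;
 */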

/**
 * union kbase_ioctl_cs_tiler_heap_init_1_13 - Initialize chunked tiler memory heap,
 *                                             earlier version up to 1.13
 * @in:                Input parameters
 * @in.chunk_size:     Size of each chunk.
 * @in.initial_chunks: Initial number of chunks that heap will be created with.
 * @in.max_chunks:     Maximum number of chunks that the heap is allowed to use.
 * @in.target_in_flight: Number of render-passes that the driver should attempt to
 *                     keep in flight for which allocation of new chunks is
 *                     allowed.
 * @in.group_id:       Group ID to be used for physical allocations.
 * @in.padding:        Padding
 * @out:               Output parameters
 * @out.gpu_heap_va:   GPU VA (virtual address) of Heap context that was set up
 *                     for the heap.
 * @out.first_chunk_va: GPU VA of the first chunk allocated for the heap,
 *                     actually points to the header of heap chunk and not to
 *                     the low address of free memory in the chunk.
 */
union kbase_ioctl_cs_tiler_heap_init_1_13 {
	struct {
		__u32 chunk_size;
		__u32 initial_chunks;
		__u32 max_chunks;
		__u16 target_in_flight;
		__u8 group_id;
		__u8 padding;
	} in;
	struct {
		__u64 gpu_heap_va;
		__u64 first_chunk_va;
	} out;
};

#define KBASE_IOCTL_CS_TILER_HEAP_INIT_1_13                                                        \
	_IOWR(KBASE_IOCTL_TYPE, 48, union kbase_ioctl_cs_tiler_heap_init_1_13)

/**
 * struct kbase_ioctl_cs_tiler_heap_term - Terminate a chunked tiler heap
 *                                         instance
 *
 * @gpu_heap_va: GPU VA of Heap context that was set up for the heap.
 */
struct kbase_ioctl_cs_tiler_heap_term {
	__u64 gpu_heap_va;
};

#define KBASE_IOCTL_CS_TILER_HEAP_TERM \
	_IOW(KBASE_IOCTL_TYPE, 49, struct kbase_ioctl_cs_tiler_heap_term)

/**
 * union kbase_ioctl_cs_get_glb_iface - Request the global control block
 *                                      of CSF interface capabilities
 *
 * @in:                    Input parameters
 * @in.max_group_num:      The maximum number of groups to be read. Can be 0, in
 *                         which case groups_ptr is unused.
 * @in.max_total_stream_num: The maximum number of CSs to be read. Can be 0, in
 *                         which case streams_ptr is unused.
 * @in.groups_ptr:         Pointer where to store all the group data (sequentially).
 * @in.streams_ptr:        Pointer where to store all the CS data (sequentially).
 * @out:                   Output parameters
 * @out.glb_version:       Global interface version.
 * @out.features:          Bit mask of features (e.g. whether certain types of job
 *                         can be suspended).
 * @out.group_num:         Number of CSGs supported.
 * @out.prfcnt_size:       Size of CSF performance counters, in bytes. Bits 31:16
 *                         hold the size of firmware performance counter data
 *                         and 15:0 hold the size of hardware performance counter
 *                         data.
 * @out.total_stream_num:  Total number of CSs, summed across all groups.
 * @out.instr_features:    Instrumentation features. Bits 7:4 hold the maximum
 *                         size of events. Bits 3:0 hold the offset update rate.
 *                         (csf >= 1.1.0)
 */
union kbase_ioctl_cs_get_glb_iface {
	struct {
		__u32 max_group_num;
		__u32 max_total_stream_num;
		__u64 groups_ptr;
		__u64 streams_ptr;
	} in;
	struct {
		__u32 glb_version;
		__u32 features;
		__u32 group_num;
		__u32 prfcnt_size;
		__u32 total_stream_num;
		__u32 instr_features;
	} out;
};

#define KBASE_IOCTL_CS_GET_GLB_IFACE \
	_IOWR(KBASE_IOCTL_TYPE, 51, union kbase_ioctl_cs_get_glb_iface)
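
/*
 * Example: the usual two-pass query. The first call passes zero counts just
 * to learn group_num and total_stream_num; a second call then supplies
 * buffers sized from those results. A hedged sketch; the per-group and
 * per-stream element layouts live in the base CSF headers, not here.
 *
 *	union kbase_ioctl_cs_get_glb_iface gi = { .in = { 0 } };
 *	ioctl(kbase_fd, KBASE_IOCTL_CS_GET_GLB_IFACE, &gi); // counts only
 *	// ...allocate arrays for gi.out.group_num groups and
 *	// gi.out.total_stream_num CSs, then re-issue the ioctl with
 *	// in.max_group_num, in.max_total_stream_num and both pointers set.
 */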

/**
 * struct kbase_ioctl_cs_cpu_queue_info - Buffer descriptor for a CPU queue
 *                                        dump
 *
 * @buffer: CPU address of the buffer holding the dump
 * @size: Size of the buffer in bytes
 */
struct kbase_ioctl_cs_cpu_queue_info {
	__u64 buffer;
	__u64 size;
};

#define KBASE_IOCTL_VERSION_CHECK \
	_IOWR(KBASE_IOCTL_TYPE, 52, struct kbase_ioctl_version_check)

#define KBASE_IOCTL_CS_CPU_QUEUE_DUMP \
	_IOW(KBASE_IOCTL_TYPE, 53, struct kbase_ioctl_cs_cpu_queue_info)
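
/*
 * Example: handing the driver a dump of this context's CPU queues (note the
 * _IOW direction: the kernel reads from the supplied buffer). A hypothetical
 * sketch; dump_text is an illustrative name.
 *
 *	struct kbase_ioctl_cs_cpu_queue_info info = {
 *		.buffer = (__u64)(uintptr_t)dump_text,
 *		.size = strlen(dump_text) + 1,
 *	};
 *	ioctl(kbase_fd, KBASE_IOCTL_CS_CPU_QUEUE_DUMP, &info);
 */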

/**
 * union kbase_ioctl_mem_alloc_ex - Allocate memory on the GPU
 * @in: Input parameters
 * @in.va_pages: The number of pages of virtual address space to reserve
 * @in.commit_pages: The number of physical pages to allocate
 * @in.extension: The number of extra pages to allocate on each GPU fault which grows the region
 * @in.flags: Flags
 * @in.fixed_address: The GPU virtual address requested for the allocation,
 *                    if the allocation is using the BASE_MEM_FIXED flag.
 * @in.extra: Space for extra parameters that may be added in the future.
 * @out: Output parameters
 * @out.flags: Flags
 * @out.gpu_va: The GPU virtual address which is allocated
 */
union kbase_ioctl_mem_alloc_ex {
	struct {
		__u64 va_pages;
		__u64 commit_pages;
		__u64 extension;
		__u64 flags;
		__u64 fixed_address;
		__u64 extra[3];
	} in;
	struct {
		__u64 flags;
		__u64 gpu_va;
	} out;
};

#define KBASE_IOCTL_MEM_ALLOC_EX _IOWR(KBASE_IOCTL_TYPE, 59, union kbase_ioctl_mem_alloc_ex)
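
/*
 * Example: a plain allocation through the extended interface. A minimal
 * sketch; the BASE_MEM_* flag bits are defined in the base memory headers,
 * so an opaque alloc_flags value is assumed here rather than spelled out.
 *
 *	union kbase_ioctl_mem_alloc_ex alloc = {
 *		.in = { .va_pages = 16,
 *			.commit_pages = 16,
 *			.flags = alloc_flags }, // BASE_MEM_* bits
 *	};
 *	if (ioctl(kbase_fd, KBASE_IOCTL_MEM_ALLOC_EX, &alloc) == 0)
 *		gpu_va = alloc.out.gpu_va;
 */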

/**
 * union kbase_ioctl_read_user_page - Read a register of USER page
 *
 * @in:               Input parameters.
 * @in.offset:        Register offset in USER page.
 * @in.padding:       Padding to round up to a multiple of 8 bytes, must be zero.
 * @out:              Output parameters.
 * @out.val_lo:       Value of 32bit register or the 1st half of 64bit register to be read.
 * @out.val_hi:       Value of the 2nd half of 64bit register to be read.
 */
union kbase_ioctl_read_user_page {
	struct {
		__u32 offset;
		__u32 padding;
	} in;
	struct {
		__u32 val_lo;
		__u32 val_hi;
	} out;
};

#define KBASE_IOCTL_READ_USER_PAGE _IOWR(KBASE_IOCTL_TYPE, 60, union kbase_ioctl_read_user_page)
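
/*
 * Example: reading a 64-bit USER page register and recombining the halves.
 * A hedged sketch; LATEST_FLUSH_OFFSET stands in for a real register
 * offset and is not defined by this header.
 *
 *	union kbase_ioctl_read_user_page rp = {
 *		.in = { .offset = LATEST_FLUSH_OFFSET },
 *	};
 *	if (ioctl(kbase_fd, KBASE_IOCTL_READ_USER_PAGE, &rp) == 0)
 *		val = ((__u64)rp.out.val_hi << 32) | rp.out.val_lo;
 */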

/***************
 * test ioctls *
 ***************/
#if MALI_UNIT_TEST
/* These ioctls are purely for test purposes and are not used in the production
 * driver, they therefore may change without notice
 */

/**
 * struct kbase_ioctl_cs_event_memory_write - Write an event memory address
 * @cpu_addr: Memory address to write
 * @value: Value to write
 * @padding: Currently unused, must be zero
 */
struct kbase_ioctl_cs_event_memory_write {
	__u64 cpu_addr;
	__u8 value;
	__u8 padding[7];
};

/**
 * union kbase_ioctl_cs_event_memory_read - Read an event memory address
 * @in: Input parameters
 * @in.cpu_addr: Memory address to read
 * @out: Output parameters
 * @out.value: Value read
 * @out.padding: Currently unused, must be zero
 */
union kbase_ioctl_cs_event_memory_read {
	struct {
		__u64 cpu_addr;
	} in;
	struct {
		__u8 value;
		__u8 padding[7];
	} out;
};

#endif /* MALI_UNIT_TEST */

#endif /* _UAPI_KBASE_CSF_IOCTL_H_ */