xref: /OK3568_Linux_fs/kernel/include/uapi/gpu/arm/bifrost/csf/mali_base_csf_kernel.h (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1 /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
2 /*
3  *
4  * (C) COPYRIGHT 2020-2022 ARM Limited. All rights reserved.
5  *
6  * This program is free software and is provided to you under the terms of the
7  * GNU General Public License version 2 as published by the Free Software
8  * Foundation, and any use by you of this program is subject to the terms
9  * of such GNU license.
10  *
11  * This program is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14  * GNU General Public License for more details.
15  *
16  * You should have received a copy of the GNU General Public License
17  * along with this program; if not, you can access it online at
18  * http://www.gnu.org/licenses/gpl-2.0.html.
19  *
20  */
21 
22 #ifndef _UAPI_BASE_CSF_KERNEL_H_
23 #define _UAPI_BASE_CSF_KERNEL_H_
24 
25 #include <linux/types.h>
26 #include "../mali_base_common_kernel.h"
27 
/* Memory allocation, access/hint flags & mask specific to CSF GPU.
 *
 * See base_mem_alloc_flags.
 */

/* Must be FIXED memory. */
#define BASE_MEM_FIXED ((base_mem_alloc_flags)1 << 8)

/* CSF event memory
 *
 * If Outer shareable coherence is not specified or not available, then on
 * allocation kbase will automatically use the uncached GPU mapping.
 * There is no need for the client to specify BASE_MEM_UNCACHED_GPU
 * themselves when allocating memory with the BASE_MEM_CSF_EVENT flag.
 *
 * This memory requires a permanent mapping.
 *
 * See also kbase_reg_needs_kernel_mapping()
 */
#define BASE_MEM_CSF_EVENT ((base_mem_alloc_flags)1 << 19)

/* Reserved flag bit: must not be used by clients. It is included in
 * BASE_MEM_FLAGS_RESERVED below and kept to preserve bit positions in the ABI.
 */
#define BASE_MEM_RESERVED_BIT_20 ((base_mem_alloc_flags)1 << 20)


/* Must be FIXABLE memory: its GPU VA will be determined at a later point,
 * at which time it will be at a fixed GPU VA.
 */
#define BASE_MEM_FIXABLE ((base_mem_alloc_flags)1 << 29)
56 
/* Note that the number of bits used for base_mem_alloc_flags
 * must be less than BASE_MEM_FLAGS_NR_BITS !!!
 */

/* A mask of all the flags which are only valid for allocations within kbase,
 * and may not be passed from user space.
 */
#define BASEP_MEM_FLAGS_KERNEL_ONLY \
	(BASEP_MEM_PERMANENT_KERNEL_MAPPING | BASEP_MEM_NO_USER_FREE)

/* A mask of all currently reserved flags (currently only bit 20). */
#define BASE_MEM_FLAGS_RESERVED BASE_MEM_RESERVED_BIT_20
70 
/* Special base mem handles specific to CSF.
 *
 * These pseudo-handles are encoded as page-aligned offsets. LOCAL_PAGE_SHIFT
 * and BASE_MEM_COOKIE_BASE are not defined in this header - presumably they
 * come from the common header included above; confirm against
 * mali_base_common_kernel.h.
 */
#define BASEP_MEM_CSF_USER_REG_PAGE_HANDLE (47ul << LOCAL_PAGE_SHIFT)
#define BASEP_MEM_CSF_USER_IO_PAGES_HANDLE (48ul << LOCAL_PAGE_SHIFT)

/* Number of page-sized handle slots between the USER_IO pages handle and the
 * memory cookie base.
 */
#define KBASE_CSF_NUM_USER_IO_PAGES_HANDLE \
	((BASE_MEM_COOKIE_BASE - BASEP_MEM_CSF_USER_IO_PAGES_HANDLE) >> \
	 LOCAL_PAGE_SHIFT)

/* Valid set of just-in-time memory allocation flags.
 * Zero: no JIT allocation flags are valid on CSF GPUs.
 */
#define BASE_JIT_ALLOC_VALID_FLAGS ((__u8)0)
82 
/* Flags for base context specific to CSF. */

/* Base context creates a CSF event notification thread.
 *
 * The creation of a CSF event notification thread is conditional but
 * mandatory for the handling of CSF events.
 */
#define BASE_CONTEXT_CSF_EVENT_THREAD ((base_context_create_flags)1 << 2)

/* Bitpattern describing the ::base_context_create_flags that can be
 * passed to base_context_init(). The other two flags are presumably defined
 * by the common header included above - confirm against
 * mali_base_common_kernel.h.
 */
#define BASEP_CONTEXT_CREATE_ALLOWED_FLAGS \
	(BASE_CONTEXT_CCTX_EMBEDDED | \
	 BASE_CONTEXT_CSF_EVENT_THREAD | \
	 BASEP_CONTEXT_CREATE_KERNEL_FLAGS)
99 
/* Flags for base tracepoint specific to CSF. */

/* Enable KBase tracepoints for CSF builds */
#define BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS (1 << 2)

/* Enable additional CSF Firmware side tracepoints */
#define BASE_TLSTREAM_ENABLE_CSFFW_TRACEPOINTS (1 << 3)

/* Mask of all tlstream flags valid on CSF builds. The two flags in bits 0-1
 * are presumably defined by the common header included above - confirm
 * against mali_base_common_kernel.h.
 */
#define BASE_TLSTREAM_FLAGS_MASK (BASE_TLSTREAM_ENABLE_LATENCY_TRACEPOINTS | \
		BASE_TLSTREAM_JOB_DUMPING_ENABLED | \
		BASE_TLSTREAM_ENABLE_CSF_TRACEPOINTS | \
		BASE_TLSTREAM_ENABLE_CSFFW_TRACEPOINTS)

/* Number of pages mapped into the process address space for a bound GPU
 * command queue. A pair of input/output pages and a Hw doorbell page
 * are mapped to enable direct submission of commands to Hw.
 */
#define BASEP_QUEUE_NR_MMAP_USER_PAGES ((size_t)3)
118 
/* Maximum priority value for a GPU command queue (valid priorities: 0-15). */
#define BASE_QUEUE_MAX_PRIORITY (15U)

/* Sync32 object fields definition: byte offsets of the value and error
 * fields, and total object size in bytes.
 */
#define BASEP_EVENT32_VAL_OFFSET (0U)
#define BASEP_EVENT32_ERR_OFFSET (4U)
#define BASEP_EVENT32_SIZE_BYTES (8U)

/* Sync64 object fields definition: byte offsets of the value and error
 * fields, and total object size in bytes.
 */
#define BASEP_EVENT64_VAL_OFFSET (0U)
#define BASEP_EVENT64_ERR_OFFSET (8U)
#define BASEP_EVENT64_SIZE_BYTES (16U)

/* Sync32 object alignment, equal to its size */
#define BASEP_EVENT32_ALIGN_BYTES (8U)

/* Sync64 object alignment, equal to its size */
#define BASEP_EVENT64_ALIGN_BYTES (16U)

/* The upper limit for number of objects that could be waited/set per command.
 * This limit is now enforced as internally the error inherit inputs are
 * converted to 32-bit flags in a __u32 variable occupying a previously padding
 * field.
 */
#define BASEP_KCPU_CQS_MAX_NUM_OBJS ((size_t)32)

/* CSF CSI EXCEPTION_HANDLER_FLAGS */

/* Tiler out-of-memory exception flag (bit 0). */
#define BASE_CSF_TILER_OOM_EXCEPTION_FLAG (1u << 0)
/* Mask of all valid CSI exception handler flags. */
#define BASE_CSF_EXCEPTION_HANDLER_FLAGS_MASK (BASE_CSF_TILER_OOM_EXCEPTION_FLAG)
147 
/* Initial value for LATEST_FLUSH register.
 *
 * Cast uses __u32 rather than uint32_t: UAPI headers must rely only on the
 * <linux/types.h> fixed-width types (as the rest of this header does), since
 * <stdint.h> is not guaranteed to be available to every user-space consumer.
 * The numeric value, and hence the ABI, is unchanged.
 */
#define POWER_DOWN_LATEST_FLUSH_VALUE ((__u32)1)
150 
/**
 * enum base_kcpu_command_type - Kernel CPU queue command type.
 *
 * @BASE_KCPU_COMMAND_TYPE_FENCE_SIGNAL:       fence_signal command
 * @BASE_KCPU_COMMAND_TYPE_FENCE_WAIT:         fence_wait command
 * @BASE_KCPU_COMMAND_TYPE_CQS_WAIT:           cqs_wait command
 * @BASE_KCPU_COMMAND_TYPE_CQS_SET:            cqs_set command
 * @BASE_KCPU_COMMAND_TYPE_CQS_WAIT_OPERATION: cqs_wait_operation command
 * @BASE_KCPU_COMMAND_TYPE_CQS_SET_OPERATION:  cqs_set_operation command
 * @BASE_KCPU_COMMAND_TYPE_MAP_IMPORT:         map_import command
 * @BASE_KCPU_COMMAND_TYPE_UNMAP_IMPORT:       unmap_import command
 * @BASE_KCPU_COMMAND_TYPE_UNMAP_IMPORT_FORCE: unmap_import_force command
 * @BASE_KCPU_COMMAND_TYPE_JIT_ALLOC:          jit_alloc command
 * @BASE_KCPU_COMMAND_TYPE_JIT_FREE:           jit_free command
 * @BASE_KCPU_COMMAND_TYPE_GROUP_SUSPEND:      group_suspend command
 * @BASE_KCPU_COMMAND_TYPE_ERROR_BARRIER:      error_barrier command
 *
 * Enumerator values are spelled out explicitly: they form part of the
 * kernel/user ABI and must never be renumbered or reordered.
 */
enum base_kcpu_command_type {
	BASE_KCPU_COMMAND_TYPE_FENCE_SIGNAL = 0,
	BASE_KCPU_COMMAND_TYPE_FENCE_WAIT = 1,
	BASE_KCPU_COMMAND_TYPE_CQS_WAIT = 2,
	BASE_KCPU_COMMAND_TYPE_CQS_SET = 3,
	BASE_KCPU_COMMAND_TYPE_CQS_WAIT_OPERATION = 4,
	BASE_KCPU_COMMAND_TYPE_CQS_SET_OPERATION = 5,
	BASE_KCPU_COMMAND_TYPE_MAP_IMPORT = 6,
	BASE_KCPU_COMMAND_TYPE_UNMAP_IMPORT = 7,
	BASE_KCPU_COMMAND_TYPE_UNMAP_IMPORT_FORCE = 8,
	BASE_KCPU_COMMAND_TYPE_JIT_ALLOC = 9,
	BASE_KCPU_COMMAND_TYPE_JIT_FREE = 10,
	BASE_KCPU_COMMAND_TYPE_GROUP_SUSPEND = 11,
	BASE_KCPU_COMMAND_TYPE_ERROR_BARRIER = 12
};
182 
/**
 * enum base_queue_group_priority - Priority of a GPU Command Queue Group.
 *
 * @BASE_QUEUE_GROUP_PRIORITY_HIGH:     GPU Command Queue Group is of high
 *                                      priority.
 * @BASE_QUEUE_GROUP_PRIORITY_MEDIUM:   GPU Command Queue Group is of medium
 *                                      priority.
 * @BASE_QUEUE_GROUP_PRIORITY_LOW:      GPU Command Queue Group is of low
 *                                      priority.
 * @BASE_QUEUE_GROUP_PRIORITY_REALTIME: GPU Command Queue Group is of real-time
 *                                      priority.
 * @BASE_QUEUE_GROUP_PRIORITY_COUNT:    Number of GPU Command Queue Group
 *                                      priority levels.
 *
 * Currently this is in order of highest to lowest, but if new levels are added
 * then those new levels may be out of order to preserve the ABI compatibility
 * with previous releases. At that point, ensure assignment to
 * the 'priority' member in &kbase_queue_group is updated to ensure it remains
 * a linear ordering.
 *
 * There should be no gaps in the enum, otherwise use of
 * BASE_QUEUE_GROUP_PRIORITY_COUNT in kbase must be updated. Values are
 * spelled out explicitly because they are part of the kernel/user ABI.
 */
enum base_queue_group_priority {
	BASE_QUEUE_GROUP_PRIORITY_HIGH = 0,
	BASE_QUEUE_GROUP_PRIORITY_MEDIUM = 1,
	BASE_QUEUE_GROUP_PRIORITY_LOW = 2,
	BASE_QUEUE_GROUP_PRIORITY_REALTIME = 3,
	BASE_QUEUE_GROUP_PRIORITY_COUNT = 4
};
212 
/**
 * struct base_kcpu_command_fence_info - Fence descriptor for the KCPU
 *		fence_signal and fence_wait commands.
 *
 * @fence: 64-bit value identifying the fence object for the command. Kept as
 *         __u64 so the struct layout is identical for 32-bit and 64-bit
 *         clients.
 */
struct base_kcpu_command_fence_info {
	__u64 fence;
};
216 
/**
 * struct base_cqs_wait_info - Description of a single 32-bit CQS wait object.
 *
 * @addr:    Address of the CQS object to wait on.
 * @val:     Value the object's value is compared against.
 * @padding: Structure padding to a multiple of 64 bits, unused.
 */
struct base_cqs_wait_info {
	__u64 addr;
	__u32 val;
	__u32 padding;
};
222 
/**
 * struct base_kcpu_command_cqs_wait_info - Info for a KCPU cqs_wait command.
 *
 * @objs:              Address of an array of struct base_cqs_wait_info.
 * @nr_objs:           Number of entries in @objs; limited to
 *                     BASEP_KCPU_CQS_MAX_NUM_OBJS.
 * @inherit_err_flags: Bit-pattern selecting which objects in the array have
 *                     their error field imported into the queue's error-state.
 */
struct base_kcpu_command_cqs_wait_info {
	__u64 objs;
	__u32 nr_objs;
	__u32 inherit_err_flags;
};
228 
/**
 * struct base_cqs_set - Description of a single 32-bit CQS set object.
 *
 * @addr: Address of the CQS object to be set.
 */
struct base_cqs_set {
	__u64 addr;
};
232 
/**
 * struct base_kcpu_command_cqs_set_info - Info for a KCPU cqs_set command.
 *
 * @objs:    Address of an array of struct base_cqs_set.
 * @nr_objs: Number of entries in @objs; limited to
 *           BASEP_KCPU_CQS_MAX_NUM_OBJS.
 * @padding: Structure padding to a multiple of 64 bits, unused.
 */
struct base_kcpu_command_cqs_set_info {
	__u64 objs;
	__u32 nr_objs;
	__u32 padding;
};
238 
/**
 * typedef basep_cqs_data_type - Enumeration of CQS Data Types
 *
 * @BASEP_CQS_DATA_TYPE_U32: The Data Type of a CQS Object's value
 *                           is an unsigned 32-bit integer
 * @BASEP_CQS_DATA_TYPE_U64: The Data Type of a CQS Object's value
 *                           is an unsigned 64-bit integer
 *
 * Values of this type are carried in the __u8 @data_type field of
 * struct base_cqs_wait_operation_info / struct base_cqs_set_operation_info.
 * NOTE(review): PACKED is not defined in this header - presumably a common
 * Mali macro that minimises the enum's storage size; confirm its definition.
 */
typedef enum PACKED {
	BASEP_CQS_DATA_TYPE_U32 = 0,
	BASEP_CQS_DATA_TYPE_U64 = 1,
} basep_cqs_data_type;
251 
/**
 * typedef basep_cqs_wait_operation_op - Enumeration of CQS Object Wait
 *                                Operation conditions
 *
 * @BASEP_CQS_WAIT_OPERATION_LE: CQS Wait Operation indicating that a
 *                                wait will be satisfied when a CQS Object's
 *                                value is Less than or Equal to
 *                                the Wait Operation value
 * @BASEP_CQS_WAIT_OPERATION_GT: CQS Wait Operation indicating that a
 *                                wait will be satisfied when a CQS Object's
 *                                value is Greater than the Wait Operation value
 *
 * Values of this type are carried in the __u8 @operation field of
 * struct base_cqs_wait_operation_info.
 */
typedef enum {
	BASEP_CQS_WAIT_OPERATION_LE = 0,
	BASEP_CQS_WAIT_OPERATION_GT = 1,
} basep_cqs_wait_operation_op;
268 
/**
 * struct base_cqs_wait_operation_info - Description of a single Timeline CQS
 *		wait operation.
 *
 * @addr:      Address of the CQS object to wait on.
 * @val:       Value compared against the object's value, as selected
 *             by @operation.
 * @operation: Wait condition, one of basep_cqs_wait_operation_op.
 * @data_type: Width of the object's value, one of basep_cqs_data_type.
 * @padding:   Structure padding to a multiple of 64 bits, unused.
 */
struct base_cqs_wait_operation_info {
	__u64 addr;
	__u64 val;
	__u8 operation;
	__u8 data_type;
	__u8 padding[6];
};
276 
/**
 * struct base_kcpu_command_cqs_wait_operation_info - structure which contains information
 *		about the Timeline CQS wait objects
 *
 * @objs:              An array of Timeline CQS waits
 *                     (struct base_cqs_wait_operation_info).
 * @nr_objs:           Number of Timeline CQS waits in the array; limited to
 *                     BASEP_KCPU_CQS_MAX_NUM_OBJS.
 * @inherit_err_flags: Bit-pattern for the CQSs in the array whose error field
 *                     is to be served as the source for importing into the
 *                     queue's error-state.
 */
struct base_kcpu_command_cqs_wait_operation_info {
	__u64 objs;
	__u32 nr_objs;
	__u32 inherit_err_flags;
};
292 
/**
 * typedef basep_cqs_set_operation_op - Enumeration of CQS Set Operations
 *
 * @BASEP_CQS_SET_OPERATION_ADD: CQS Set operation for adding a value
 *                                to a synchronization object
 * @BASEP_CQS_SET_OPERATION_SET: CQS Set operation for setting the value
 *                                of a synchronization object
 *
 * Values of this type are carried in the __u8 @operation field of
 * struct base_cqs_set_operation_info.
 */
typedef enum {
	BASEP_CQS_SET_OPERATION_ADD = 0,
	BASEP_CQS_SET_OPERATION_SET = 1,
} basep_cqs_set_operation_op;
305 
/**
 * struct base_cqs_set_operation_info - Description of a single Timeline CQS
 *		set operation.
 *
 * @addr:      Address of the CQS object to update.
 * @val:       Value applied to the object, as selected by @operation.
 * @operation: Update to perform, one of basep_cqs_set_operation_op.
 * @data_type: Width of the object's value, one of basep_cqs_data_type.
 * @padding:   Structure padding to a multiple of 64 bits, unused.
 */
struct base_cqs_set_operation_info {
	__u64 addr;
	__u64 val;
	__u8 operation;
	__u8 data_type;
	__u8 padding[6];
};
313 
/**
 * struct base_kcpu_command_cqs_set_operation_info - structure which contains information
 *		about the Timeline CQS set objects
 *
 * @objs:    An array of Timeline CQS sets
 *           (struct base_cqs_set_operation_info).
 * @nr_objs: Number of Timeline CQS sets in the array; limited to
 *           BASEP_KCPU_CQS_MAX_NUM_OBJS.
 * @padding: Structure padding, unused bytes.
 */
struct base_kcpu_command_cqs_set_operation_info {
	__u64 objs;
	__u32 nr_objs;
	__u32 padding;
};
327 
/**
 * struct base_kcpu_command_import_info - structure which contains information
 *		about the imported buffer.
 *
 * @handle:	Address of the imported user buffer. Used by the map_import,
 *		unmap_import and unmap_import_force KCPU commands.
 */
struct base_kcpu_command_import_info {
	__u64 handle;
};
337 
/**
 * struct base_kcpu_command_jit_alloc_info - structure which contains
 *		information about jit memory allocation.
 *
 * @info:	Address of an array of elements of the
 *		struct base_jit_alloc_info type.
 * @count:	The number of elements in the info array (at most 255,
 *		being a __u8).
 * @padding:	Padding to a multiple of 64 bits.
 */
struct base_kcpu_command_jit_alloc_info {
	__u64 info;
	__u8 count;
	__u8 padding[7];
};
352 
/**
 * struct base_kcpu_command_jit_free_info - structure which contains
 *		information about jit memory which is to be freed.
 *
 * @ids:	Address of an array containing the JIT IDs to free.
 * @count:	The number of elements in the ids array (at most 255,
 *		being a __u8).
 * @padding:	Padding to a multiple of 64 bits.
 */
struct base_kcpu_command_jit_free_info {
	__u64 ids;
	__u8 count;
	__u8 padding[7];
};
366 
/**
 * struct base_kcpu_command_group_suspend_info - structure which contains
 *		suspend buffer data captured for a suspended queue group.
 *
 * @buffer:		Address of an array of elements of the type char,
 *			receiving the suspend buffer contents.
 * @size:		Number of elements in the @buffer array.
 * @group_handle:	Handle to the mapping of CSG.
 * @padding:		Padding to a multiple of 64 bits.
 */
struct base_kcpu_command_group_suspend_info {
	__u64 buffer;
	__u32 size;
	__u8 group_handle;
	__u8 padding[3];
};
382 
383 
/**
 * struct base_kcpu_command - kcpu command.
 * @type:	type of the kcpu command, one of enum base_kcpu_command_type
 * @padding:	padding to a multiple of 64 bits
 * @info:	structure which contains information about the kcpu command;
 *		actual member is determined by @type
 * @info.fence:              Fence
 * @info.cqs_wait:           CQS wait
 * @info.cqs_set:            CQS set
 * @info.cqs_wait_operation: CQS wait operation
 * @info.cqs_set_operation:  CQS set operation
 * @info.import:             import
 * @info.jit_alloc:          JIT allocation
 * @info.jit_free:           JIT deallocation
 * @info.suspend_buf_copy:   suspend buffer copy
 * @info.padding:            padding sizing the union to 16 bytes; no
 *                           sub-struct may be larger than this
 */
struct base_kcpu_command {
	__u8 type;
	__u8 padding[sizeof(__u64) - sizeof(__u8)];
	union {
		struct base_kcpu_command_fence_info fence;
		struct base_kcpu_command_cqs_wait_info cqs_wait;
		struct base_kcpu_command_cqs_set_info cqs_set;
		struct base_kcpu_command_cqs_wait_operation_info cqs_wait_operation;
		struct base_kcpu_command_cqs_set_operation_info cqs_set_operation;
		struct base_kcpu_command_import_info import;
		struct base_kcpu_command_jit_alloc_info jit_alloc;
		struct base_kcpu_command_jit_free_info jit_free;
		struct base_kcpu_command_group_suspend_info suspend_buf_copy;
		__u64 padding[2]; /* No sub-struct should be larger */
	} info;
};
418 
/**
 * struct basep_cs_stream_control - CSI capabilities.
 *
 * @features: Features of this stream.
 * @padding:  Padding to a multiple of 64 bits, unused.
 */
struct basep_cs_stream_control {
	__u32 features;
	__u32 padding;
};
429 
/**
 * struct basep_cs_group_control - CSG interface capabilities.
 *
 * @features:     Features of this group.
 * @stream_num:   Number of streams in this group.
 * @suspend_size: Size in bytes of the suspend buffer for this group.
 * @padding:      Padding to a multiple of 64 bits, unused.
 */
struct basep_cs_group_control {
	__u32 features;
	__u32 stream_num;
	__u32 suspend_size;
	__u32 padding;
};
444 
/**
 * struct base_gpu_queue_group_error_fatal_payload - Unrecoverable fault
 *        error information associated with GPU command queue group.
 *
 * @sideband:     Additional information of the unrecoverable fault.
 * @status:       Unrecoverable fault information.
 *                This consists of exception type (least significant byte) and
 *                data (remaining bytes). One example of exception type is
 *                CS_INVALID_INSTRUCTION (0x49).
 * @padding:      Padding to make the struct a multiple of 64 bits, unused.
 */
struct base_gpu_queue_group_error_fatal_payload {
	__u64 sideband;
	__u32 status;
	__u32 padding;
};
461 
/**
 * struct base_gpu_queue_error_fatal_payload - Unrecoverable fault
 *        error information related to GPU command queue.
 *
 * @sideband:     Additional information about this unrecoverable fault.
 * @status:       Unrecoverable fault information.
 *                This consists of exception type (least significant byte) and
 *                data (remaining bytes). One example of exception type is
 *                CS_INVALID_INSTRUCTION (0x49).
 * @csi_index:    Index of the CSF interface the queue is bound to.
 * @padding:      Padding to make the struct a multiple of 64 bits, unused.
 */
struct base_gpu_queue_error_fatal_payload {
	__u64 sideband;
	__u32 status;
	__u8 csi_index;
	__u8 padding[3];
};
480 
/**
 * enum base_gpu_queue_group_error_type - GPU Fatal error type.
 *
 * @BASE_GPU_QUEUE_GROUP_ERROR_FATAL:          Fatal error associated with GPU
 *                                             command queue group.
 * @BASE_GPU_QUEUE_GROUP_QUEUE_ERROR_FATAL:    Fatal error associated with GPU
 *                                             command queue.
 * @BASE_GPU_QUEUE_GROUP_ERROR_TIMEOUT:        Fatal error associated with
 *                                             progress timeout.
 * @BASE_GPU_QUEUE_GROUP_ERROR_TILER_HEAP_OOM: Fatal error due to running out
 *                                             of tiler heap memory.
 * @BASE_GPU_QUEUE_GROUP_ERROR_FATAL_COUNT:    The number of fatal error types.
 *
 * This type is used for &struct_base_gpu_queue_group_error.error_type.
 * Enumerator values are spelled out explicitly because they are part of the
 * kernel/user ABI and must never be renumbered.
 */
enum base_gpu_queue_group_error_type {
	BASE_GPU_QUEUE_GROUP_ERROR_FATAL = 0,
	BASE_GPU_QUEUE_GROUP_QUEUE_ERROR_FATAL = 1,
	BASE_GPU_QUEUE_GROUP_ERROR_TIMEOUT = 2,
	BASE_GPU_QUEUE_GROUP_ERROR_TILER_HEAP_OOM = 3,
	BASE_GPU_QUEUE_GROUP_ERROR_FATAL_COUNT = 4
};
503 
/**
 * struct base_gpu_queue_group_error - Unrecoverable fault information
 * @error_type:          Error type of @base_gpu_queue_group_error_type
 *                       indicating which field in union payload is filled
 * @padding:             Unused bytes for 64bit boundary
 * @payload:             Input Payload
 * @payload.fatal_group: Unrecoverable fault error associated with
 *                       GPU command queue group
 * @payload.fatal_queue: Unrecoverable fault error associated with command queue
 *                       (selected when @error_type is
 *                       BASE_GPU_QUEUE_GROUP_QUEUE_ERROR_FATAL)
 */
struct base_gpu_queue_group_error {
	__u8 error_type;
	__u8 padding[7];
	union {
		struct base_gpu_queue_group_error_fatal_payload fatal_group;
		struct base_gpu_queue_error_fatal_payload fatal_queue;
	} payload;
};
522 
/**
 * enum base_csf_notification_type - Notification type
 *
 * @BASE_CSF_NOTIFICATION_EVENT:                 Notification with kernel event
 * @BASE_CSF_NOTIFICATION_GPU_QUEUE_GROUP_ERROR: Notification with GPU fatal
 *                                               error
 * @BASE_CSF_NOTIFICATION_CPU_QUEUE_DUMP:        Notification with dumping cpu
 *                                               queue
 * @BASE_CSF_NOTIFICATION_COUNT:                 The number of notification
 *                                               types
 *
 * This type is used for &struct_base_csf_notification.type. Enumerator
 * values are spelled out explicitly because they are part of the kernel/user
 * ABI and must never be renumbered.
 */
enum base_csf_notification_type {
	BASE_CSF_NOTIFICATION_EVENT = 0,
	BASE_CSF_NOTIFICATION_GPU_QUEUE_GROUP_ERROR = 1,
	BASE_CSF_NOTIFICATION_CPU_QUEUE_DUMP = 2,
	BASE_CSF_NOTIFICATION_COUNT = 3
};
541 
/**
 * struct base_csf_notification - Event or error notification
 *
 * @type:                      Notification type of @base_csf_notification_type
 * @padding:                   Padding for 64bit boundary, unused
 * @payload:                   Input Payload
 * @payload.align:             To fit the struct into a 64-byte cache line
 * @payload.csg_error:         CSG error, valid when @type is
 *                             BASE_CSF_NOTIFICATION_GPU_QUEUE_GROUP_ERROR
 * @payload.csg_error.handle:  Handle of GPU command queue group associated with
 *                             fatal error
 * @payload.csg_error.padding: Padding, unused
 * @payload.csg_error.error:   Unrecoverable fault error
 *
 */
struct base_csf_notification {
	__u8 type;
	__u8 padding[7];
	union {
		struct {
			__u8 handle;
			__u8 padding[7];
			struct base_gpu_queue_group_error error;
		} csg_error;

		__u8 align[56];
	} payload;
};
569 
/**
 * struct mali_base_gpu_core_props - GPU core props info
 *
 * @product_id: Product specific value.
 * @version_status: Status of the GPU release. No defined values, but starts at
 *   0 and increases by one for each release status (alpha, beta, EAC, etc.).
 *   4 bit values (0-15).
 * @minor_revision: Minor release number of the GPU. "P" part of an "RnPn"
 *   release number.
 *   8 bit values (0-255).
 * @major_revision: Major release number of the GPU. "R" part of an "RnPn"
 *   release number.
 *   4 bit values (0-15).
 * @padding: padding to align to 8-byte, unused.
 * @gpu_freq_khz_max: The maximum GPU frequency. Reported to applications by
 *   clGetDeviceInfo()
 * @log2_program_counter_size: Size of the shader program counter, in bits.
 * @texture_features: TEXTURE_FEATURES_x registers, as exposed by the GPU. This
 *   is a bitpattern where a set bit indicates that the format is supported.
 *   Before using a texture format, it is recommended that the corresponding
 *   bit be checked. (BASE_GPU_NUM_TEXTURE_FEATURES_REGISTERS is presumably
 *   defined by the common header included above - confirm.)
 * @paddings: Padding bytes, unused.
 * @gpu_available_memory_size: Theoretical maximum memory available to the GPU.
 *   It is unlikely that a client will be able to allocate all of this memory
 *   for their own purposes, but this at least provides an upper bound on the
 *   memory available to the GPU.
 *   This is required for OpenCL's clGetDeviceInfo() call when
 *   CL_DEVICE_GLOBAL_MEM_SIZE is requested, for OpenCL GPU devices. The
 *   client will not be expecting to allocate anywhere near this value.
 */
struct mali_base_gpu_core_props {
	__u32 product_id;
	__u16 version_status;
	__u16 minor_revision;
	__u16 major_revision;
	__u16 padding;
	__u32 gpu_freq_khz_max;
	__u32 log2_program_counter_size;
	__u32 texture_features[BASE_GPU_NUM_TEXTURE_FEATURES_REGISTERS];
	__u8 paddings[4];
	__u64 gpu_available_memory_size;
};
612 
613 #endif /* _UAPI_BASE_CSF_KERNEL_H_ */
614