/*
 *
 * (C) COPYRIGHT 2010-2017 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU licence.
 *
 * A copy of the licence is included with the program, and can also be obtained
 * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
 * Boston, MA 02110-1301, USA.
 *
 */

/**
 * @file
 * Base structures shared with the kernel.
 */

#ifndef _BASE_KERNEL_H_
#define _BASE_KERNEL_H_

#ifndef __user
#define __user
#endif

/* Support UK6 IOCTLS */
#define BASE_LEGACY_UK6_SUPPORT 1

/* Support UK7 IOCTLS */
/* NB: To support UK6 we also need to support UK7 */
#define BASE_LEGACY_UK7_SUPPORT 1

/* Support UK8 IOCTLS */
#define BASE_LEGACY_UK8_SUPPORT 1

/* Support UK9 IOCTLS */
#define BASE_LEGACY_UK9_SUPPORT 1

/* Support UK10_2 IOCTLS */
#define BASE_LEGACY_UK10_2_SUPPORT 1

/* Support UK10_4 IOCTLS */
#define BASE_LEGACY_UK10_4_SUPPORT 1

typedef struct base_mem_handle {
    struct {
        u64 handle;
    } basep;
} base_mem_handle;

#include "mali_base_mem_priv.h"
#include "mali_kbase_profiling_gator_api.h"
#include "mali_midg_coherency.h"
#include "mali_kbase_gpu_id.h"

/*
 * Dependency stuff, keep it private for now. May want to expose it if
 * we decide to make the number of semaphores a configurable
 * option.
 */
#define BASE_JD_ATOM_COUNT 512

#define BASEP_JD_SEM_PER_WORD_LOG2 5
#define BASEP_JD_SEM_PER_WORD (1 << BASEP_JD_SEM_PER_WORD_LOG2)
#define BASEP_JD_SEM_WORD_NR(x) ((x) >> BASEP_JD_SEM_PER_WORD_LOG2)
#define BASEP_JD_SEM_MASK_IN_WORD(x) (1 << ((x) & (BASEP_JD_SEM_PER_WORD - 1)))
#define BASEP_JD_SEM_ARRAY_SIZE BASEP_JD_SEM_WORD_NR(BASE_JD_ATOM_COUNT)
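
/**
 * Illustrative sketch (not part of this interface): how the semaphore bitmap
 * macros above are typically combined to set and test the bit belonging to a
 * given atom number. The 'sem' array and 'atom_nr' names are hypothetical.
 *
 * @code
 * u32 sem[BASEP_JD_SEM_ARRAY_SIZE] = { 0 };
 *
 * sem[BASEP_JD_SEM_WORD_NR(atom_nr)] |= BASEP_JD_SEM_MASK_IN_WORD(atom_nr);
 *
 * if (sem[BASEP_JD_SEM_WORD_NR(atom_nr)] & BASEP_JD_SEM_MASK_IN_WORD(atom_nr)) {
 *     // the semaphore bit for atom_nr is set
 * }
 * @endcode
 */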

/* Set/reset values for a software event */
#define BASE_JD_SOFT_EVENT_SET ((unsigned char)1)
#define BASE_JD_SOFT_EVENT_RESET ((unsigned char)0)

#define BASE_GPU_NUM_TEXTURE_FEATURES_REGISTERS 3

#define BASE_MAX_COHERENT_GROUPS 16

#if defined CDBG_ASSERT
#define LOCAL_ASSERT CDBG_ASSERT
#elif defined KBASE_DEBUG_ASSERT
#define LOCAL_ASSERT KBASE_DEBUG_ASSERT
#else
#error assert macro not defined!
#endif

#if defined PAGE_MASK
#define LOCAL_PAGE_LSB ~PAGE_MASK
#else
#include <osu/mali_osu.h>

#if defined OSU_CONFIG_CPU_PAGE_SIZE_LOG2
#define LOCAL_PAGE_LSB ((1ul << OSU_CONFIG_CPU_PAGE_SIZE_LOG2) - 1)
#else
#error Failed to find page size
#endif
#endif

/** 32/64-bit neutral way to represent pointers */
typedef union kbase_pointer {
    void __user *value; /**< client should store their pointers here */
    u32 compat_value;   /**< 64-bit kernels should fetch value here when handling 32-bit clients */
    u64 sizer;          /**< Force 64-bit storage for all clients regardless */
} kbase_pointer;
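
/**
 * Illustrative sketch (an assumption, not a definition from this header): how
 * a client and a 64-bit kernel might each use the union above. 'user_ptr' and
 * 'is_compat' are hypothetical names.
 *
 * @code
 * union kbase_pointer p;
 *
 * p.sizer = 0;        // zero all 64 bits first, so the kernel never sees
 *                     // stale upper bits left behind by a 32-bit client
 * p.value = user_ptr; // client stores its pointer
 *
 * // 64-bit kernel decoding the value:
 * u64 addr = is_compat ? (u64)p.compat_value : (u64)(uintptr_t)p.value;
 * @endcode
 */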

/**
 * @addtogroup base_user_api User-side Base APIs
 * @{
 */

/**
 * @addtogroup base_user_api_memory User-side Base Memory APIs
 * @{
 */

/**
 * typedef base_mem_alloc_flags - Memory allocation, access/hint flags.
 *
 * A combination of MEM_PROT/MEM_HINT flags must be passed to each allocator
 * in order to determine the best cache policy. Some combinations are
 * of course invalid, e.g. MEM_PROT_CPU_WR | MEM_HINT_CPU_RD, which would
 * define a write-only region on the CPU side that the CPU then reads heavily.
 * Other flags are only meaningful to a particular allocator.
 * More flags can be added to this list, as long as they don't clash
 * (see BASE_MEM_FLAGS_NR_BITS for the number of the first free bit).
 */
typedef u32 base_mem_alloc_flags;
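
/**
 * Illustrative sketch (an assumption, not mandated by this header): a typical
 * flag combination for a buffer the CPU writes and the GPU reads, using the
 * flag definitions below.
 *
 * @code
 * base_mem_alloc_flags flags =
 *     BASE_MEM_PROT_CPU_WR | BASE_MEM_PROT_GPU_RD | BASE_MEM_SAME_VA;
 * @endcode
 */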

/* Memory allocation, access/hint flags.
 *
 * See base_mem_alloc_flags.
 */

/* IN */
/* Read access CPU side */
#define BASE_MEM_PROT_CPU_RD ((base_mem_alloc_flags)1 << 0)

/* Write access CPU side */
#define BASE_MEM_PROT_CPU_WR ((base_mem_alloc_flags)1 << 1)

/* Read access GPU side */
#define BASE_MEM_PROT_GPU_RD ((base_mem_alloc_flags)1 << 2)

/* Write access GPU side */
#define BASE_MEM_PROT_GPU_WR ((base_mem_alloc_flags)1 << 3)

/* Execute allowed on the GPU side */
#define BASE_MEM_PROT_GPU_EX ((base_mem_alloc_flags)1 << 4)

/* BASE_MEM_HINT flags have been removed, but their values are reserved
 * for backwards compatibility with older user-space drivers. The values
 * can be re-used once support for r5p0 user-space drivers is removed,
 * presumably in r7p0.
 *
 * RESERVED: (1U << 5)
 * RESERVED: (1U << 6)
 * RESERVED: (1U << 7)
 * RESERVED: (1U << 8)
 */

/* Grow backing store on GPU Page Fault */
#define BASE_MEM_GROW_ON_GPF ((base_mem_alloc_flags)1 << 9)

/* Page coherence Outer shareable, if available */
#define BASE_MEM_COHERENT_SYSTEM ((base_mem_alloc_flags)1 << 10)

/* Page coherence Inner shareable */
#define BASE_MEM_COHERENT_LOCAL ((base_mem_alloc_flags)1 << 11)

/* Should be cached on the CPU */
#define BASE_MEM_CACHED_CPU ((base_mem_alloc_flags)1 << 12)

/* IN/OUT */
/* Must have same VA on both the GPU and the CPU */
#define BASE_MEM_SAME_VA ((base_mem_alloc_flags)1 << 13)

/* OUT */
/* Must call mmap to acquire a GPU address for the alloc */
#define BASE_MEM_NEED_MMAP ((base_mem_alloc_flags)1 << 14)

/* IN */
/* Page coherence Outer shareable, required. */
#define BASE_MEM_COHERENT_SYSTEM_REQUIRED ((base_mem_alloc_flags)1 << 15)

/* Secure memory */
#define BASE_MEM_SECURE ((base_mem_alloc_flags)1 << 16)

/* Physical memory backing is not needed */
#define BASE_MEM_DONT_NEED ((base_mem_alloc_flags)1 << 17)

/* Must use shared CPU/GPU zone (SAME_VA zone) but doesn't require the
 * addresses to be the same
 */
#define BASE_MEM_IMPORT_SHARED ((base_mem_alloc_flags)1 << 18)

/* Number of bits used as flags for base memory management
 *
 * Must be kept in sync with the base_mem_alloc_flags flags
 */
#define BASE_MEM_FLAGS_NR_BITS 19

/* A mask for all output bits, excluding IN/OUT bits. */
#define BASE_MEM_FLAGS_OUTPUT_MASK BASE_MEM_NEED_MMAP

/* A mask for all input bits, including IN/OUT bits. */
#define BASE_MEM_FLAGS_INPUT_MASK \
    (((1 << BASE_MEM_FLAGS_NR_BITS) - 1) & ~BASE_MEM_FLAGS_OUTPUT_MASK)

/* A mask for all the flags which are modifiable via the base_mem_set_flags
 * interface.
 */
#define BASE_MEM_FLAGS_MODIFIABLE \
    (BASE_MEM_DONT_NEED | BASE_MEM_COHERENT_SYSTEM | \
     BASE_MEM_COHERENT_LOCAL)
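
/**
 * Illustrative sketch (an assumption): how the masks above can be used to
 * reject flags that a caller is not allowed to pass in. The helper name is
 * hypothetical.
 *
 * @code
 * static int flags_are_valid_input(base_mem_alloc_flags flags)
 * {
 *     // any bit outside the input mask must not be set by the caller
 *     return (flags & ~BASE_MEM_FLAGS_INPUT_MASK) == 0;
 * }
 * @endcode
 */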

/**
 * enum base_mem_import_type - Memory types supported by @a base_mem_import
 *
 * @BASE_MEM_IMPORT_TYPE_INVALID: Invalid type
 * @BASE_MEM_IMPORT_TYPE_UMP: UMP import. Handle type is ump_secure_id.
 * @BASE_MEM_IMPORT_TYPE_UMM: UMM import. Handle type is a file descriptor (int)
 * @BASE_MEM_IMPORT_TYPE_USER_BUFFER: User buffer import. Handle is a
 * base_mem_import_user_buffer
 *
 * Each type defines what the supported handle type is.
 *
 * If any new type is added here ARM must be contacted
 * to allocate a numeric value for it.
 * Do not just add a new type without synchronizing with ARM
 * as future releases from ARM might include other new types
 * which could clash with your custom types.
 */
typedef enum base_mem_import_type {
    BASE_MEM_IMPORT_TYPE_INVALID = 0,
    BASE_MEM_IMPORT_TYPE_UMP = 1,
    BASE_MEM_IMPORT_TYPE_UMM = 2,
    BASE_MEM_IMPORT_TYPE_USER_BUFFER = 3
} base_mem_import_type;

/**
 * struct base_mem_import_user_buffer - Handle of an imported user buffer
 *
 * @ptr: kbase_pointer to imported user buffer
 * @length: length of imported user buffer in bytes
 *
 * This structure is used to represent a handle of an imported user buffer.
 */
struct base_mem_import_user_buffer {
    kbase_pointer ptr;
    u64 length;
};
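
/**
 * Illustrative sketch (an assumption; 'buf' and 'len' are hypothetical):
 * filling in the user-buffer handle for a BASE_MEM_IMPORT_TYPE_USER_BUFFER
 * import.
 *
 * @code
 * struct base_mem_import_user_buffer handle;
 *
 * handle.ptr.sizer = 0;   // clear all 64 bits of the union first
 * handle.ptr.value = buf; // user-space pointer to the buffer
 * handle.length = len;    // length in bytes
 * @endcode
 */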

/**
 * @brief Invalid memory handle.
 *
 * Return value from functions returning @ref base_mem_handle on error.
 *
 * @warning @ref base_mem_handle_new_invalid must be used instead of this macro
 * in C++ code or other situations where compound literals cannot be used.
 */
#define BASE_MEM_INVALID_HANDLE ((base_mem_handle) { {BASEP_MEM_INVALID_HANDLE} })

/**
 * @brief Special write-alloc memory handle.
 *
 * A special handle is used to represent a region where a special page is mapped
 * with a write-alloc cache setup, typically used when the write result of the
 * GPU isn't needed, but the GPU must write anyway.
 *
 * @warning @ref base_mem_handle_new_write_alloc must be used instead of this macro
 * in C++ code or other situations where compound literals cannot be used.
 */
#define BASE_MEM_WRITE_ALLOC_PAGES_HANDLE ((base_mem_handle) { {BASEP_MEM_WRITE_ALLOC_PAGES_HANDLE} })

#define BASEP_MEM_INVALID_HANDLE (0ull << 12)
#define BASE_MEM_MMU_DUMP_HANDLE (1ull << 12)
#define BASE_MEM_TRACE_BUFFER_HANDLE (2ull << 12)
#define BASE_MEM_MAP_TRACKING_HANDLE (3ull << 12)
#define BASEP_MEM_WRITE_ALLOC_PAGES_HANDLE (4ull << 12)
/* reserved handles ..-64<<PAGE_SHIFT> for future special handles */
#define BASE_MEM_COOKIE_BASE (64ul << 12)
#define BASE_MEM_FIRST_FREE_ADDRESS ((BITS_PER_LONG << 12) + \
    BASE_MEM_COOKIE_BASE)

/* Mask to detect 4GB boundary alignment */
#define BASE_MEM_MASK_4GB 0xfffff000UL

/* Bit mask of cookies used for memory allocation setup */
#define KBASE_COOKIE_MASK ~1UL /* bit 0 is reserved */

/**
 * @brief Result codes of changing the size of the backing store allocated to a tmem region
 */
typedef enum base_backing_threshold_status {
    BASE_BACKING_THRESHOLD_OK = 0,                      /**< Resize successful */
    BASE_BACKING_THRESHOLD_ERROR_OOM = -2,              /**< Increase failed due to an out-of-memory condition */
    BASE_BACKING_THRESHOLD_ERROR_INVALID_ARGUMENTS = -4 /**< Invalid arguments (not tmem, illegal size request, etc.) */
} base_backing_threshold_status;

/**
 * @addtogroup base_user_api_memory_defered User-side Base Deferred Memory Coherency APIs
 * @{
 */

/**
 * @brief A basic memory operation (sync-set).
 *
 * The content of this structure is private, and should only be used
 * by the accessors.
 */
typedef struct base_syncset {
    struct basep_syncset basep_sset;
} base_syncset;

/** @} end group base_user_api_memory_defered */

/**
 * Handle to represent an imported memory object.
 * Simple opaque handle to imported memory; it can't be used
 * with anything but base_external_resource_init to bind to an atom.
 */
typedef struct base_import_handle {
    struct {
        u64 handle;
    } basep;
} base_import_handle;

/** @} end group base_user_api_memory */

/**
 * @addtogroup base_user_api_job_dispatch User-side Base Job Dispatcher APIs
 * @{
 */

typedef int platform_fence_type;
#define INVALID_PLATFORM_FENCE ((platform_fence_type)-1)

/**
 * Base stream handle.
 *
 * References an underlying base stream object.
 */
typedef struct base_stream {
    struct {
        int fd;
    } basep;
} base_stream;

/**
 * Base fence handle.
 *
 * References an underlying base fence object.
 */
typedef struct base_fence {
    struct {
        int fd;
        int stream_fd;
    } basep;
} base_fence;

/**
 * @brief Per-job data
 *
 * This structure is used to store per-job data, and is completely unused
 * by the Base driver. It can be used to store things such as callback
 * function pointers or data needed to handle job completion. It is
 * guaranteed to be untouched by the Base driver.
 */
typedef struct base_jd_udata {
    u64 blob[2]; /**< per-job data array */
} base_jd_udata;

/**
 * @brief Memory aliasing info
 *
 * Describes a memory handle to be aliased.
 * A subset of the handle can be chosen for aliasing, given an offset and a
 * length.
 * A special handle BASE_MEM_WRITE_ALLOC_PAGES_HANDLE is used to represent a
 * region where a special page is mapped with a write-alloc cache setup,
 * typically used when the write result of the GPU isn't needed, but the GPU
 * must write anyway.
 *
 * Offset and length are specified in pages.
 * Offset must be within the size of the handle.
 * Offset+length must not overrun the size of the handle.
 *
 * @handle Handle to alias, can be BASE_MEM_WRITE_ALLOC_PAGES_HANDLE
 * @offset Offset within the handle to start aliasing from, in pages.
 *         Not used with BASE_MEM_WRITE_ALLOC_PAGES_HANDLE.
 * @length Length to alias, in pages. For BASE_MEM_WRITE_ALLOC_PAGES_HANDLE
 *         specifies the number of times the special page is needed.
 */
struct base_mem_aliasing_info {
    base_mem_handle handle;
    u64 offset;
    u64 length;
};
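
/**
 * Illustrative sketch (an assumption; 'h' and the page counts are
 * hypothetical): describing an alias of pages 2..5 of an existing handle,
 * plus four instances of the special write-alloc page.
 *
 * @code
 * struct base_mem_aliasing_info ai[2];
 *
 * ai[0].handle = h;                                 // normal handle
 * ai[0].offset = 2;                                 // start page within h
 * ai[0].length = 4;                                 // number of pages
 *
 * ai[1].handle = BASE_MEM_WRITE_ALLOC_PAGES_HANDLE; // special page
 * ai[1].offset = 0;                                 // ignored for this handle
 * ai[1].length = 4;                                 // special page, 4 times
 * @endcode
 */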

/**
 * struct base_jit_alloc_info - Structure which describes a JIT allocation
 *                              request.
 * @gpu_alloc_addr: The GPU virtual address to write the JIT
 *                  allocated GPU virtual address to.
 * @va_pages: The minimum number of virtual pages required.
 * @commit_pages: The minimum number of physical pages which
 *                should back the allocation.
 * @extent: Granularity of physical pages to grow the
 *          allocation by during a fault.
 * @id: Unique ID provided by the caller, this is used
 *      to pair allocation and free requests.
 *      Zero is not a valid value.
 */
struct base_jit_alloc_info {
    u64 gpu_alloc_addr;
    u64 va_pages;
    u64 commit_pages;
    u64 extent;
    u8 id;
};
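
/**
 * Illustrative sketch (an assumption; 'result_slot_gpu_va' is hypothetical):
 * a minimal JIT allocation request for 16 virtual pages with one page
 * committed up front, grown one page at a time on fault.
 *
 * @code
 * struct base_jit_alloc_info info;
 *
 * info.gpu_alloc_addr = result_slot_gpu_va; // where the GPU VA is written back
 * info.va_pages = 16;
 * info.commit_pages = 1;
 * info.extent = 1;
 * info.id = 1; // must be non-zero, and not reused until freed
 *              // via BASE_JD_REQ_SOFT_JIT_FREE
 * @endcode
 */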

/**
 * @brief Job dependency type.
 *
 * A flags field will be inserted into the atom structure to specify whether a
 * dependency is a data or ordering dependency (by putting it before/after
 * 'core_req' in the structure it should be possible to add it without
 * changing the structure size).
 * When the flag is set for a particular dependency to signal that it is an
 * ordering-only dependency, errors will not be propagated.
 */
typedef u8 base_jd_dep_type;

#define BASE_JD_DEP_TYPE_INVALID (0)     /**< Invalid dependency */
#define BASE_JD_DEP_TYPE_DATA (1U << 0)  /**< Data dependency */
#define BASE_JD_DEP_TYPE_ORDER (1U << 1) /**< Order dependency */

/**
 * @brief Job chain hardware requirements.
 *
 * A job chain must specify what GPU features it needs to allow the
 * driver to schedule the job correctly. Not specifying the
 * correct settings can/will cause an early job termination. Multiple
 * values can be ORed together to specify multiple requirements.
 * A special case is ::BASE_JD_REQ_DEP, which is used to express complex
 * dependencies, and that doesn't execute anything on the hardware.
 */
typedef u32 base_jd_core_req;

/* Requirements that come from the HW */

/**
 * No requirement, dependency only
 */
#define BASE_JD_REQ_DEP ((base_jd_core_req)0)

/**
 * Requires fragment shaders
 */
#define BASE_JD_REQ_FS ((base_jd_core_req)1 << 0)

/**
 * Requires compute shaders
 * This covers any of the following Midgard Job types:
 * - Vertex Shader Job
 * - Geometry Shader Job
 * - An actual Compute Shader Job
 *
 * Compare this with @ref BASE_JD_REQ_ONLY_COMPUTE, which specifies that the
 * job is specifically just the "Compute Shader" job type, and not the "Vertex
 * Shader" nor the "Geometry Shader" job type.
 */
#define BASE_JD_REQ_CS ((base_jd_core_req)1 << 1)
#define BASE_JD_REQ_T ((base_jd_core_req)1 << 2)  /**< Requires tiling */
#define BASE_JD_REQ_CF ((base_jd_core_req)1 << 3) /**< Requires cache flushes */
#define BASE_JD_REQ_V ((base_jd_core_req)1 << 4)  /**< Requires value writeback */

/* SW-only requirements - the HW does not expose these as part of the job slot capabilities */

/* Requires fragment job with AFBC encoding */
#define BASE_JD_REQ_FS_AFBC ((base_jd_core_req)1 << 13)

/**
 * SW-only requirement: coalesce completion events.
 * If this bit is set then completion of this atom will not cause an event to
 * be sent to userspace, whether successful or not; completion events will be
 * deferred until an atom completes which does not have this bit set.
 *
 * This bit may not be used in combination with BASE_JD_REQ_EXTERNAL_RESOURCES.
 */
#define BASE_JD_REQ_EVENT_COALESCE ((base_jd_core_req)1 << 5)

/**
 * SW Only requirement: the job chain requires a coherent core group. We don't
 * mind which coherent core group is used.
 */
#define BASE_JD_REQ_COHERENT_GROUP ((base_jd_core_req)1 << 6)

/**
 * SW Only requirement: The performance counters should be enabled only when
 * they are needed, to reduce power consumption.
 */
#define BASE_JD_REQ_PERMON ((base_jd_core_req)1 << 7)

/**
 * SW Only requirement: External resources are referenced by this atom.
 * When external resources are referenced, no syncsets can be bundled with the
 * atom; they should instead be part of NULL jobs inserted into the dependency
 * tree. The first pre_dep object must be configured for the external resources
 * to use; the second pre_dep object can be used to create other dependencies.
 *
 * This bit may not be used in combination with BASE_JD_REQ_EVENT_COALESCE.
 */
#define BASE_JD_REQ_EXTERNAL_RESOURCES ((base_jd_core_req)1 << 8)

/**
 * SW Only requirement: Software defined job. Jobs with this bit set will not be submitted
 * to the hardware but will cause some action to happen within the driver
 */
#define BASE_JD_REQ_SOFT_JOB ((base_jd_core_req)1 << 9)

#define BASE_JD_REQ_SOFT_DUMP_CPU_GPU_TIME (BASE_JD_REQ_SOFT_JOB | 0x1)
#define BASE_JD_REQ_SOFT_FENCE_TRIGGER (BASE_JD_REQ_SOFT_JOB | 0x2)
#define BASE_JD_REQ_SOFT_FENCE_WAIT (BASE_JD_REQ_SOFT_JOB | 0x3)

/**
 * SW Only requirement: Replay job.
 *
 * If the preceding job fails, the replay job will cause the jobs specified in
 * the list of base_jd_replay_payload pointed to by the jc pointer to be
 * replayed.
 *
 * A replay job will only cause jobs to be replayed up to BASEP_JD_REPLAY_LIMIT
 * times. If a job fails more than BASEP_JD_REPLAY_LIMIT times then the replay
 * job is failed, as well as any following dependencies.
 *
 * The replayed jobs will require a number of atom IDs. If there are not enough
 * free atom IDs then the replay job will fail.
 *
 * If the preceding job does not fail, then the replay job is returned as
 * completed.
 *
 * The replayed jobs will never be returned to userspace. The preceding failed
 * job will be returned to userspace as failed; the status of this job should
 * be ignored. Completion should be determined by the status of the replay soft
 * job.
 *
 * In order for the jobs to be replayed, the job headers will have to be
 * modified. The Status field will be reset to NOT_STARTED. If the Job Type
 * field indicates a Vertex Shader Job then it will be changed to Null Job.
 *
 * The replayed jobs have the following assumptions:
 *
 * - No external resources. Any required external resources will be held by the
 *   replay atom.
 * - Pre-dependencies are created based on job order.
 * - Atom numbers are automatically assigned.
 * - device_nr is set to 0. This is not relevant as
 *   BASE_JD_REQ_SPECIFIC_COHERENT_GROUP should not be set.
 * - Priority is inherited from the replay job.
 */
#define BASE_JD_REQ_SOFT_REPLAY (BASE_JD_REQ_SOFT_JOB | 0x4)
/**
 * SW only requirement: event wait/trigger job.
 *
 * - BASE_JD_REQ_SOFT_EVENT_WAIT: this job will block until the event is set.
 * - BASE_JD_REQ_SOFT_EVENT_SET: this job sets the event, thus unblocking the
 *   other waiting jobs. It completes immediately.
 * - BASE_JD_REQ_SOFT_EVENT_RESET: this job resets the event, making it
 *   possible for other jobs to wait upon. It completes immediately.
 */
#define BASE_JD_REQ_SOFT_EVENT_WAIT (BASE_JD_REQ_SOFT_JOB | 0x5)
#define BASE_JD_REQ_SOFT_EVENT_SET (BASE_JD_REQ_SOFT_JOB | 0x6)
#define BASE_JD_REQ_SOFT_EVENT_RESET (BASE_JD_REQ_SOFT_JOB | 0x7)

#define BASE_JD_REQ_SOFT_DEBUG_COPY (BASE_JD_REQ_SOFT_JOB | 0x8)

/**
 * SW only requirement: Just In Time allocation
 *
 * This job requests a JIT allocation based on the request in the
 * @ref base_jit_alloc_info structure which is passed via the jc element of
 * the atom.
 *
 * It should be noted that the id entry in @ref base_jit_alloc_info must not
 * be reused until it has been released via @ref BASE_JD_REQ_SOFT_JIT_FREE.
 *
 * Should this soft job fail, it is expected that a @ref BASE_JD_REQ_SOFT_JIT_FREE
 * soft job to free the JIT allocation is still made.
 *
 * The job will complete immediately.
 */
#define BASE_JD_REQ_SOFT_JIT_ALLOC (BASE_JD_REQ_SOFT_JOB | 0x9)
/**
 * SW only requirement: Just In Time free
 *
 * This job requests that a JIT allocation created by @ref BASE_JD_REQ_SOFT_JIT_ALLOC
 * be freed. The ID of the JIT allocation is passed via the jc element of
 * the atom.
 *
 * The job will complete immediately.
 */
#define BASE_JD_REQ_SOFT_JIT_FREE (BASE_JD_REQ_SOFT_JOB | 0xa)

/**
 * SW only requirement: Map external resource
 *
 * This job requests that external resource(s) are mapped once the dependencies
 * of the job have been satisfied. The list of external resources is
 * passed via the jc element of the atom, which is a pointer to a
 * @ref base_external_resource_list.
 */
#define BASE_JD_REQ_SOFT_EXT_RES_MAP (BASE_JD_REQ_SOFT_JOB | 0xb)
/**
 * SW only requirement: Unmap external resource
 *
 * This job requests that external resource(s) are unmapped once the dependencies
 * of the job have been satisfied. The list of external resources is
 * passed via the jc element of the atom, which is a pointer to a
 * @ref base_external_resource_list.
 */
#define BASE_JD_REQ_SOFT_EXT_RES_UNMAP (BASE_JD_REQ_SOFT_JOB | 0xc)

/**
 * HW Requirement: Requires Compute shaders (but not Vertex or Geometry Shaders)
 *
 * This indicates that the Job Chain contains Midgard Jobs of the 'Compute Shaders' type.
 *
 * In contrast to @ref BASE_JD_REQ_CS, this does \b not indicate that the Job
 * Chain contains 'Geometry Shader' or 'Vertex Shader' jobs.
 */
#define BASE_JD_REQ_ONLY_COMPUTE ((base_jd_core_req)1 << 10)

/**
 * HW Requirement: Use the base_jd_atom::device_nr field to specify a
 * particular core group
 *
 * If both @ref BASE_JD_REQ_COHERENT_GROUP and this flag are set, this flag takes priority
 *
 * This is only guaranteed to work for @ref BASE_JD_REQ_ONLY_COMPUTE atoms.
 *
 * If the core availability policy is keeping the required core group turned off, then
 * the job will fail with a @ref BASE_JD_EVENT_PM_EVENT error code.
 */
#define BASE_JD_REQ_SPECIFIC_COHERENT_GROUP ((base_jd_core_req)1 << 11)

/**
 * SW Flag: If this bit is set then the successful completion of this atom
 * will not cause an event to be sent to userspace
 */
#define BASE_JD_REQ_EVENT_ONLY_ON_FAILURE ((base_jd_core_req)1 << 12)

/**
 * SW Flag: If this bit is set then completion of this atom will not cause an
 * event to be sent to userspace, whether successful or not.
 */
#define BASEP_JD_REQ_EVENT_NEVER ((base_jd_core_req)1 << 14)

/**
 * SW Flag: Skip GPU cache clean and invalidation before starting a GPU job.
 *
 * If this bit is set then the GPU's cache will not be cleaned and invalidated
 * until a GPU job starts which does not have this bit set or a job completes
 * which does not have the @ref BASE_JD_REQ_SKIP_CACHE_END bit set. Do not use if
 * the CPU may have written to memory addressed by the job since the last job
 * without this bit set was submitted.
 */
#define BASE_JD_REQ_SKIP_CACHE_START ((base_jd_core_req)1 << 15)

/**
 * SW Flag: Skip GPU cache clean and invalidation after a GPU job completes.
 *
 * If this bit is set then the GPU's cache will not be cleaned and invalidated
 * until a GPU job completes which does not have this bit set or a job starts
 * which does not have the @ref BASE_JD_REQ_SKIP_CACHE_START bit set. Do not use if
 * the CPU may read from or partially overwrite memory addressed by the job
 * before the next job without this bit set completes.
 */
#define BASE_JD_REQ_SKIP_CACHE_END ((base_jd_core_req)1 << 16)

/**
 * These requirement bits are currently unused in base_jd_core_req
 */
#define BASEP_JD_REQ_RESERVED \
    (~(BASE_JD_REQ_ATOM_TYPE | BASE_JD_REQ_EXTERNAL_RESOURCES | \
    BASE_JD_REQ_EVENT_ONLY_ON_FAILURE | BASEP_JD_REQ_EVENT_NEVER | \
    BASE_JD_REQ_EVENT_COALESCE | \
    BASE_JD_REQ_COHERENT_GROUP | BASE_JD_REQ_SPECIFIC_COHERENT_GROUP | \
    BASE_JD_REQ_FS_AFBC | BASE_JD_REQ_PERMON | \
    BASE_JD_REQ_SKIP_CACHE_START | BASE_JD_REQ_SKIP_CACHE_END))

/**
 * Mask of all bits in base_jd_core_req that control the type of the atom.
 *
 * This allows dependency only atoms to have flags set
 */
#define BASE_JD_REQ_ATOM_TYPE \
    (BASE_JD_REQ_FS | BASE_JD_REQ_CS | BASE_JD_REQ_T | BASE_JD_REQ_CF | \
    BASE_JD_REQ_V | BASE_JD_REQ_SOFT_JOB | BASE_JD_REQ_ONLY_COMPUTE)

/**
 * Mask of all bits in base_jd_core_req that control the type of a soft job.
 */
#define BASE_JD_REQ_SOFT_JOB_TYPE (BASE_JD_REQ_SOFT_JOB | 0x1f)

/*
 * Returns a non-zero value if the core requirements passed define a soft job
 * or a dependency-only job.
 */
#define BASE_JD_REQ_SOFT_JOB_OR_DEP(core_req) \
    (((core_req) & BASE_JD_REQ_SOFT_JOB) || \
    ((core_req) & BASE_JD_REQ_ATOM_TYPE) == BASE_JD_REQ_DEP)
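
/**
 * Illustrative sketch (an assumption): distinguishing soft jobs from hardware
 * atoms using the masks above.
 *
 * @code
 * base_jd_core_req req = BASE_JD_REQ_SOFT_FENCE_WAIT;
 *
 * if (BASE_JD_REQ_SOFT_JOB_OR_DEP(req)) {
 *     // handled entirely by the driver, never submitted to the GPU
 * }
 *
 * // extract which soft job this is (e.g. BASE_JD_REQ_SOFT_FENCE_WAIT):
 * base_jd_core_req soft_type = req & BASE_JD_REQ_SOFT_JOB_TYPE;
 * @endcode
 */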

/**
 * @brief States to model the state machine processed by kbasep_js_job_check_ref_cores(),
 * which handles retaining cores for power management and affinity management.
 *
 * The state @ref KBASE_ATOM_COREREF_STATE_RECHECK_AFFINITY prevents an attack
 * where lots of atoms could be submitted before powerup, and each has an
 * affinity chosen that causes other atoms to have an affinity
 * violation. Whilst the affinity was not causing violations at the time it
 * was chosen, it could cause violations thereafter. For example, 1000 jobs
 * could have had their affinity chosen during the powerup time, so any of
 * those 1000 jobs could cause an affinity violation later on.
 *
 * The attack would otherwise occur because other atoms/contexts have to wait for:
 * -# the currently running atoms (which are causing the violation) to
 *    finish
 * -# and, the atoms that had their affinity chosen during powerup to
 *    finish. These are run preferentially because they don't cause a
 *    violation, but instead continue to cause the violation in others.
 * -# or, the attacker is scheduled out (which might not happen for just 2
 *    contexts)
 *
 * By re-choosing the affinity (which is designed to avoid violations at the
 * time it's chosen), we break condition (2) of the wait, which minimizes the
 * problem to just waiting for current jobs to finish (which can be bounded if
 * the Job Scheduling Policy has a timer).
 */
enum kbase_atom_coreref_state {
    /** Starting state: No affinity chosen, and cores must be requested. kbase_jd_atom::affinity==0 */
    KBASE_ATOM_COREREF_STATE_NO_CORES_REQUESTED,
    /** Cores requested, but waiting for them to be powered. Requested cores given by kbase_jd_atom::affinity */
    KBASE_ATOM_COREREF_STATE_WAITING_FOR_REQUESTED_CORES,
    /** Cores given by kbase_jd_atom::affinity are powered, but affinity might be out-of-date, so must recheck */
    KBASE_ATOM_COREREF_STATE_RECHECK_AFFINITY,
    /** Cores given by kbase_jd_atom::affinity are powered, and affinity is up-to-date, but must check for violations */
    KBASE_ATOM_COREREF_STATE_CHECK_AFFINITY_VIOLATIONS,
    /** Cores are powered, kbase_jd_atom::affinity up-to-date, no affinity violations: atom can be submitted to HW */
    KBASE_ATOM_COREREF_STATE_READY
};

/*
 * Base Atom priority
 *
 * Only certain priority levels are actually implemented, as specified by the
 * BASE_JD_PRIO_<...> definitions below. It is undefined to use a priority
 * level that is not one of those defined below.
 *
 * Priority levels only affect scheduling between atoms of the same type within
 * a base context, and only after the atoms have had dependencies resolved.
 * Fragment atoms do not affect non-fragment atoms with lower priorities, and
 * vice versa. For example, a low priority atom that has had its
 * dependencies resolved might run before a higher priority atom that has not
 * had its dependencies resolved.
 *
 * The scheduling between base contexts/processes and between atoms from
 * different base contexts/processes is unaffected by atom priority.
 *
 * The atoms are scheduled as follows with respect to their priorities:
 * - Let atoms 'X' and 'Y' be for the same job slot and have their dependencies
 *   resolved, with atom 'X' having a higher priority than atom 'Y'
 * - If atom 'Y' is currently running on the HW, then it is interrupted to
 *   allow atom 'X' to run soon after
 * - If instead neither atom 'Y' nor atom 'X' are running, then when choosing
 *   the next atom to run, atom 'X' will always be chosen instead of atom 'Y'
 * - Any two atoms that have the same priority could run in any order with
 *   respect to each other. That is, there is no ordering constraint between
 *   atoms of the same priority.
 */
typedef u8 base_jd_prio;

/* Medium atom priority. This is a priority higher than BASE_JD_PRIO_LOW */
#define BASE_JD_PRIO_MEDIUM ((base_jd_prio)0)
/* High atom priority. This is a priority higher than BASE_JD_PRIO_MEDIUM and
 * BASE_JD_PRIO_LOW */
#define BASE_JD_PRIO_HIGH ((base_jd_prio)1)
/* Low atom priority. */
#define BASE_JD_PRIO_LOW ((base_jd_prio)2)

/* Count of the number of priority levels. This itself is not a valid
 * base_jd_prio setting */
#define BASE_JD_NR_PRIO_LEVELS 3

enum kbase_jd_atom_state {
    /** Atom is not used */
    KBASE_JD_ATOM_STATE_UNUSED,
    /** Atom is queued in JD */
    KBASE_JD_ATOM_STATE_QUEUED,
    /** Atom has been given to JS (is runnable/running) */
    KBASE_JD_ATOM_STATE_IN_JS,
    /** Atom has been completed, but not yet handed back to job dispatcher
     * for dependency resolution */
    KBASE_JD_ATOM_STATE_HW_COMPLETED,
    /** Atom has been completed, but not yet handed back to userspace */
    KBASE_JD_ATOM_STATE_COMPLETED
};

typedef u16 base_atom_id; /**< Type big enough to store an atom number in */

struct base_dependency {
    base_atom_id atom_id;             /**< An atom number */
    base_jd_dep_type dependency_type; /**< Dependency type */
};

/* This structure has changed since UK 10.2, for which base_jd_core_req was a u16 value.
 * In order to keep the size of the structure the same, the padding field has been
 * adjusted accordingly and a core_req field of u32 type (to which the UK 10.3
 * base_jd_core_req defines map) has been added at the end of the structure. The place
 * in the structure previously occupied by the u16 core_req is kept but renamed to
 * compat_core_req, and as such it can be used in the ioctl call for job submission
 * as long as UK 10.2 legacy is supported. Once this support ends, the field can be
 * left for possible future use. */
typedef struct base_jd_atom_v2 {
    u64 jc;                            /**< job-chain GPU address */
    struct base_jd_udata udata;        /**< user data */
    kbase_pointer extres_list;         /**< list of external resources */
    u16 nr_extres;                     /**< nr of external resources */
    u16 compat_core_req;               /**< core requirements which correspond to the legacy support for UK 10.2 */
    struct base_dependency pre_dep[2]; /**< pre-dependencies; use the setter function to assign this field,
                                            to reduce the possibility of improper assignment of a dependency field */
    base_atom_id atom_number;          /**< unique number to identify the atom */
    base_jd_prio prio;                 /**< Atom priority. Refer to @ref base_jd_prio for more details */
    u8 device_nr;                      /**< coregroup when BASE_JD_REQ_SPECIFIC_COHERENT_GROUP specified */
    u8 padding[1];
    base_jd_core_req core_req;         /**< core requirements */
} base_jd_atom_v2;
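
/**
 * Illustrative sketch (an assumption; 'jobchain_gpu_va' and the atom IDs are
 * hypothetical): populating a v2 atom for a fragment job that depends on a
 * previous atom, using the setter defined later in this file.
 *
 * @code
 * struct base_jd_atom_v2 atom;
 *
 * memset(&atom, 0, sizeof(atom));
 * atom.jc = jobchain_gpu_va;
 * atom.core_req = BASE_JD_REQ_FS;
 * atom.atom_number = 2;
 * atom.prio = BASE_JD_PRIO_MEDIUM;
 * base_jd_atom_dep_set(&atom.pre_dep[0], 1, BASE_JD_DEP_TYPE_DATA);
 * base_jd_atom_dep_set(&atom.pre_dep[1], 0, BASE_JD_DEP_TYPE_INVALID);
 * @endcode
 */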

#ifdef BASE_LEGACY_UK6_SUPPORT
struct base_jd_atom_v2_uk6 {
    u64 jc;                     /**< job-chain GPU address */
    struct base_jd_udata udata; /**< user data */
    kbase_pointer extres_list;  /**< list of external resources */
    u16 nr_extres;              /**< nr of external resources */
    u16 core_req;               /**< core requirements */
    base_atom_id pre_dep[2];    /**< pre-dependencies */
    base_atom_id atom_number;   /**< unique number to identify the atom */
    base_jd_prio prio;          /**< priority - smaller is higher priority */
    u8 device_nr;               /**< coregroup when BASE_JD_REQ_SPECIFIC_COHERENT_GROUP specified */
    u8 padding[7];
};
#endif /* BASE_LEGACY_UK6_SUPPORT */

typedef enum base_external_resource_access {
    BASE_EXT_RES_ACCESS_SHARED,
    BASE_EXT_RES_ACCESS_EXCLUSIVE
} base_external_resource_access;

typedef struct base_external_resource {
    u64 ext_resource;
} base_external_resource;

/**
 * The maximum number of external resources which can be mapped/unmapped
 * in a single request.
 */
#define BASE_EXT_RES_COUNT_MAX 10

/**
 * struct base_external_resource_list - Structure which describes a list of
 *                                      external resources.
 * @count: The number of resources.
 * @ext_res: Array of external resources which is
 *           sized at allocation time.
 */
struct base_external_resource_list {
    u64 count;
    struct base_external_resource ext_res[1];
};
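
/**
 * Illustrative sketch (an assumption): because @ref ext_res is sized at
 * allocation time, a user-side list for 'n' resources is allocated by
 * extending the structure past its declared single-element array.
 *
 * @code
 * size_t bytes = sizeof(struct base_external_resource_list) +
 *                (n - 1) * sizeof(struct base_external_resource);
 * struct base_external_resource_list *list = malloc(bytes);
 *
 * if (list)
 *     list->count = n;
 * @endcode
 */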

struct base_jd_debug_copy_buffer {
    u64 address;
    u64 size;
    struct base_external_resource extres;
};

/**
 * @brief Setter for a dependency structure
 *
 * @param[in] dep The kbase jd atom dependency to be initialized.
 * @param id The atom_id to be assigned.
 * @param dep_type The dep_type to be assigned.
 *
 */
static inline void base_jd_atom_dep_set(struct base_dependency *dep,
        base_atom_id id, base_jd_dep_type dep_type)
{
    LOCAL_ASSERT(dep != NULL);

    /*
     * make sure we don't set disallowed combinations
     * of atom_id/dependency_type.
     */
    LOCAL_ASSERT((id == 0 && dep_type == BASE_JD_DEP_TYPE_INVALID) ||
            (id > 0 && dep_type != BASE_JD_DEP_TYPE_INVALID));

    dep->atom_id = id;
    dep->dependency_type = dep_type;
}

/**
 * @brief Make a copy of a dependency structure
 *
 * @param[in,out] dep The kbase jd atom dependency to be written.
 * @param[in] from The dependency to make a copy from.
 *
 */
static inline void base_jd_atom_dep_copy(struct base_dependency *dep,
        const struct base_dependency *from)
{
    LOCAL_ASSERT(dep != NULL);

    base_jd_atom_dep_set(dep, from->atom_id, from->dependency_type);
}

/**
 * @brief Soft-atom fence trigger setup.
 *
 * Sets up an atom to be a SW-only atom signaling a fence
 * when it reaches the run state.
 *
 * Using the existing base dependency system the fence can
 * be set to trigger when a GPU job has finished.
 *
 * The base fence object must not be terminated until the atom
 * has been submitted to @a base_jd_submit and @a base_jd_submit has returned.
 *
 * @a fence must be a valid fence set up with @a base_fence_init.
 * Calling this function with an uninitialized fence results in undefined behavior.
 *
 * @param[out] atom A pre-allocated atom to configure as a fence trigger SW atom
 * @param[in] fence The base fence object to trigger.
 */
static inline void base_jd_fence_trigger_setup_v2(struct base_jd_atom_v2 *atom, struct base_fence *fence)
{
    LOCAL_ASSERT(atom);
    LOCAL_ASSERT(fence);
    LOCAL_ASSERT(fence->basep.fd == INVALID_PLATFORM_FENCE);
    LOCAL_ASSERT(fence->basep.stream_fd >= 0);
    atom->jc = (uintptr_t) fence;
    atom->core_req = BASE_JD_REQ_SOFT_FENCE_TRIGGER;
}

/**
 * @brief Soft-atom fence wait setup.
 *
 * Sets up an atom to be a SW-only atom waiting on a fence.
 * When the fence becomes triggered the atom becomes runnable
 * and completes immediately.
 *
 * Using the existing base dependency system the fence can
 * be set to block a GPU job until it has been triggered.
 *
 * The base fence object must not be terminated until the atom
 * has been submitted to @a base_jd_submit and @a base_jd_submit has returned.
 *
 * @a fence must be a valid fence set up with @a base_fence_init or @a base_fence_import.
 * Calling this function with an uninitialized fence results in undefined behavior.
 *
 * @param[out] atom A pre-allocated atom to configure as a fence wait SW atom
 * @param[in] fence The base fence object to wait on
 */
static inline void base_jd_fence_wait_setup_v2(struct base_jd_atom_v2 *atom, struct base_fence *fence)
{
    LOCAL_ASSERT(atom);
    LOCAL_ASSERT(fence);
    LOCAL_ASSERT(fence->basep.fd >= 0);
    atom->jc = (uintptr_t) fence;
    atom->core_req = BASE_JD_REQ_SOFT_FENCE_WAIT;
}
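
/**
 * Illustrative sketch (an assumption; 'out_fence' and 'in_fence' are
 * hypothetical fences already prepared with @a base_fence_init /
 * @a base_fence_import): a typical trigger/wait pairing across two atoms.
 *
 * @code
 * struct base_jd_atom_v2 trigger_atom, wait_atom;
 *
 * base_jd_fence_trigger_setup_v2(&trigger_atom, &out_fence);
 * base_jd_fence_wait_setup_v2(&wait_atom, &in_fence);
 * // both atoms are then submitted via base_jd_submit()
 * @endcode
 */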

/**
 * @brief External resource info initialization.
 *
 * Sets up an external resource object to reference
 * a memory allocation and the type of access requested.
 *
 * @param[in] res The resource object to initialize
 * @param handle The handle to the imported memory object, must be
 *               obtained by calling @ref base_mem_as_import_handle().
 * @param access The type of access requested
 */
static inline void base_external_resource_init(struct base_external_resource *res, struct base_import_handle handle, base_external_resource_access access)
{
	u64 address;

	address = handle.basep.handle;

	LOCAL_ASSERT(res != NULL);
	LOCAL_ASSERT(0 == (address & LOCAL_PAGE_LSB));
	LOCAL_ASSERT(access == BASE_EXT_RES_ACCESS_SHARED || access == BASE_EXT_RES_ACCESS_EXCLUSIVE);

	res->ext_resource = address | (access & LOCAL_PAGE_LSB);
}
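
/*
 * Illustrative sketch only: splitting the packed ext_resource word back into
 * its parts. Because the address is page-aligned, the access type lives in
 * the low bits masked by LOCAL_PAGE_LSB and never overlaps the address. The
 * decode helper is hypothetical, not part of this interface:
 *
 *	static inline void example_ext_res_decode(const struct base_external_resource *res,
 *						  u64 *address,
 *						  base_external_resource_access *access)
 *	{
 *		*address = res->ext_resource & ~(u64)LOCAL_PAGE_LSB;
 *		*access = (base_external_resource_access)
 *				(res->ext_resource & LOCAL_PAGE_LSB);
 *	}
 */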

/**
 * @brief Job chain event code bits
 * Defines the bits used to create ::base_jd_event_code
 */
enum {
	BASE_JD_SW_EVENT_KERNEL = (1u << 15),  /**< Kernel side event */
	BASE_JD_SW_EVENT = (1u << 14),         /**< SW defined event */
	BASE_JD_SW_EVENT_SUCCESS = (1u << 13), /**< Event indicates success (SW events only) */
	BASE_JD_SW_EVENT_JOB = (0u << 11),     /**< Job related event */
	BASE_JD_SW_EVENT_BAG = (1u << 11),     /**< Bag related event */
	BASE_JD_SW_EVENT_INFO = (2u << 11),    /**< Misc/info event */
	BASE_JD_SW_EVENT_RESERVED = (3u << 11), /**< Reserved event type */
	BASE_JD_SW_EVENT_TYPE_MASK = (3u << 11) /**< Mask to extract the type from an event code */
};
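
/*
 * Illustrative sketch only: the type field of an event code is extracted by
 * masking with BASE_JD_SW_EVENT_TYPE_MASK:
 *
 *	unsigned int type = event_code & BASE_JD_SW_EVENT_TYPE_MASK;
 *	// type is one of BASE_JD_SW_EVENT_JOB, _BAG, _INFO or _RESERVED
 */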

/**
 * @brief Job chain event codes
 *
 * HW and low-level SW events are represented by event codes.
 * The status of jobs which succeeded is also represented by
 * an event code (see ::BASE_JD_EVENT_DONE).
 * Events are usually reported as part of a ::base_jd_event.
 *
 * The event codes are encoded in the following way:
 * @li 10:0  - subtype
 * @li 12:11 - type
 * @li 13    - SW success (only valid if the SW bit is set)
 * @li 14    - SW event (HW event if not set)
 * @li 15    - Kernel event (should never be seen in userspace)
 *
 * Events are split up into ranges as follows:
 * - BASE_JD_EVENT_RANGE_\<description\>_START
 * - BASE_JD_EVENT_RANGE_\<description\>_END
 *
 * \a code is in \<description\>'s range when:
 * - <tt>BASE_JD_EVENT_RANGE_\<description\>_START <= code < BASE_JD_EVENT_RANGE_\<description\>_END </tt>
 *
 * Ranges can be asserted for adjacency by testing that the END of the previous
 * is equal to the START of the next. This is useful for optimizing some tests
 * for range.
 *
 * A limitation is that the last member of this enum must explicitly be handled
 * (with an assert-unreachable statement) in switch statements that use
 * variables of this type. Otherwise, the compiler warns that we have not
 * handled that enum value.
 */
typedef enum base_jd_event_code {
	/* HW defined exceptions */

	/** Start of HW Non-fault status codes
	 *
	 * @note Obscurely, BASE_JD_EVENT_TERMINATED indicates a real fault,
	 * because the job was hard-stopped
	 */
	BASE_JD_EVENT_RANGE_HW_NONFAULT_START = 0,

	/* non-fatal exceptions */
	BASE_JD_EVENT_NOT_STARTED = 0x00, /**< Can't be seen by userspace, treated as 'previous job done' */
	BASE_JD_EVENT_DONE = 0x01,
	BASE_JD_EVENT_STOPPED = 0x03,     /**< Can't be seen by userspace, becomes TERMINATED, DONE or JOB_CANCELLED */
	BASE_JD_EVENT_TERMINATED = 0x04,  /**< This is actually a fault status code - the job was hard stopped */
	BASE_JD_EVENT_ACTIVE = 0x08,      /**< Can't be seen by userspace, jobs only returned on complete/fail/cancel */

	/** End of HW Non-fault status codes
	 *
	 * @note Obscurely, BASE_JD_EVENT_TERMINATED indicates a real fault,
	 * because the job was hard-stopped
	 */
	BASE_JD_EVENT_RANGE_HW_NONFAULT_END = 0x40,

	/** Start of HW fault and SW Error status codes */
	BASE_JD_EVENT_RANGE_HW_FAULT_OR_SW_ERROR_START = 0x40,

	/* job exceptions */
	BASE_JD_EVENT_JOB_CONFIG_FAULT = 0x40,
	BASE_JD_EVENT_JOB_POWER_FAULT = 0x41,
	BASE_JD_EVENT_JOB_READ_FAULT = 0x42,
	BASE_JD_EVENT_JOB_WRITE_FAULT = 0x43,
	BASE_JD_EVENT_JOB_AFFINITY_FAULT = 0x44,
	BASE_JD_EVENT_JOB_BUS_FAULT = 0x48,
	BASE_JD_EVENT_INSTR_INVALID_PC = 0x50,
	BASE_JD_EVENT_INSTR_INVALID_ENC = 0x51,
	BASE_JD_EVENT_INSTR_TYPE_MISMATCH = 0x52,
	BASE_JD_EVENT_INSTR_OPERAND_FAULT = 0x53,
	BASE_JD_EVENT_INSTR_TLS_FAULT = 0x54,
	BASE_JD_EVENT_INSTR_BARRIER_FAULT = 0x55,
	BASE_JD_EVENT_INSTR_ALIGN_FAULT = 0x56,
	BASE_JD_EVENT_DATA_INVALID_FAULT = 0x58,
	BASE_JD_EVENT_TILE_RANGE_FAULT = 0x59,
	BASE_JD_EVENT_STATE_FAULT = 0x5A,
	BASE_JD_EVENT_OUT_OF_MEMORY = 0x60,
	BASE_JD_EVENT_UNKNOWN = 0x7F,

	/* GPU exceptions */
	BASE_JD_EVENT_DELAYED_BUS_FAULT = 0x80,
	BASE_JD_EVENT_SHAREABILITY_FAULT = 0x88,

	/* MMU exceptions */
	BASE_JD_EVENT_TRANSLATION_FAULT_LEVEL1 = 0xC1,
	BASE_JD_EVENT_TRANSLATION_FAULT_LEVEL2 = 0xC2,
	BASE_JD_EVENT_TRANSLATION_FAULT_LEVEL3 = 0xC3,
	BASE_JD_EVENT_TRANSLATION_FAULT_LEVEL4 = 0xC4,
	BASE_JD_EVENT_PERMISSION_FAULT = 0xC8,
	BASE_JD_EVENT_TRANSTAB_BUS_FAULT_LEVEL1 = 0xD1,
	BASE_JD_EVENT_TRANSTAB_BUS_FAULT_LEVEL2 = 0xD2,
	BASE_JD_EVENT_TRANSTAB_BUS_FAULT_LEVEL3 = 0xD3,
	BASE_JD_EVENT_TRANSTAB_BUS_FAULT_LEVEL4 = 0xD4,
	BASE_JD_EVENT_ACCESS_FLAG = 0xD8,

	/* SW defined exceptions */
	BASE_JD_EVENT_MEM_GROWTH_FAILED = BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_JOB | 0x000,
	BASE_JD_EVENT_TIMED_OUT = BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_JOB | 0x001,
	BASE_JD_EVENT_JOB_CANCELLED = BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_JOB | 0x002,
	BASE_JD_EVENT_JOB_INVALID = BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_JOB | 0x003,
	BASE_JD_EVENT_PM_EVENT = BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_JOB | 0x004,
	BASE_JD_EVENT_FORCE_REPLAY = BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_JOB | 0x005,

	BASE_JD_EVENT_BAG_INVALID = BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_BAG | 0x003,

	/** End of HW fault and SW Error status codes */
	BASE_JD_EVENT_RANGE_HW_FAULT_OR_SW_ERROR_END = BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_RESERVED | 0x3FF,

	/** Start of SW Success status codes */
	BASE_JD_EVENT_RANGE_SW_SUCCESS_START = BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_SUCCESS | 0x000,

	BASE_JD_EVENT_PROGRESS_REPORT = BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_SUCCESS | BASE_JD_SW_EVENT_JOB | 0x000,
	BASE_JD_EVENT_BAG_DONE = BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_SUCCESS | BASE_JD_SW_EVENT_BAG | 0x000,
	BASE_JD_EVENT_DRV_TERMINATED = BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_SUCCESS | BASE_JD_SW_EVENT_INFO | 0x000,

	/** End of SW Success status codes */
	BASE_JD_EVENT_RANGE_SW_SUCCESS_END = BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_SUCCESS | BASE_JD_SW_EVENT_RESERVED | 0x3FF,

	/** Start of Kernel-only status codes. Such codes are never returned to user-space */
	BASE_JD_EVENT_RANGE_KERNEL_ONLY_START = BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_KERNEL | 0x000,
	BASE_JD_EVENT_REMOVED_FROM_NEXT = BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_KERNEL | BASE_JD_SW_EVENT_JOB | 0x000,

	/** End of Kernel-only status codes. */
	BASE_JD_EVENT_RANGE_KERNEL_ONLY_END = BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_KERNEL | BASE_JD_SW_EVENT_RESERVED | 0x3FF
} base_jd_event_code;
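
/*
 * Illustrative sketch only (hypothetical helper): range membership follows
 * the START <= code < END convention documented above.
 *
 *	static inline int example_code_is_hw_nonfault(base_jd_event_code code)
 *	{
 *		return code >= BASE_JD_EVENT_RANGE_HW_NONFAULT_START &&
 *		       code < BASE_JD_EVENT_RANGE_HW_NONFAULT_END;
 *	}
 */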

/**
 * @brief Event reporting structure
 *
 * This structure is used by the kernel driver to report information
 * about GPU events. These can be either HW-specific events or low-level
 * SW events, such as job-chain completion.
 *
 * The event code contains an event type field which can be extracted
 * by ANDing with ::BASE_JD_SW_EVENT_TYPE_MASK.
 *
 * Based on the event type base_jd_event::data holds:
 * @li ::BASE_JD_SW_EVENT_JOB : the offset in the ring-buffer for the completed
 * job-chain
 * @li ::BASE_JD_SW_EVENT_BAG : The address of the ::base_jd_bag that has
 * been completed (i.e. all contained job-chains have been completed).
 * @li ::BASE_JD_SW_EVENT_INFO : base_jd_event::data not used
 */
typedef struct base_jd_event_v2 {
	base_jd_event_code event_code; /**< event code */
	base_atom_id atom_number;      /**< the atom number that has completed */
	struct base_jd_udata udata;    /**< user data */
} base_jd_event_v2;
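
/*
 * Illustrative sketch only: dispatching on a dequeued event, as described
 * above. example_next_event() is a hypothetical stand-in for however events
 * are read back from the kernel driver.
 *
 *	struct base_jd_event_v2 event;
 *
 *	while (example_next_event(&event)) {
 *		if (event.event_code == BASE_JD_EVENT_DONE) {
 *			// Atom event.atom_number completed; event.udata holds
 *			// the user data attached at submission time.
 *		} else if (event.event_code & BASE_JD_SW_EVENT) {
 *			// SW event: classify via BASE_JD_SW_EVENT_TYPE_MASK.
 *		} else {
 *			// HW fault, e.g. BASE_JD_EVENT_JOB_CONFIG_FAULT.
 *		}
 *	}
 */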

/**
 * Padding required to ensure that the @ref struct base_dump_cpu_gpu_counters structure fills
 * a full cache line.
 */

#define BASE_CPU_GPU_CACHE_LINE_PADDING (36)


/**
 * @brief Structure for BASE_JD_REQ_SOFT_DUMP_CPU_GPU_COUNTERS jobs.
 *
 * This structure is stored into the memory pointed to by the @c jc field of @ref base_jd_atom.
 *
 * This structure must be padded to ensure that it will occupy whole cache lines. This is to avoid
 * cases where access to pages containing the structure is shared between cached and un-cached
 * memory regions, which would cause memory corruption. Here we set the structure size to be 64 bytes,
 * which is the cache line size of ARM Cortex-A15 processors.
 */

typedef struct base_dump_cpu_gpu_counters {
	u64 system_time;
	u64 cycle_counter;
	u64 sec;
	u32 usec;
	u8 padding[BASE_CPU_GPU_CACHE_LINE_PADDING];
} base_dump_cpu_gpu_counters;
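
/*
 * Illustrative sketch only: the padding arithmetic can be checked at compile
 * time. The payload is 3 * 8 + 4 = 28 bytes, and 28 + 36 bytes of padding
 * gives the intended 64-byte cache line:
 *
 *	typedef char example_assert_64_bytes[
 *		(sizeof(base_dump_cpu_gpu_counters) == 64) ? 1 : -1];
 */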


/** @} end group base_user_api_job_dispatch */

#define GPU_MAX_JOB_SLOTS 16

/**
 * @page page_base_user_api_gpuprops User-side Base GPU Property Query API
 *
 * The User-side Base GPU Property Query API encapsulates two
 * sub-modules:
 *
 * - @ref base_user_api_gpuprops_dyn "Dynamic GPU Properties"
 * - @ref base_plat_config_gpuprops "Base Platform Config GPU Properties"
 *
 * There is a related third module outside of Base, which is owned by the MIDG
 * module:
 * - @ref gpu_props_static "Midgard Compile-time GPU Properties"
 *
 * Base only deals with properties that vary between different Midgard
 * implementations - the Dynamic GPU properties and the Platform Config
 * properties.
 *
 * For properties that are constant for the Midgard Architecture, refer to the
 * MIDG module. However, we will discuss their relevance here <b>just to
 * provide background information.</b>
 *
 * @section sec_base_user_api_gpuprops_about About the GPU Properties in Base and MIDG modules
 *
 * The compile-time properties (Platform Config, Midgard Compile-time
 * properties) are exposed as pre-processor macros.
 *
 * Complementing the compile-time properties are the Dynamic GPU
 * Properties, which act as a conduit for the Midgard Configuration
 * Discovery.
 *
 * In general, the dynamic properties are present to verify that the platform
 * has been configured correctly with the right set of Platform Config
 * Compile-time Properties.
 *
 * As a consistent guide across the entire DDK, the choice for dynamic or
 * compile-time should consider the following, in order:
 * -# Can the code be written so that it doesn't need to know the
 * implementation limits at all?
 * -# If you need the limits, get the information from the Dynamic Property
 * lookup. This should be done once as you fetch the context, and then cached
 * as part of the context data structure, so it's cheap to access.
 * -# If there's a clear and arguable inefficiency in using Dynamic Properties,
 * then use a Compile-Time Property (Platform Config, or Midgard Compile-time
 * property). Examples of where this might be sensible follow:
 *  - Part of a critical inner-loop
 *  - Frequent re-use throughout the driver, causing significant extra load
 * instructions or control flow that would be worthwhile optimizing out.
 *
 * We cannot provide an exhaustive set of examples, nor can we provide a
 * rule for every possible situation. Use common sense, and think about: what
 * the rest of the driver will be doing; how the compiler might represent the
 * value if it is a compile-time constant; whether an OEM shipping multiple
 * devices would benefit much more from a single DDK binary, instead of
 * insignificant micro-optimizations.
 *
 * @section sec_base_user_api_gpuprops_dyn Dynamic GPU Properties
 *
 * Dynamic GPU properties are presented in two sets:
 * -# the commonly used properties in @ref base_gpu_props, which have been
 * unpacked from GPU register bitfields.
 * -# The full set of raw, unprocessed properties in @ref gpu_raw_gpu_props
 * (also a member of @ref base_gpu_props). All of these are presented in
 * the packed form, as presented by the GPU registers themselves.
 *
 * @usecase The raw properties in @ref gpu_raw_gpu_props are necessary to
 * allow a user of the Mali Tools (e.g. PAT) to determine "Why is this device
 * behaving differently?". In this case, all information about the
 * configuration is potentially useful, but it <b>does not need to be processed
 * by the driver</b>. Instead, the raw registers can be processed by the Mali
 * Tools software on the host PC.
 *
 * The properties returned extend the Midgard Configuration Discovery
 * registers. For example, GPU clock speed is not specified in the Midgard
 * Architecture, but is <b>necessary for OpenCL's clGetDeviceInfo() function</b>.
 *
 * The GPU properties are obtained by a call to
 * _mali_base_get_gpu_props(). This simply returns a pointer to a const
 * base_gpu_props structure. It is constant for the life of a base
 * context. Multiple calls to _mali_base_get_gpu_props() on a base context
 * return the same pointer to a constant structure. This avoids cache pollution
 * of the common data.
 *
 * This pointer must not be freed, because it does not point to the start of a
 * region allocated by the memory allocator; instead, just close the @ref
 * base_context.
 *
 *
 * @section sec_base_user_api_gpuprops_config Platform Config Compile-time Properties
 *
 * The Platform Config File sets up GPU properties that are specific to a
 * certain platform. Properties that are 'Implementation Defined' in the
 * Midgard Architecture spec are placed here.
 *
 * @note Reference configurations are provided for Midgard Implementations, such as
 * the Mali-T600 family. The customer need not repeat this information, and can select one of
 * these reference configurations. For example, VA_BITS, PA_BITS and the
 * maximum number of samples per pixel might vary between Midgard Implementations, but
 * \b not for platforms using the Mali-T604. This information is placed in
 * the reference configuration files.
 *
 * The System Integrator creates the following structure:
 * - platform_XYZ
 * - platform_XYZ/plat
 * - platform_XYZ/plat/plat_config.h
 *
 * They then edit plat_config.h, using the example plat_config.h files as a
 * guide.
 *
 * At the very least, the customer must set @ref CONFIG_GPU_CORE_TYPE, and will
 * receive a helpful \#error message if they do not do this correctly. This
 * selects the Reference Configuration for the Midgard Implementation. The rationale
 * behind this decision (against asking the customer to write \#include
 * <gpus/mali_t600.h> in their plat_config.h) is as follows:
 * - This mechanism 'looks' like a regular config file (such as Linux's
 * .config)
 * - It is difficult to get wrong in a way that will produce strange build
 * errors:
 *  - They need not know where the mali_t600.h, other_midg_gpu.h etc. files are stored - and
 * so they won't accidentally pick another file with 'mali_t600' in its name
 *  - When the build doesn't work, the System Integrator may think the DDK
 * doesn't work, and attempt to fix it themselves:
 *   - For the @ref CONFIG_GPU_CORE_TYPE mechanism, the only way to get past the
 * error is to set @ref CONFIG_GPU_CORE_TYPE, and this is what the \#error tells
 * you.
 *   - For a \#include mechanism, checks must still be made elsewhere, which the
 * System Integrator may try working around by setting \#defines (such as
 * VA_BITS) themselves in their plat_config.h. In the worst case, they may
 * set the prevention-mechanism \#define of
 * "A_CORRECT_MIDGARD_CORE_WAS_CHOSEN".
 *    - In this case, they would believe they are on the right track, because
 * the build progresses with their fix, but with errors elsewhere.
 *
 * However, there is nothing to prevent the customer from using \#include to organize
 * their own configuration files hierarchically.
 *
 * The mechanism for the header file processing is as follows:
 *
 * @dot
   digraph plat_config_mechanism {
	rankdir=BT
	size="6,6"

	"mali_base.h";
	"gpu/mali_gpu.h";

	node [ shape=box ];
	{
		rank = same; ordering = out;

		"gpu/mali_gpu_props.h";
		"base/midg_gpus/mali_t600.h";
		"base/midg_gpus/other_midg_gpu.h";
	}
	{ rank = same; "plat/plat_config.h"; }
	{
		rank = same;
		"gpu/mali_gpu.h" [ shape=box ];
		gpu_chooser [ label="" style="invisible" width=0 height=0 fixedsize=true ];
		select_gpu [ label="Mali-T600 | Other\n(select_gpu.h)" shape=polygon,sides=4,distortion=0.25 width=3.3 height=0.99 fixedsize=true ] ;
	}
	node [ shape=box ];
	{ rank = same; "plat/plat_config.h"; }
	{ rank = same; "mali_base.h"; }

	"mali_base.h" -> "gpu/mali_gpu.h" -> "gpu/mali_gpu_props.h";
	"mali_base.h" -> "plat/plat_config.h" ;
	"mali_base.h" -> select_gpu ;

	"plat/plat_config.h" -> gpu_chooser [style="dotted,bold" dir=none weight=4] ;
	gpu_chooser -> select_gpu [style="dotted,bold"] ;

	select_gpu -> "base/midg_gpus/mali_t600.h" ;
	select_gpu -> "base/midg_gpus/other_midg_gpu.h" ;
   }
   @enddot
 *
 *
 * @section sec_base_user_api_gpuprops_kernel Kernel Operation
 *
 * During Base Context Create time, user-side makes a single kernel call:
 * - A call to fill user memory with GPU information structures
 *
 * The kernel-side will fill the provided user memory with the entire processed
 * @ref base_gpu_props structure, because this information is required in both
 * user and kernel side; it does not make sense to decode it twice.
 *
 * Coherency groups must be derived from the bitmasks, but this can be done
 * kernel side, and just once at kernel startup: Coherency groups must already
 * be known kernel-side, to support chains that specify an 'Only Coherent Group'
 * SW requirement, or 'Only Coherent Group with Tiler' SW requirement.
 *
 * @section sec_base_user_api_gpuprops_cocalc Coherency Group calculation
 * Creation of the coherent group data is done at device-driver startup, and so
 * is one-time. This will most likely involve a loop with CLZ, shifting, and
 * bit clearing on the L2_PRESENT mask, depending on whether the
 * system is L2 Coherent. The number of shader cores is obtained by a
 * population count, since faulty cores may be disabled during production,
 * producing a non-contiguous mask.
 *
 * The memory requirements for this algorithm can be determined either by a u64
 * population count on the L2_PRESENT mask (a LUT helper is already
 * required for the above), or by the simple assumption that there can be no more
 * than 16 coherent groups, since core groups are typically 4 cores.
 */
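
/*
 * Illustrative sketch only of the one-time derivation described above: walk
 * the set bits of the L2_PRESENT mask and population-count each group's core
 * mask. example_cores_for_l2() and example_popcount64() are hypothetical;
 * the mapping from an L2 slice bit to its core mask is platform-derived and
 * elided here.
 *
 *	static void example_build_groups(u64 l2_present, u64 shader_present,
 *			struct mali_base_gpu_coherent_group_info *info)
 *	{
 *		u32 n = 0;
 *
 *		while (l2_present && n < BASE_MAX_COHERENT_GROUPS) {
 *			u64 lowest = l2_present & -l2_present; // lowest set bit
 *			u64 mask = example_cores_for_l2(lowest, shader_present);
 *
 *			info->group[n].core_mask = mask;
 *			info->group[n].num_cores = (u16)example_popcount64(mask);
 *			l2_present &= ~lowest;
 *			n++;
 *		}
 *		info->num_groups = n;
 *	}
 */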

/**
 * @addtogroup base_user_api_gpuprops User-side Base GPU Property Query APIs
 * @{
 */

/**
 * @addtogroup base_user_api_gpuprops_dyn Dynamic HW Properties
 * @{
 */

#define BASE_GPU_NUM_TEXTURE_FEATURES_REGISTERS 3

#define BASE_MAX_COHERENT_GROUPS 16

struct mali_base_gpu_core_props {
	/**
	 * Product specific value.
	 */
	u32 product_id;

	/**
	 * Status of the GPU release.
	 * No defined values, but starts at 0 and increases by one for each
	 * release status (alpha, beta, EAC, etc.).
	 * 4 bit values (0-15).
	 */
	u16 version_status;

	/**
	 * Minor release number of the GPU. "P" part of an "RnPn" release number.
	 * 8 bit values (0-255).
	 */
	u16 minor_revision;

	/**
	 * Major release number of the GPU. "R" part of an "RnPn" release number.
	 * 4 bit values (0-15).
	 */
	u16 major_revision;

	u16 padding;

	/**
	 * This property is deprecated since it has never contained the real
	 * current value of the GPU clock speed. It is kept here only for
	 * backwards compatibility. For the new ioctl interface, it is ignored
	 * and is treated as padding, to keep the structure the same size and
	 * retain the placement of its members.
	 */
	u32 gpu_speed_mhz;

	/**
	 * @usecase GPU clock max/min speed is required for computing best/worst case
	 * in tasks such as job scheduling and irq_throttling. (It is not specified in
	 * the Midgard Architecture).
	 * Also, GPU clock max speed is used for OpenCL's clGetDeviceInfo() function.
	 */
	u32 gpu_freq_khz_max;
	u32 gpu_freq_khz_min;

	/**
	 * Size of the shader program counter, in bits.
	 */
	u32 log2_program_counter_size;

	/**
	 * TEXTURE_FEATURES_x registers, as exposed by the GPU. This is a
	 * bitpattern where a set bit indicates that the format is supported.
	 *
	 * Before using a texture format, it is recommended that the corresponding
	 * bit be checked.
	 */
	u32 texture_features[BASE_GPU_NUM_TEXTURE_FEATURES_REGISTERS];

	/**
	 * Theoretical maximum memory available to the GPU. It is unlikely that a
	 * client will be able to allocate all of this memory for their own
	 * purposes, but this at least provides an upper bound on the memory
	 * available to the GPU.
	 *
	 * This is required for OpenCL's clGetDeviceInfo() call when
	 * CL_DEVICE_GLOBAL_MEM_SIZE is requested, for OpenCL GPU devices. The
	 * client will not be expecting to allocate anywhere near this value.
	 */
	u64 gpu_available_memory_size;
};
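
/*
 * Illustrative sketch only: checking a texture format bit before use, as
 * recommended above. The helper is hypothetical, and a linear split of the
 * format index across the three registers is assumed for illustration; the
 * real format-to-bit mapping is defined by the GPU architecture.
 *
 *	static inline int example_texture_format_supported(
 *		const struct mali_base_gpu_core_props *props, unsigned int fmt)
 *	{
 *		unsigned int reg = fmt / 32; // which TEXTURE_FEATURES_x register
 *		unsigned int bit = fmt % 32; // which bit within it
 *
 *		if (reg >= BASE_GPU_NUM_TEXTURE_FEATURES_REGISTERS)
 *			return 0;
 *		return (props->texture_features[reg] >> bit) & 1u;
 *	}
 */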

/**
 * More information is possible - but associativity and bus width are not
 * required by upper-level APIs.
 */
struct mali_base_gpu_l2_cache_props {
	u8 log2_line_size;
	u8 log2_cache_size;
	u8 num_l2_slices; /* Number of L2C slices. 1 or higher */
	u8 padding[5];
};

struct mali_base_gpu_tiler_props {
	u32 bin_size_bytes;    /* Max is 4*2^15 */
	u32 max_active_levels; /* Max is 2^15 */
};

/**
 * GPU threading system details.
 */
struct mali_base_gpu_thread_props {
	u32 max_threads;           /* Max. number of threads per core */
	u32 max_workgroup_size;    /* Max. number of threads per workgroup */
	u32 max_barrier_size;      /* Max. number of threads that can synchronize on a simple barrier */
	u16 max_registers;         /* Total size [1..65535] of the register file available per core. */
	u8 max_task_queue;         /* Max. tasks [1..255] which may be sent to a core before it becomes blocked. */
	u8 max_thread_group_split; /* Max. allowed value [1..15] of the Thread Group Split field. */
	u8 impl_tech;              /* 0 = Not specified, 1 = Silicon, 2 = FPGA, 3 = SW Model/Emulation */
	u8 padding[7];
};

/**
 * @brief descriptor for a coherent group
 *
 * \c core_mask exposes all cores in that coherent group, and \c num_cores
 * provides a cached population-count for that mask.
 *
 * @note Whilst all cores are exposed in the mask, not all may be available to
 * the application, depending on the Kernel Power policy.
 *
 * @note If u64s must be 8-byte aligned, then this structure has 32 bits of wastage.
 */
struct mali_base_gpu_coherent_group {
	u64 core_mask;  /**< Core restriction mask required for the group */
	u16 num_cores;  /**< Number of cores in the group */
	u16 padding[3];
};

/**
 * @brief Coherency group information
 *
 * Note that the sizes of the members could be reduced. However, the \c group
 * member might be 8-byte aligned to ensure the u64 core_mask is 8-byte
 * aligned, thus leading to wastage if the other members' sizes were reduced.
 *
 * The groups are sorted by core mask. The core masks are non-repeating and do
 * not intersect.
 */
struct mali_base_gpu_coherent_group_info {
	u32 num_groups;

	/**
	 * Number of core groups (coherent or not) in the GPU. Equivalent to the number of L2 Caches.
	 *
	 * The GPU Counter dumping writes 2048 bytes per core group, regardless of
	 * whether the core groups are coherent or not. Hence this member is needed
	 * to calculate how much memory is required for dumping.
	 *
	 * @note Do not use it to work out how many valid elements are in the
	 * group[] member. Use num_groups instead.
	 */
	u32 num_core_groups;

	/**
	 * Coherency features of the memory, accessed by @ref gpu_mem_features
	 * methods
	 */
	u32 coherency;

	u32 padding;

	/**
	 * Descriptors of coherent groups
	 */
	struct mali_base_gpu_coherent_group group[BASE_MAX_COHERENT_GROUPS];
};
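
/*
 * Illustrative sketch only: sizing a counter dump buffer from
 * num_core_groups, following the 2048-bytes-per-core-group rule documented
 * above (the helper name is hypothetical).
 *
 *	static inline u64 example_dump_size(
 *		const struct mali_base_gpu_coherent_group_info *info)
 *	{
 *		return (u64)info->num_core_groups * 2048;
 *	}
 */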

/**
 * A complete description of the GPU's Hardware Configuration Discovery
 * registers.
 *
 * The information is presented inefficiently for access. For frequent access,
 * the values should be better expressed in an unpacked form in the
 * base_gpu_props structure.
 *
 * @usecase The raw properties in @ref gpu_raw_gpu_props are necessary to
 * allow a user of the Mali Tools (e.g. PAT) to determine "Why is this device
 * behaving differently?". In this case, all information about the
 * configuration is potentially useful, but it <b>does not need to be processed
 * by the driver</b>. Instead, the raw registers can be processed by the Mali
 * Tools software on the host PC.
 *
 */
struct gpu_raw_gpu_props {
	u64 shader_present;
	u64 tiler_present;
	u64 l2_present;
	u64 stack_present;

	u32 l2_features;
	u32 suspend_size; /* API 8.2+ */
	u32 mem_features;
	u32 mmu_features;

	u32 as_present;

	u32 js_present;
	u32 js_features[GPU_MAX_JOB_SLOTS];
	u32 tiler_features;
	u32 texture_features[3];

	u32 gpu_id;

	u32 thread_max_threads;
	u32 thread_max_workgroup_size;
	u32 thread_max_barrier_size;
	u32 thread_features;

	/*
	 * Note: This is the _selected_ coherency mode rather than the
	 * available modes as exposed in the coherency_features register.
	 */
	u32 coherency_mode;
};

/**
 * Return structure for _mali_base_get_gpu_props().
 *
 * NOTE: the raw_props member in this data structure contains the register
 * values from which the values of the other members are derived. The derived
 * members exist to allow for efficient access and/or shielding the details
 * of the layout of the registers.
 *
 */
typedef struct mali_base_gpu_props {
	struct mali_base_gpu_core_props core_props;
	struct mali_base_gpu_l2_cache_props l2_props;
	u64 unused_1; /* keep for backwards compatibility */
	struct mali_base_gpu_tiler_props tiler_props;
	struct mali_base_gpu_thread_props thread_props;

	/** This member is large, likely to be 128 bytes */
	struct gpu_raw_gpu_props raw_props;

	/** This must be the last member of the structure */
	struct mali_base_gpu_coherent_group_info coherency_info;
} base_gpu_props;

/** @} end group base_user_api_gpuprops_dyn */

/** @} end group base_user_api_gpuprops */

/**
 * @addtogroup base_user_api_core User-side Base core APIs
 * @{
 */

/**
 * \enum base_context_create_flags
 *
 * Flags to pass to ::base_context_init.
 * Flags can be ORed together to enable multiple things.
 *
 * These share the same space as BASEP_CONTEXT_FLAG_*, and so must
 * not collide with them.
 */
enum base_context_create_flags {
	/** No flags set */
	BASE_CONTEXT_CREATE_FLAG_NONE = 0,

	/** Base context is embedded in a cctx object (flag used for CINSTR software counter macros) */
	BASE_CONTEXT_CCTX_EMBEDDED = (1u << 0),

	/** Base context is a 'System Monitor' context for Hardware counters.
	 *
	 * One important side effect of this is that job submission is disabled. */
	BASE_CONTEXT_SYSTEM_MONITOR_SUBMIT_DISABLED = (1u << 1)
};

/**
 * Bitpattern describing the ::base_context_create_flags that can be passed to base_context_init()
 */
#define BASE_CONTEXT_CREATE_ALLOWED_FLAGS \
	(((u32)BASE_CONTEXT_CCTX_EMBEDDED) | \
	 ((u32)BASE_CONTEXT_SYSTEM_MONITOR_SUBMIT_DISABLED))

/**
 * Bitpattern describing the ::base_context_create_flags that can be passed to the kernel
 */
#define BASE_CONTEXT_CREATE_KERNEL_FLAGS \
	((u32)BASE_CONTEXT_SYSTEM_MONITOR_SUBMIT_DISABLED)
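
/*
 * Illustrative sketch only (hypothetical helper): validating caller-supplied
 * creation flags against the allowed bitpattern before use.
 *
 *	static inline int example_create_flags_valid(u32 flags)
 *	{
 *		return (flags & ~BASE_CONTEXT_CREATE_ALLOWED_FLAGS) == 0;
 *	}
 */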

/*
 * Private flags used on the base context
 *
 * These start at bit 31, and run down to zero.
 *
 * They share the same space as @ref base_context_create_flags, and so must
 * not collide with them.
 */
/** Private flag tracking whether job descriptor dumping is disabled */
#define BASEP_CONTEXT_FLAG_JOB_DUMP_DISABLED ((u32)(1 << 31))

/** @} end group base_user_api_core */

/** @} end group base_user_api */

/**
 * @addtogroup base_plat_config_gpuprops Base Platform Config GPU Properties
 * @{
 *
 * C pre-processor macros to do with Platform Config are exposed here.
 *
 * These include:
 * - GPU Properties that are constant on a particular Midgard Family
 * Implementation e.g. Maximum samples per pixel on Mali-T600.
 * - General platform config for the GPU, such as the GPU major and minor
 * revision.
 */

/** @} end group base_plat_config_gpuprops */

/**
 * @addtogroup base_api Base APIs
 * @{
 */

/**
 * @brief The payload for a replay job. This must be in GPU memory.
 */
typedef struct base_jd_replay_payload {
	/**
	 * Pointer to the first entry in the base_jd_replay_jc list. These
	 * will be replayed in @b reverse order (so that extra ones can be added
	 * to the head in future soft jobs without affecting this soft job)
	 */
	u64 tiler_jc_list;

	/**
	 * Pointer to the fragment job chain.
	 */
	u64 fragment_jc;

	/**
	 * Pointer to the tiler heap free FBD field to be modified.
	 */
	u64 tiler_heap_free;

	/**
	 * Hierarchy mask for the replayed fragment jobs. May be zero.
	 */
	u16 fragment_hierarchy_mask;

	/**
	 * Hierarchy mask for the replayed tiler jobs. May be zero.
	 */
	u16 tiler_hierarchy_mask;

	/**
	 * Default weight to be used for hierarchy levels not in the original
	 * mask.
	 */
	u32 hierarchy_default_weight;

	/**
	 * Core requirements for the tiler job chain
	 */
	base_jd_core_req tiler_core_req;

	/**
	 * Core requirements for the fragment job chain
	 */
	base_jd_core_req fragment_core_req;
} base_jd_replay_payload;

#ifdef BASE_LEGACY_UK10_2_SUPPORT
typedef struct base_jd_replay_payload_uk10_2 {
	u64 tiler_jc_list;
	u64 fragment_jc;
	u64 tiler_heap_free;
	u16 fragment_hierarchy_mask;
	u16 tiler_hierarchy_mask;
	u32 hierarchy_default_weight;
	u16 tiler_core_req;
	u16 fragment_core_req;
	u8 padding[4];
} base_jd_replay_payload_uk10_2;
#endif /* BASE_LEGACY_UK10_2_SUPPORT */

/**
 * @brief An entry in the linked list of job chains to be replayed. This must
 * be in GPU memory.
 */
typedef struct base_jd_replay_jc {
	/**
	 * Pointer to next entry in the list. A setting of NULL indicates the
	 * end of the list.
	 */
	u64 next;

	/**
	 * Pointer to the job chain.
	 */
	u64 jc;

} base_jd_replay_jc;
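
/*
 * Illustrative sketch only: because the list is replayed in reverse order, a
 * later soft job can prepend a new chain at the head without disturbing the
 * entries already queued by this one. The helper is hypothetical, and the
 * entry is shown via a CPU pointer for clarity; in practice these fields
 * hold GPU addresses.
 *
 *	static void example_prepend_jc(struct base_jd_replay_payload *payload,
 *			struct base_jd_replay_jc *entry, u64 entry_gpu_va)
 *	{
 *		entry->next = payload->tiler_jc_list;  // old head becomes second
 *		payload->tiler_jc_list = entry_gpu_va; // new entry becomes head
 *	}
 */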

/* Maximum number of jobs allowed in a fragment chain in the payload of a
 * replay job */
#define BASE_JD_REPLAY_F_CHAIN_JOB_LIMIT 256

/** @} end group base_api */

typedef struct base_profiling_controls {
	u32 profiling_controls[FBDUMP_CONTROL_MAX];
} base_profiling_controls;

/* Enable additional tracepoints for latency measurements (TL_ATOM_READY,
 * TL_ATOM_DONE, TL_ATOM_PRIO_CHANGE, TL_ATOM_EVENT_POST) */
#define BASE_TLSTREAM_ENABLE_LATENCY_TRACEPOINTS (1 << 0)

/* Indicate that job dumping is enabled. This could affect certain timers
 * to account for the performance impact. */
#define BASE_TLSTREAM_JOB_DUMPING_ENABLED (1 << 1)

#define BASE_TLSTREAM_FLAGS_MASK (BASE_TLSTREAM_ENABLE_LATENCY_TRACEPOINTS | \
				  BASE_TLSTREAM_JOB_DUMPING_ENABLED)

#endif /* _BASE_KERNEL_H_ */