/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
 *
 * (C) COPYRIGHT 2020-2022 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU license.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 */

#ifndef _KBASE_CSF_TILER_HEAP_DEF_H_
#define _KBASE_CSF_TILER_HEAP_DEF_H_

#include <mali_kbase.h>

/* Size of a tiler heap chunk header, in bytes. */
#define CHUNK_HDR_SIZE ((size_t)64)

/* Bit-position of the next chunk's size when stored in a chunk header. */
#define CHUNK_HDR_NEXT_SIZE_POS (0)

/* Bit-position of the next chunk's address when stored in a chunk header. */
#define CHUNK_HDR_NEXT_ADDR_POS (12)

/* Bitmask of the next chunk's size when stored in a chunk header.
 * The size field occupies the bits below the address field, i.e. bits
 * [CHUNK_HDR_NEXT_ADDR_POS - 1 : CHUNK_HDR_NEXT_SIZE_POS] (bits 11:0).
 */
#define CHUNK_HDR_NEXT_SIZE_MASK (((u64)1 << CHUNK_HDR_NEXT_ADDR_POS) - 1u)

/* Bitmask of the address of the next chunk when stored in a chunk header.
 * Complement of the size mask, i.e. bits 63:12.
 */
#define CHUNK_HDR_NEXT_ADDR_MASK (~CHUNK_HDR_NEXT_SIZE_MASK)

/* Right-shift before storing the next chunk's size in a chunk header. */
#define CHUNK_HDR_NEXT_SIZE_ENCODE_SHIFT (12)

/* Right-shift before storing the next chunk's address in a chunk header. */
#define CHUNK_HDR_NEXT_ADDR_ENCODE_SHIFT (12)

/* Bitmask of valid chunk sizes. This is also the maximum chunk size, in bytes.
 */
#define CHUNK_SIZE_MASK \
	((CHUNK_HDR_NEXT_SIZE_MASK >> CHUNK_HDR_NEXT_SIZE_POS) << \
	 CHUNK_HDR_NEXT_SIZE_ENCODE_SHIFT)

/* Bitmask of valid chunk addresses. This is also the highest address. */
#define CHUNK_ADDR_MASK \
	((CHUNK_HDR_NEXT_ADDR_MASK >> CHUNK_HDR_NEXT_ADDR_POS) << \
	 CHUNK_HDR_NEXT_ADDR_ENCODE_SHIFT)

/* The size of the area needed to be vmapped prior to handing the tiler heap
 * over to the tiler, so that the shrinker could be invoked.
 */
#define NEXT_CHUNK_ADDR_SIZE (sizeof(u64))

/**
 * struct kbase_csf_tiler_heap_chunk - A tiler heap chunk managed by the kernel
 *
 * @link:    Link to this chunk in a list of chunks belonging to a
 *           @kbase_csf_tiler_heap.
 * @region:  Pointer to the GPU memory region allocated for the chunk.
 * @map:     Kernel VA mapping so that we would not need to use vmap in the
 *           shrinker callback, which can allocate. This maps only the header
 *           of the chunk, so it could be traversed.
 * @gpu_va:  GPU virtual address of the start of the memory region.
 *           This points to the header of the chunk and not to the low address
 *           of free memory within it.
 *
 * Chunks are allocated upon initialization of a tiler heap or in response to
 * out-of-memory events from the firmware. Chunks are always fully backed by
 * physical memory to avoid the overhead of processing GPU page faults. The
 * allocated GPU memory regions are linked together independent of the list of
 * kernel objects of this type.
 */
struct kbase_csf_tiler_heap_chunk {
	struct list_head link;
	struct kbase_va_region *region;
	struct kbase_vmap_struct map;
	u64 gpu_va;
};

/* Flag value related to the runtime check on the heap buffer descriptor.
 * NOTE(review): struct kbase_csf_tiler_heap stores the checked state as a
 * plain bool (@buf_desc_checked), so the code that stores/tests this bit is
 * elsewhere — confirm usage at the call sites.
 */
#define HEAP_BUF_DESCRIPTOR_CHECKED (1 << 0)

/**
 * struct kbase_csf_tiler_heap - A tiler heap managed by the kernel
 *
 * @kctx:             Pointer to the kbase context with which this heap is
 *                    associated.
 * @link:             Link to this heap in a list of tiler heaps belonging to
 *                    the @kbase_csf_tiler_heap_context.
 * @chunks_list:      Linked list of allocated chunks.
 * @gpu_va:           The GPU virtual address of the heap context structure that
 *                    was allocated for the firmware. This is also used to
 *                    uniquely identify the heap.
 * @heap_id:          Unique id representing the heap, assigned during heap
 *                    initialization.
 * @buf_desc_va:      Buffer descriptor GPU VA. Can be 0, for backward
 *                    compatibility with earlier versions of the base interface.
 * @buf_desc_reg:     Pointer to the VA region that covers the provided buffer
 *                    descriptor memory object pointed to by buf_desc_va.
 * @buf_desc_map:     Kernel VA mapping of the buffer descriptor, read from
 *                    during the tiler heap shrinker. Sync operations may need
 *                    to be done before each read.
 * @gpu_va_map:       Kernel VA mapping of the GPU VA region.
 * @chunk_size:       Size of each chunk, in bytes. Must be page-aligned.
 * @chunk_count:      The number of chunks currently allocated. Must not be
 *                    zero or greater than @max_chunks.
 * @max_chunks:       The maximum number of chunks that the heap should be
 *                    allowed to use. Must not be less than @chunk_count.
 * @target_in_flight: Number of render-passes that the driver should attempt
 *                    to keep in flight for which allocation of new chunks is
 *                    allowed. Must not be zero.
 * @buf_desc_checked: Indicates if runtime check on buffer descriptor has been done.
 */
struct kbase_csf_tiler_heap {
	struct kbase_context *kctx;
	struct list_head link;
	struct list_head chunks_list;
	u64 gpu_va;
	u64 heap_id;
	u64 buf_desc_va;
	struct kbase_va_region *buf_desc_reg;
	struct kbase_vmap_struct buf_desc_map;
	struct kbase_vmap_struct gpu_va_map;
	u32 chunk_size;
	u32 chunk_count;
	u32 max_chunks;
	u16 target_in_flight;
	bool buf_desc_checked;
};

#endif /* !_KBASE_CSF_TILER_HEAP_DEF_H_ */