/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
 *
 * (C) COPYRIGHT 2019-2022 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU license.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 */

#include <mali_kbase.h>

/**
 * kbase_device_get_list - Get access to the device list.
 *
 * Return: Pointer to the linked list head.
 */
const struct list_head *kbase_device_get_list(void);

/**
 * kbase_device_put_list - Put access to the device list.
 *
 * @dev_list: Head of the linked list containing the device list.
 */
void kbase_device_put_list(const struct list_head *dev_list);

/**
 * kbase_increment_device_id - Increment the device id.
 *
 * Used to increment the device id on successful initialization of the device.
 */
void kbase_increment_device_id(void);

/**
 * kbase_device_firmware_init_once - Initialize firmware and HWC
 *
 * @kbdev: An instance of the GPU platform device, allocated from the probe
 *         method of the driver.
 *
 * When a device file is opened for the first time, load the firmware and
 * initialize the hardware counter components.
 *
 * Return: 0 on success. An error code on failure.
 */
int kbase_device_firmware_init_once(struct kbase_device *kbdev);

/**
 * kbase_device_init - Device initialisation.
 *
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 *
 * This is called from device probe to initialise the various other
 * components needed.
 *
 * Return: 0 on success and a non-zero value on failure.
 */
int kbase_device_init(struct kbase_device *kbdev);

/**
 * kbase_device_term - Device termination.
 *
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 *
 * This is called from device remove to terminate the various components that
 * were initialised during kbase_device_init().
 */
void kbase_device_term(struct kbase_device *kbdev);

/**
 * kbase_reg_write - Write to a GPU register
 * @kbdev: Kbase device pointer
 * @offset: Offset of the register
 * @value: Value to write
 *
 * Caller must ensure the GPU is powered (@kbdev->pm.gpu_powered != false).
 */
void kbase_reg_write(struct kbase_device *kbdev, u32 offset, u32 value);

/**
 * kbase_reg_read - Read from a GPU register
 * @kbdev: Kbase device pointer
 * @offset: Offset of the register
 *
 * Caller must ensure the GPU is powered (@kbdev->pm.gpu_powered != false).
 *
 * Return: Value in the desired register
 */
u32 kbase_reg_read(struct kbase_device *kbdev, u32 offset);
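/*
 * Usage sketch for the register accessors above (illustrative only, not part
 * of this interface). It assumes the caller verifies kbdev->pm.gpu_powered,
 * as required by the kerneldoc, and that GPU_CONTROL_REG(GPU_ID) is a valid
 * register offset macro in this driver; treat both the locking shown and the
 * macro names as assumptions of this example.
 *
 *	unsigned long flags;
 *	u32 gpu_id = 0;
 *
 *	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
 *	if (kbdev->pm.gpu_powered)
 *		gpu_id = kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_ID));
 *	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
 */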
/**
 * kbase_is_gpu_removed() - Has the GPU been removed.
 * @kbdev: Kbase device pointer
 *
 * When Kbase takes too long to give up the GPU, the Arbiter can remove it.
 * This will then be followed by a GPU lost event. This function will return
 * true if the GPU has been removed. When this happens register reads will be
 * zero. A zero GPU_ID is invalid, so this is used to detect when the GPU has
 * been removed.
 *
 * Return: True if the GPU has been removed
 */
bool kbase_is_gpu_removed(struct kbase_device *kbdev);

/**
 * kbase_gpu_cache_flush_pa_range_and_busy_wait() - Start a cache physical range flush
 *                                                  and busy wait
 *
 * @kbdev: kbase device to issue the MMU operation on.
 * @phys: Starting address of the physical range to start the operation on.
 * @nr_bytes: Number of bytes to work on.
 * @flush_op: Flush command register value to be sent to HW
 *
 * Issue a cache flush physical range command, then busy-wait on the IRQ
 * status. This function will clear the FLUSH_PA_RANGE_COMPLETED IRQ mask bit
 * and busy-wait on the RAWSTAT register.
 *
 * Return: 0 if successful or a negative error code on failure.
 */
#if MALI_USE_CSF
int kbase_gpu_cache_flush_pa_range_and_busy_wait(struct kbase_device *kbdev, phys_addr_t phys,
						 size_t nr_bytes, u32 flush_op);
#endif /* MALI_USE_CSF */

/**
 * kbase_gpu_cache_flush_and_busy_wait - Start a cache flush and busy wait
 * @kbdev: Kbase device
 * @flush_op: Flush command register value to be sent to HW
 *
 * Issue a cache flush command to hardware, then busy-wait on the IRQ status.
 * This function will clear the CLEAN_CACHES_COMPLETED IRQ mask bit set by
 * other threads through kbase_gpu_start_cache_clean(), and wake them up
 * manually after the busy-wait is done. Any pending cache flush commands
 * raised by other threads are handled in this function.
 * hwaccess_lock must be held by the caller.
 *
 * Return: 0 if successful or a negative error code on failure.
 */
int kbase_gpu_cache_flush_and_busy_wait(struct kbase_device *kbdev,
					u32 flush_op);

/**
 * kbase_gpu_start_cache_clean - Start a cache clean
 * @kbdev: Kbase device
 * @flush_op: Flush command register value to be sent to HW
 *
 * Issue a given cache flush command to hardware.
 * This function will take hwaccess_lock.
 */
void kbase_gpu_start_cache_clean(struct kbase_device *kbdev, u32 flush_op);

/**
 * kbase_gpu_start_cache_clean_nolock - Start a cache clean
 * @kbdev: Kbase device
 * @flush_op: Flush command register value to be sent to HW
 *
 * Issue a given cache flush command to hardware.
 * hwaccess_lock must be held by the caller.
 */
void kbase_gpu_start_cache_clean_nolock(struct kbase_device *kbdev,
					u32 flush_op);

/**
 * kbase_gpu_wait_cache_clean - Wait for cache cleaning to finish
 * @kbdev: Kbase device
 *
 * This function will take hwaccess_lock, and may sleep.
 */
void kbase_gpu_wait_cache_clean(struct kbase_device *kbdev);
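/*
 * Usage sketch for the asynchronous cache clean helpers above (illustrative
 * only, not part of this interface): start a cache clean and later wait for
 * it to finish, rather than busy-waiting. The helper name is hypothetical and
 * the flush_op value must be a valid flush command register encoding for the
 * GPU; both are assumptions of this example.
 *
 *	void hypothetical_clean_and_wait(struct kbase_device *kbdev, u32 flush_op)
 *	{
 *		kbase_gpu_start_cache_clean(kbdev, flush_op);
 *		kbase_gpu_wait_cache_clean(kbdev);
 *	}
 */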
/**
 * kbase_gpu_wait_cache_clean_timeout - Wait a certain time for cache
 *                                      cleaning to finish
 * @kbdev: Kbase device
 * @wait_timeout_ms: Time in milliseconds to wait for the cache clean to complete.
 *
 * This function will take hwaccess_lock, and may sleep. It is supposed to be
 * called from paths (like GPU reset) where an indefinite wait for the
 * completion of the cache clean operation can cause a deadlock, as the
 * operation may never complete.
 *
 * Return: 0 if successful or a negative error code on failure.
 */
int kbase_gpu_wait_cache_clean_timeout(struct kbase_device *kbdev,
				       unsigned int wait_timeout_ms);

/**
 * kbase_gpu_cache_clean_wait_complete - Called after the cache cleaning is
 *                                       finished. Also called after a GPU
 *                                       reset.
 * @kbdev: Kbase device
 *
 * Caller must hold the hwaccess_lock.
 */
void kbase_gpu_cache_clean_wait_complete(struct kbase_device *kbdev);

/**
 * kbase_clean_caches_done - Issue a previously queued cache clean request or
 *                           wake up the requester that issued the cache clean.
 * @kbdev: Kbase device
 *
 * Caller must hold the hwaccess_lock.
 */
void kbase_clean_caches_done(struct kbase_device *kbdev);

/**
 * kbase_gpu_interrupt - GPU interrupt handler
 * @kbdev: Kbase device pointer
 * @val: The value of the GPU IRQ status register which triggered the call
 *
 * This function is called from the interrupt handler when a GPU IRQ is to be
 * handled.
 */
void kbase_gpu_interrupt(struct kbase_device *kbdev, u32 val);
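/*
 * Dispatch sketch for kbase_gpu_interrupt() (illustrative only, not part of
 * this interface): a platform IRQ handler reads the GPU IRQ status and hands
 * it to kbase_gpu_interrupt(), which internally reacts to bits such as
 * CLEAN_CACHES_COMPLETED (see kbase_clean_caches_done() above). The handler
 * name and the GPU_CONTROL_REG(GPU_IRQ_STATUS) offset macro are assumptions
 * of this example.
 *
 *	static irqreturn_t hypothetical_gpu_irq_handler(int irq, void *data)
 *	{
 *		struct kbase_device *kbdev = data;
 *		u32 val = kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_IRQ_STATUS));
 *
 *		if (!val)
 *			return IRQ_NONE;
 *
 *		kbase_gpu_interrupt(kbdev, val);
 *		return IRQ_HANDLED;
 *	}
 */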