/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
 *
 * (C) COPYRIGHT 2014-2015, 2018-2022 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU license.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 */

/**
 * DOC: Interface file for accessing MMU hardware functionality
 *
 * This module provides an abstraction for accessing the functionality provided
 * by the midgard MMU and thus allows all MMU HW access to be contained within
 * one common place and allows for different backends (implementations) to
 * be provided.
 */

#ifndef _KBASE_MMU_HW_H_
#define _KBASE_MMU_HW_H_

#include "mali_kbase_mmu.h"

/* Forward declarations */
struct kbase_device;
struct kbase_as;
struct kbase_context;

/**
 * enum kbase_mmu_fault_type - MMU fault type descriptor.
 * @KBASE_MMU_FAULT_TYPE_UNKNOWN:         unknown fault
 * @KBASE_MMU_FAULT_TYPE_PAGE:            page fault
 * @KBASE_MMU_FAULT_TYPE_BUS:             bus fault
 * @KBASE_MMU_FAULT_TYPE_PAGE_UNEXPECTED: unexpected page fault
 * @KBASE_MMU_FAULT_TYPE_BUS_UNEXPECTED:  unexpected bus fault
 */
enum kbase_mmu_fault_type {
	KBASE_MMU_FAULT_TYPE_UNKNOWN = 0,
	KBASE_MMU_FAULT_TYPE_PAGE,
	KBASE_MMU_FAULT_TYPE_BUS,
	KBASE_MMU_FAULT_TYPE_PAGE_UNEXPECTED,
	KBASE_MMU_FAULT_TYPE_BUS_UNEXPECTED
};

/**
 * struct kbase_mmu_hw_op_param - parameters for kbase_mmu_hw_do_* functions
 * @vpfn: MMU Virtual Page Frame Number to start the operation on.
 * @nr: Number of pages to work on.
 * @op: Operation type (written to ASn_COMMAND).
 * @kctx_id: Kernel context ID for MMU command tracepoint.
 * @mmu_sync_info: Indicates whether this call is synchronous wrt MMU ops.
 * @flush_skip_levels: Page table levels to skip flushing. (Only
 *                     applicable if GPU supports feature)
 */
struct kbase_mmu_hw_op_param {
	u64 vpfn;
	u32 nr;
	enum kbase_mmu_op_type op;
	u32 kctx_id;
	enum kbase_caller_mmu_sync_info mmu_sync_info;
	u64 flush_skip_levels;
};
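/*
 * Example (illustrative sketch only, not part of this interface): how a
 * caller might describe a synchronous flush of 16 pages starting at a given
 * virtual page frame number. The enumerators KBASE_MMU_OP_FLUSH_PT and
 * CALLER_MMU_SYNC are assumed to be provided by mali_kbase_mmu.h and may
 * differ between driver versions; vpfn and kctx_id are caller-supplied
 * values, and flush_skip_levels is left at 0 on the assumption that no page
 * table level is skipped.
 *
 *	const struct kbase_mmu_hw_op_param op_param = {
 *		.vpfn = vpfn,
 *		.nr = 16,
 *		.op = KBASE_MMU_OP_FLUSH_PT,
 *		.kctx_id = kctx_id,
 *		.mmu_sync_info = CALLER_MMU_SYNC,
 *		.flush_skip_levels = 0,
 *	};
 */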
/**
 * kbase_mmu_hw_configure - Configure an address space for use.
 * @kbdev: kbase device to configure.
 * @as: address space to configure.
 *
 * Configure the MMU using the address space details setup in the
 * kbase_context structure.
 */
void kbase_mmu_hw_configure(struct kbase_device *kbdev,
		struct kbase_as *as);

/**
 * kbase_mmu_hw_do_lock - Issue LOCK command to the MMU and program
 *                        the LOCKADDR register.
 *
 * @kbdev: Kbase device to issue the MMU operation on.
 * @as: Address space to issue the MMU operation on.
 * @op_param: Pointer to struct containing information about the MMU
 *            operation to perform.
 *
 * hwaccess_lock needs to be held when calling this function.
 *
 * Return: 0 if issuing the command was successful, otherwise an error code.
 */
int kbase_mmu_hw_do_lock(struct kbase_device *kbdev, struct kbase_as *as,
			 const struct kbase_mmu_hw_op_param *op_param);

/**
 * kbase_mmu_hw_do_unlock_no_addr - Issue UNLOCK command to the MMU without
 *                                  programming the LOCKADDR register and wait
 *                                  for it to complete before returning.
 *
 * @kbdev: Kbase device to issue the MMU operation on.
 * @as: Address space to issue the MMU operation on.
 * @op_param: Pointer to struct containing information about the MMU
 *            operation to perform.
 *
 * This function should be called for GPUs where a GPU command is used to
 * flush the cache(s) instead of an MMU command.
 *
 * Return: 0 if issuing the command was successful, otherwise an error code.
 */
int kbase_mmu_hw_do_unlock_no_addr(struct kbase_device *kbdev, struct kbase_as *as,
				   const struct kbase_mmu_hw_op_param *op_param);

/**
 * kbase_mmu_hw_do_unlock - Issue UNLOCK command to the MMU and wait for it
 *                          to complete before returning.
 *
 * @kbdev: Kbase device to issue the MMU operation on.
 * @as: Address space to issue the MMU operation on.
 * @op_param: Pointer to struct containing information about the MMU
 *            operation to perform.
 *
 * Return: 0 if issuing the command was successful, otherwise an error code.
 */
int kbase_mmu_hw_do_unlock(struct kbase_device *kbdev, struct kbase_as *as,
			   const struct kbase_mmu_hw_op_param *op_param);

/**
 * kbase_mmu_hw_do_flush - Issue a flush operation to the MMU.
 *
 * @kbdev: Kbase device to issue the MMU operation on.
 * @as: Address space to issue the MMU operation on.
 * @op_param: Pointer to struct containing information about the MMU
 *            operation to perform.
 *
 * Issue a flush operation on the address space as per the information
 * specified inside @op_param. This function should not be called for
 * GPUs where the MMU command to flush the cache(s) is deprecated.
 * mmu_hw_mutex needs to be held when calling this function.
 *
 * Return: 0 if the operation was successful, non-zero otherwise.
 */
int kbase_mmu_hw_do_flush(struct kbase_device *kbdev, struct kbase_as *as,
			  const struct kbase_mmu_hw_op_param *op_param);

/**
 * kbase_mmu_hw_do_flush_locked - Issue a flush operation to the MMU.
 *
 * @kbdev: Kbase device to issue the MMU operation on.
 * @as: Address space to issue the MMU operation on.
 * @op_param: Pointer to struct containing information about the MMU
 *            operation to perform.
 *
 * Issue a flush operation on the address space as per the information
 * specified inside @op_param. This function should not be called for
 * GPUs where the MMU command to flush the cache(s) is deprecated.
 * Both mmu_hw_mutex and hwaccess_lock need to be held when calling this
 * function.
 *
 * Return: 0 if the operation was successful, non-zero otherwise.
 */
int kbase_mmu_hw_do_flush_locked(struct kbase_device *kbdev, struct kbase_as *as,
				 const struct kbase_mmu_hw_op_param *op_param);

/**
 * kbase_mmu_hw_do_flush_on_gpu_ctrl - Issue a flush operation to the MMU.
 *
 * @kbdev: Kbase device to issue the MMU operation on.
 * @as: Address space to issue the MMU operation on.
 * @op_param: Pointer to struct containing information about the MMU
 *            operation to perform.
 *
 * Issue a flush operation on the address space as per the information
 * specified inside @op_param. A GPU command is used to flush the cache(s)
 * instead of the MMU command.
 *
 * Return: 0 if the operation was successful, non-zero otherwise.
 */
int kbase_mmu_hw_do_flush_on_gpu_ctrl(struct kbase_device *kbdev, struct kbase_as *as,
				      const struct kbase_mmu_hw_op_param *op_param);
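/*
 * Example (illustrative sketch only, not part of this interface): the
 * locking patterns documented above. The field name kbdev->mmu_hw_mutex
 * and the surrounding error handling are assumptions about the wider
 * driver and may differ between versions.
 *
 * Flushing on a GPU that still supports the MMU flush command, with only
 * mmu_hw_mutex held:
 *
 *	mutex_lock(&kbdev->mmu_hw_mutex);
 *	err = kbase_mmu_hw_do_flush(kbdev, as, &op_param);
 *	mutex_unlock(&kbdev->mmu_hw_mutex);
 *
 * A caller that already holds hwaccess_lock as well would use
 * kbase_mmu_hw_do_flush_locked() instead, and a caller on a GPU where the
 * MMU flush command is deprecated would use
 * kbase_mmu_hw_do_flush_on_gpu_ctrl().
 */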
/**
 * kbase_mmu_hw_clear_fault - Clear a fault that has been previously reported by
 *                            the MMU.
 * @kbdev: kbase device to clear the fault from.
 * @as: address space to clear the fault from.
 * @type: The type of fault that needs to be cleared.
 *
 * Clear a bus error or page fault that has been reported by the MMU.
 */
void kbase_mmu_hw_clear_fault(struct kbase_device *kbdev, struct kbase_as *as,
		enum kbase_mmu_fault_type type);

/**
 * kbase_mmu_hw_enable_fault - Enable a fault that has been previously reported by
 *                             the MMU.
 * @kbdev: kbase device on which to re-enable the fault.
 * @as: address space on which to re-enable the fault.
 * @type: The type of fault that needs to be enabled again.
 *
 * After a page fault or bus error has been reported by the MMU, reporting of
 * these faults is disabled. Once the fault has been handled, this function
 * needs to be called to enable the page fault or bus error fault again.
 */
void kbase_mmu_hw_enable_fault(struct kbase_device *kbdev, struct kbase_as *as,
		enum kbase_mmu_fault_type type);

#endif /* _KBASE_MMU_HW_H_ */
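/*
 * Example (illustrative sketch only, not part of this interface): the
 * expected fault handling sequence for the clear/enable pair declared above.
 * handle_page_fault() is a hypothetical driver-side handler; the point is
 * that kbase_mmu_hw_clear_fault() acknowledges the fault and
 * kbase_mmu_hw_enable_fault() must follow once handling is complete so that
 * further faults of that type are reported again.
 *
 *	handle_page_fault(kbdev, as);
 *	kbase_mmu_hw_clear_fault(kbdev, as, KBASE_MMU_FAULT_TYPE_PAGE);
 *	kbase_mmu_hw_enable_fault(kbdev, as, KBASE_MMU_FAULT_TYPE_PAGE);
 */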