/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
 *
 * (C) COPYRIGHT 2019-2022 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU license.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 */

#ifndef _KBASE_MMU_H_
#define _KBASE_MMU_H_

#include <uapi/gpu/arm/bifrost/mali_base_kernel.h>

#define KBASE_MMU_PAGE_ENTRIES 512
#define KBASE_MMU_INVALID_PGD_ADDRESS (~(phys_addr_t)0)

struct kbase_context;
struct kbase_mmu_table;
struct kbase_va_region;

/**
 * enum kbase_caller_mmu_sync_info - MMU-synchronous caller info.
 * A pointer to this type is passed down from the outer-most callers in the kbase
 * module - where the information resides as to the synchronous / asynchronous
 * nature of the call flow, with respect to MMU operations, i.e. whether the call
 * flow relates to existing GPU work or comes from requests (like ioctl) from
 * user-space, power management, etc.
 *
 * @CALLER_MMU_UNSET_SYNCHRONICITY: Default value; must be invalid to avoid an
 *                                  accidental choice of a 'valid' value.
 * @CALLER_MMU_SYNC: Arbitrary value for 'synchronous' that isn't easy to choose
 *                   by accident.
 * @CALLER_MMU_ASYNC: Also hard to choose by accident.
 */
enum kbase_caller_mmu_sync_info {
	CALLER_MMU_UNSET_SYNCHRONICITY,
	CALLER_MMU_SYNC = 0x02,
	CALLER_MMU_ASYNC
};

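/*
 * Illustrative sketch (not part of the original header): a caller acting on a
 * user-space request, such as an ioctl-driven mapping, would typically pass
 * CALLER_MMU_SYNC down the call chain, whereas a path servicing already
 * running GPU work would pass CALLER_MMU_ASYNC. kctx->mmu and kctx->as_nr are
 * assumed to come from the driver's context structure; the mapping parameters
 * are assumed to be set up by the caller:
 *
 *	const enum kbase_caller_mmu_sync_info mmu_sync_info = CALLER_MMU_SYNC;
 *
 *	kbase_mmu_insert_pages(kbdev, &kctx->mmu, vpfn, phys, nr, flags,
 *			       kctx->as_nr, group_id, mmu_sync_info, reg, false);
 */
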
/**
 * enum kbase_mmu_op_type - enum for MMU operations
 * @KBASE_MMU_OP_NONE:        To help catch uninitialized struct
 * @KBASE_MMU_OP_FIRST:       The lower boundary of enum
 * @KBASE_MMU_OP_LOCK:        Lock memory region
 * @KBASE_MMU_OP_UNLOCK:      Unlock memory region
 * @KBASE_MMU_OP_FLUSH_PT:    Flush page table (CLN+INV L2 only)
 * @KBASE_MMU_OP_FLUSH_MEM:   Flush memory (CLN+INV L2+LSC)
 * @KBASE_MMU_OP_COUNT:       The upper boundary of enum
 */
enum kbase_mmu_op_type {
	KBASE_MMU_OP_NONE = 0, /* Must be zero */
	KBASE_MMU_OP_FIRST, /* Must be the first non-zero op */
	KBASE_MMU_OP_LOCK = KBASE_MMU_OP_FIRST,
	KBASE_MMU_OP_UNLOCK,
	KBASE_MMU_OP_FLUSH_PT,
	KBASE_MMU_OP_FLUSH_MEM,
	KBASE_MMU_OP_COUNT /* Must be the last in enum */
};

/**
 * kbase_mmu_as_init() - Initialise a GPU address space object.
 *
 * @kbdev: The kbase device structure for the device (must be a valid pointer).
 * @i:     Array index of the address space object.
 *
 * This is called from device probe to initialise an address space object
 * of the device.
 *
 * Return: 0 on success and a non-zero value on failure.
 */
int kbase_mmu_as_init(struct kbase_device *kbdev, unsigned int i);

/**
 * kbase_mmu_as_term() - Terminate address space object.
 *
 * @kbdev: The kbase device structure for the device (must be a valid pointer).
 * @i:     Array index of address space object.
 *
 * This is called upon device termination to destroy
 * the address space object of the device.
 */
void kbase_mmu_as_term(struct kbase_device *kbdev, unsigned int i);

/**
 * kbase_mmu_init - Initialise an object representing GPU page tables
 *
 * @kbdev:    Instance of GPU platform device, allocated from the probe method.
 * @mmut:     GPU page tables to be initialized.
 * @kctx:     Optional kbase context, may be NULL if this set of MMU tables
 *            is not associated with a context.
 * @group_id: The physical group ID from which to allocate GPU page tables.
 *            Valid range is 0..(MEMORY_GROUP_MANAGER_NR_GROUPS-1).
 *
 * The structure should be terminated using kbase_mmu_term()
 *
 * Return:    0 if successful, otherwise a negative error code.
 */
int kbase_mmu_init(struct kbase_device *kbdev, struct kbase_mmu_table *mmut,
		struct kbase_context *kctx, int group_id);

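/*
 * Illustrative sketch (not part of the original header): a minimal init/term
 * pairing for a context's set of page tables, assuming kbdev and kctx are
 * valid and that memory group 0 is acceptable for the caller:
 *
 *	int err = kbase_mmu_init(kbdev, &kctx->mmu, kctx, 0);
 *
 *	if (err)
 *		return err;
 *	...
 *	kbase_mmu_term(kbdev, &kctx->mmu);
 */
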
/**
 * kbase_mmu_interrupt - Process an MMU interrupt.
 *
 * @kbdev:       Pointer to the kbase device for which the interrupt happened.
 * @irq_stat:    Value of the MMU_IRQ_STATUS register.
 *
 * Process the MMU interrupt that was reported by the &kbase_device.
 */
void kbase_mmu_interrupt(struct kbase_device *kbdev, u32 irq_stat);

/**
 * kbase_mmu_term - Terminate an object representing GPU page tables
 *
 * @kbdev: Instance of GPU platform device, allocated from the probe method.
 * @mmut:  GPU page tables to be destroyed.
 *
 * This will free any page tables that have been allocated
 */
void kbase_mmu_term(struct kbase_device *kbdev, struct kbase_mmu_table *mmut);

/**
 * kbase_mmu_create_ate - Create an address translation entry
 *
 * @kbdev:    Instance of GPU platform device, allocated from the probe method.
 * @phy:      Physical address of the page to be mapped for GPU access.
 * @flags:    Bitmask of attributes of the GPU memory region being mapped.
 * @level:    Page table level for which to build an address translation entry.
 * @group_id: The physical memory group in which the page was allocated.
 *            Valid range is 0..(MEMORY_GROUP_MANAGER_NR_GROUPS-1).
 *
 * This function creates an address translation entry to encode the physical
 * address of a page to be mapped for access by the GPU, along with any extra
 * attributes required for the GPU memory region.
 *
 * Return: An address translation entry, either in LPAE or AArch64 format
 *         (depending on the driver's configuration).
 */
u64 kbase_mmu_create_ate(struct kbase_device *kbdev,
	struct tagged_addr phy, unsigned long flags, int level, int group_id);

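
/*
 * Illustrative sketch (not part of the original header): building a bottom
 * level ATE for a newly allocated page. as_tagged(), MIDGARD_MMU_BOTTOMLEVEL
 * and reg->flags come from other kbase headers and are assumed to be available
 * to the caller:
 *
 *	u64 ate = kbase_mmu_create_ate(kbdev, as_tagged(page_to_phys(p)),
 *				       reg->flags, MIDGARD_MMU_BOTTOMLEVEL,
 *				       group_id);
 */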
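/*
 * The declarations below form the family of page-insertion helpers. They are
 * not documented in the original header; the summary here is inferred from the
 * parameter names and from the documentation of related functions in this file:
 * @vpfn is the GPU virtual page frame number at which mapping starts, @phys the
 * array (or single tagged address) of physical pages to map, @nr the number of
 * pages, @flags the attribute bitmask for the region, @group_id the physical
 * memory group, @as_nr the GPU address space number, @mmu_sync_info the
 * synchronicity of the calling flow, @reg the owning VA region, and
 * @ignore_page_migration whether page migration metadata should be ignored.
 */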
int kbase_mmu_insert_pages_no_flush(struct kbase_device *kbdev, struct kbase_mmu_table *mmut,
				    u64 vpfn, struct tagged_addr *phys, size_t nr,
				    unsigned long flags, int group_id, u64 *dirty_pgds,
				    struct kbase_va_region *reg, bool ignore_page_migration);
int kbase_mmu_insert_pages(struct kbase_device *kbdev, struct kbase_mmu_table *mmut, u64 vpfn,
			   struct tagged_addr *phys, size_t nr, unsigned long flags, int as_nr,
			   int group_id, enum kbase_caller_mmu_sync_info mmu_sync_info,
			   struct kbase_va_region *reg, bool ignore_page_migration);
int kbase_mmu_insert_imported_pages(struct kbase_device *kbdev, struct kbase_mmu_table *mmut,
				    u64 vpfn, struct tagged_addr *phys, size_t nr,
				    unsigned long flags, int as_nr, int group_id,
				    enum kbase_caller_mmu_sync_info mmu_sync_info,
				    struct kbase_va_region *reg);
int kbase_mmu_insert_aliased_pages(struct kbase_device *kbdev, struct kbase_mmu_table *mmut,
				   u64 vpfn, struct tagged_addr *phys, size_t nr,
				   unsigned long flags, int as_nr, int group_id,
				   enum kbase_caller_mmu_sync_info mmu_sync_info,
				   struct kbase_va_region *reg);
int kbase_mmu_insert_single_page(struct kbase_context *kctx, u64 vpfn, struct tagged_addr phys,
				 size_t nr, unsigned long flags, int group_id,
				 enum kbase_caller_mmu_sync_info mmu_sync_info,
				 bool ignore_page_migration);
int kbase_mmu_insert_single_imported_page(struct kbase_context *kctx, u64 vpfn,
					  struct tagged_addr phys, size_t nr, unsigned long flags,
					  int group_id,
					  enum kbase_caller_mmu_sync_info mmu_sync_info);
int kbase_mmu_insert_single_aliased_page(struct kbase_context *kctx, u64 vpfn,
					 struct tagged_addr phys, size_t nr, unsigned long flags,
					 int group_id,
					 enum kbase_caller_mmu_sync_info mmu_sync_info);

/**
 * kbase_mmu_teardown_pages - Remove GPU virtual addresses from the MMU page table
 *
 * @kbdev:    Pointer to kbase device.
 * @mmut:     Pointer to GPU MMU page table.
 * @vpfn:     Start page frame number of the GPU virtual pages to unmap.
 * @phys:     Array of physical pages currently mapped to the virtual
 *            pages to unmap, or NULL. This is used for GPU cache maintenance
 *            and page migration support.
 * @nr_phys_pages: Number of physical pages to flush.
 * @nr_virt_pages: Number of virtual pages whose PTEs should be destroyed.
 * @as_nr:    Address space number, for GPU cache maintenance operations
 *            that happen outside a specific kbase context.
 * @ignore_page_migration: Whether page migration metadata should be ignored.
 *
 * We actually discard the ATE and free the page table pages if no valid entries
 * exist in the PGD.
 *
 * IMPORTANT: This uses kbasep_js_runpool_release_ctx() when the context is
 * currently scheduled into the runpool, and so potentially uses a lot of locks.
 * These locks must be taken in the correct order with respect to others
 * already held by the caller. Refer to kbasep_js_runpool_release_ctx() for more
 * information.
 *
 * The @p phys pointer to physical pages is not necessary for unmapping virtual memory,
 * but it is used for fine-grained GPU cache maintenance. If @p phys is NULL,
 * GPU cache maintenance will be done as usual, that is, by invalidating the whole
 * GPU caches instead of specific physical address ranges.
 *
 * Return: 0 on success, otherwise an error code.
 */
int kbase_mmu_teardown_pages(struct kbase_device *kbdev, struct kbase_mmu_table *mmut, u64 vpfn,
			     struct tagged_addr *phys, size_t nr_phys_pages, size_t nr_virt_pages,
			     int as_nr, bool ignore_page_migration);

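/*
 * Illustrative sketch (not part of the original header): unmapping nr pages
 * previously mapped at vpfn, assuming the backing pages are still known so
 * that fine-grained cache maintenance can be done. Passing NULL for @phys
 * would fall back to whole-cache invalidation, as described above:
 *
 *	err = kbase_mmu_teardown_pages(kbdev, &kctx->mmu, vpfn, phys, nr, nr,
 *				       kctx->as_nr, false);
 */

/*
 * kbase_mmu_update_pages() is not documented in the original header; judging
 * from its signature it presumably updates the attributes and/or physical
 * backing of pages that are already mapped in the given context's page tables.
 */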
int kbase_mmu_update_pages(struct kbase_context *kctx, u64 vpfn,
			   struct tagged_addr *phys, size_t nr,
			   unsigned long flags, int const group_id);
#if MALI_USE_CSF
/**
 * kbase_mmu_update_csf_mcu_pages - Update MCU mappings with changes of phys and flags
 *
 * @kbdev:    Pointer to kbase device.
 * @vpfn:     Virtual PFN (Page Frame Number) of the first page to update
 * @phys:     Pointer to the array of tagged physical addresses of the physical
 *            pages that are pointed to by the page table entries (that need to
 *            be updated).
 * @nr:       Number of pages to update
 * @flags:    Bitmask of attributes of the GPU memory region being updated.
 * @group_id: The physical memory group in which the page was allocated.
 *            Valid range is 0..(MEMORY_GROUP_MANAGER_NR_GROUPS-1).
 *
 * Return: 0 on success, otherwise an error code.
 */
int kbase_mmu_update_csf_mcu_pages(struct kbase_device *kbdev, u64 vpfn, struct tagged_addr *phys,
				   size_t nr, unsigned long flags, int const group_id);
#endif

/**
 * kbase_mmu_migrate_page - Migrate GPU mappings and content between memory pages
 *
 * @old_phys:     Old physical page to be replaced.
 * @new_phys:     New physical page used to replace old physical page.
 * @old_dma_addr: DMA address of the old page.
 * @new_dma_addr: DMA address of the new page.
 * @level:        MMU page table level of the provided PGD.
 *
 * The page migration process consists of two main steps:
 *
 * 1) Copy the content of the old page to the new page.
 * 2) Remap the virtual page, that is: replace either the ATE (if the old page
 *    was a regular page) or the PTE (if the old page was used as a PGD) in the
 *    MMU page table with the new page.
 *
 * During the process, the MMU region is locked to prevent GPU access to the
 * virtual memory page that is being remapped.
 *
 * Before copying the content of the old page to the new page and while the
 * MMU region is locked, a GPU cache flush is performed to make sure that
 * pending GPU writes are finalized to the old page before copying.
 * That is necessary because otherwise there's a risk that GPU writes might
 * be finalized to the old page, and not to the new page, after migration.
 * The MMU region is unlocked only at the end of the migration operation.
 *
 * Return: 0 on success, otherwise an error code.
 */
int kbase_mmu_migrate_page(struct tagged_addr old_phys, struct tagged_addr new_phys,
			   dma_addr_t old_dma_addr, dma_addr_t new_dma_addr, int level);

/**
 * kbase_mmu_flush_pa_range() - Flush physical address range from the GPU caches
 *
 * @kbdev:    Instance of GPU platform device, allocated from the probe method.
 * @kctx:     Pointer to kbase context, it can be NULL if the physical address
 *            range is not associated with a User created context.
 * @phys:     Starting address of the physical range to start the operation on.
 * @size:     Number of bytes to work on.
 * @flush_op: Type of cache flush operation to perform.
 *
 * Issue a cache flush physical range command. This function won't perform any
 * flush if the GPU doesn't support the FLUSH_PA_RANGE command. The flush is
 * performed only if the context has a JASID assigned to it.
 * This function is basically a wrapper for kbase_gpu_cache_flush_pa_range_and_busy_wait().
 */
void kbase_mmu_flush_pa_range(struct kbase_device *kbdev, struct kbase_context *kctx,
			      phys_addr_t phys, size_t size,
			      enum kbase_mmu_op_type flush_op);

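/*
 * Illustrative sketch (not part of the original header): flushing a single
 * page table page from the L2 cache after it has been modified, where
 * pgd_phys is a hypothetical variable holding that page's physical address:
 *
 *	kbase_mmu_flush_pa_range(kbdev, kctx, pgd_phys, PAGE_SIZE,
 *				 KBASE_MMU_OP_FLUSH_PT);
 */
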
/**
 * kbase_mmu_bus_fault_interrupt - Process a bus fault interrupt.
 *
 * @kbdev:       Pointer to the kbase device for which bus fault was reported.
 * @status:      Value of the GPU_FAULTSTATUS register.
 * @as_nr:       GPU address space for which the bus fault occurred.
 *
 * Process the bus fault interrupt that was reported for a particular GPU
 * address space.
 *
 * Return: zero if the operation was successful, non-zero otherwise.
 */
int kbase_mmu_bus_fault_interrupt(struct kbase_device *kbdev, u32 status,
		u32 as_nr);

/**
 * kbase_mmu_gpu_fault_interrupt() - Report a GPU fault.
 *
 * @kbdev:    Kbase device pointer
 * @status:   GPU fault status
 * @as_nr:    Faulty address space
 * @address:  GPU fault address
 * @as_valid: true if address space is valid
 *
 * This function builds the GPU fault information and submits a work item to
 * report the details of the fault.
 */
void kbase_mmu_gpu_fault_interrupt(struct kbase_device *kbdev, u32 status,
		u32 as_nr, u64 address, bool as_valid);

/**
 * kbase_context_mmu_group_id_get - Decode a memory group ID from
 *                                  base_context_create_flags
 *
 * @flags: Bitmask of flags to pass to base_context_init.
 *
 * Memory allocated for GPU page tables will come from the returned group.
 *
 * Return: Physical memory group ID. Valid range is 0..(BASE_MEM_GROUP_COUNT-1).
 */
static inline int
kbase_context_mmu_group_id_get(base_context_create_flags const flags)
{
	KBASE_DEBUG_ASSERT(flags ==
			   (flags & BASEP_CONTEXT_CREATE_ALLOWED_FLAGS));
	return (int)BASE_CONTEXT_MMU_GROUP_ID_GET(flags);
}

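/*
 * Illustrative sketch (not part of the original header): the decoded group ID
 * is suitable for the @group_id parameter of kbase_mmu_init(), assuming
 * create_flags came from a context creation request:
 *
 *	const int group_id = kbase_context_mmu_group_id_get(create_flags);
 *
 *	err = kbase_mmu_init(kbdev, &kctx->mmu, kctx, group_id);
 */
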
#endif /* _KBASE_MMU_H_ */