/*
 *
 * (C) COPYRIGHT 2010, 2012-2017 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU licence.
 *
 * A copy of the licence is included with the program, and can also be obtained
 * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
 * Boston, MA  02110-1301, USA.
 *
 */


/**
 * @file mali_kbase_mem_linux.h
 * Base kernel memory APIs, Linux implementation.
 */

#ifndef _KBASE_MEM_LINUX_H_
#define _KBASE_MEM_LINUX_H_

/** An HWC (hardware counter) dump mapping */
struct kbase_hwc_dma_mapping {
	void       *cpu_va;
	dma_addr_t  dma_pa;
	size_t      size;
};

struct kbase_va_region *kbase_mem_alloc(struct kbase_context *kctx,
		u64 va_pages, u64 commit_pages, u64 extent, u64 *flags,
		u64 *gpu_va);
int kbase_mem_query(struct kbase_context *kctx, u64 gpu_addr, int query,
		u64 *const pages);
int kbase_mem_import(struct kbase_context *kctx, enum base_mem_import_type type,
		void __user *phandle, u32 padding, u64 *gpu_va, u64 *va_pages,
		u64 *flags);
u64 kbase_mem_alias(struct kbase_context *kctx, u64 *flags, u64 stride,
		u64 nents, struct base_mem_aliasing_info *ai, u64 *num_pages);
int kbase_mem_flags_change(struct kbase_context *kctx, u64 gpu_addr,
		unsigned int flags, unsigned int mask);
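
/*
 * Illustrative sketch (not part of this interface): allocating a region and
 * reading one of its properties back. The flag variable and the query
 * constant are placeholders standing in for the BASE_MEM_* /
 * KBASE_MEM_QUERY_* values defined by the base memory API.
 *
 *	u64 flags = requested_base_mem_flags;	// hypothetical caller choice
 *	u64 gpu_va, commit_size;
 *	struct kbase_va_region *reg;
 *
 *	reg = kbase_mem_alloc(kctx, va_pages, commit_pages, 0, &flags, &gpu_va);
 *	if (!reg)
 *		return -ENOMEM;
 *
 *	// e.g. read back the committed size of the new region
 *	if (kbase_mem_query(kctx, gpu_va, KBASE_MEM_QUERY_COMMIT_SIZE, &commit_size))
 *		return -EINVAL;
 */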

/**
 * kbase_mem_commit - Change the physical backing size of a region
 *
 * @kctx: The kernel context
 * @gpu_addr: Handle to the memory region
 * @new_pages: Number of physical pages to back the region with
 *
 * Return: 0 on success or error code
 */
int kbase_mem_commit(struct kbase_context *kctx, u64 gpu_addr, u64 new_pages);
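
/*
 * A minimal sketch of resizing the physical backing of a region, assuming
 * gpu_addr refers to a growable region owned by kctx:
 *
 *	int err = kbase_mem_commit(kctx, gpu_addr, new_pages);
 *	if (err)
 *		return err;	// e.g. invalid region or out of memory
 */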

int kbase_mmap(struct file *file, struct vm_area_struct *vma);
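
/*
 * kbase_mmap() is the handler behind userspace mmap() on the kbase device
 * file; a sketch of how it would be wired into a file_operations table
 * (the table name here is illustrative):
 *
 *	static const struct file_operations kbase_fops = {
 *		.owner = THIS_MODULE,
 *		.mmap  = kbase_mmap,
 *	};
 */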

/**
 * kbase_mem_evictable_init - Initialize the ephemeral memory eviction
 * mechanism.
 * @kctx: The kbase context to initialize.
 *
 * Return: Zero on success or -errno on failure.
 */
int kbase_mem_evictable_init(struct kbase_context *kctx);

/**
 * kbase_mem_evictable_deinit - De-initialize the ephemeral memory eviction
 * mechanism.
 * @kctx: The kbase context to de-initialize.
 */
void kbase_mem_evictable_deinit(struct kbase_context *kctx);
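
/*
 * These two calls are expected to bracket the lifetime of a context; a
 * hedged sketch of the pairing during context setup and teardown:
 *
 *	// context initialisation
 *	if (kbase_mem_evictable_init(kctx))
 *		goto fail;
 *	...
 *	// context termination
 *	kbase_mem_evictable_deinit(kctx);
 */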

/**
 * kbase_mem_grow_gpu_mapping - Grow the GPU mapping of an allocation
 * @kctx:      Context the region belongs to
 * @reg:       The GPU region
 * @new_pages: The number of pages after the grow
 * @old_pages: The number of pages before the grow
 *
 * Return: 0 on success, -errno on error.
 *
 * Expand the GPU mapping to encompass the new physical pages which have
 * been added to the allocation.
 *
 * Note: Caller must be holding the region lock.
 */
int kbase_mem_grow_gpu_mapping(struct kbase_context *kctx,
		struct kbase_va_region *reg,
		u64 new_pages, u64 old_pages);
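
/*
 * Sketch of growing the GPU mapping once extra physical pages have been
 * committed. The lock/unlock helpers named here are assumptions about the
 * surrounding driver code; the documented requirement is only that the
 * region lock is held across the call:
 *
 *	kbase_gpu_vm_lock(kctx);	// assumed region-lock helper
 *	err = kbase_mem_grow_gpu_mapping(kctx, reg, new_pages, old_pages);
 *	kbase_gpu_vm_unlock(kctx);
 */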

/**
 * kbase_mem_evictable_make - Make a physical allocation eligible for eviction
 * @gpu_alloc: The physical allocation to make evictable
 *
 * Return: 0 on success, -errno on error.
 *
 * Take the provided region and make all the physical pages within it
 * reclaimable by the kernel, updating the per-process VM stats as well.
 * Remove any CPU mappings (as these can't be removed in the shrinker callback
 * because mmap_lock might already be taken), but leave the GPU mapping intact
 * until the shrinker reclaims the allocation.
 *
 * Note: Must be called with the region lock of the containing context held.
 */
int kbase_mem_evictable_make(struct kbase_mem_phy_alloc *gpu_alloc);

/**
 * kbase_mem_evictable_unmake - Remove a physical allocation's eligibility for
 * eviction.
 * @alloc: The physical allocation to remove eviction eligibility from.
 *
 * Return: True if the allocation had its backing restored, false if it
 * did not.
 *
 * Make the physical pages in the region no longer reclaimable and update the
 * per-process stats. If the shrinker has already evicted the memory, it is
 * re-allocated provided the region is still alive.
 *
 * Note: Must be called with the region lock of the containing context held.
 */
bool kbase_mem_evictable_unmake(struct kbase_mem_phy_alloc *alloc);
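
/*
 * Hedged sketch of the eviction round trip: an idle allocation is marked
 * evictable, then restored before reuse (region lock assumed held around
 * both calls, as documented above; the recovery helper is hypothetical):
 *
 *	// allocation idle: allow the shrinker to reclaim it under pressure
 *	kbase_mem_evictable_make(reg->gpu_alloc);
 *	...
 *	// allocation needed again: false means the backing was not restored
 *	if (!kbase_mem_evictable_unmake(reg->gpu_alloc))
 *		handle_lost_backing();	// hypothetical caller-side recovery
 */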

struct kbase_vmap_struct {
	u64 gpu_addr;
	struct kbase_mem_phy_alloc *cpu_alloc;
	struct kbase_mem_phy_alloc *gpu_alloc;
	phys_addr_t *cpu_pages;
	phys_addr_t *gpu_pages;
	void *addr;
	size_t size;
	bool is_cached;
};

/**
 * kbase_vmap_prot - Map a GPU VA range into the kernel safely, only if the
 * requested access permissions are supported
 * @kctx:         Context the VA range belongs to
 * @gpu_addr:     Start address of VA range
 * @size:         Size of VA range
 * @prot_request: Flags indicating how the caller will then access the memory
 * @map:          Structure to be given to kbase_vunmap() on freeing
 *
 * Return: Kernel-accessible CPU pointer to the VA range, or NULL on error
 *
 * Map a GPU VA range into the kernel. The VA range must be contained within a
 * GPU memory region. Appropriate CPU cache-flushing operations are made as
 * required, dependent on the CPU mapping for the memory region.
 *
 * This is safer than using kmap() on the pages directly, because the pages
 * here are refcounted to prevent freeing (and hence reuse elsewhere in the
 * system) until a kbase_vunmap() call.
 *
 * The flags in @prot_request should use KBASE_REG_{CPU,GPU}_{RD,WR}, to check
 * whether the region should allow the intended access, and return an error if
 * disallowed. This is essential for the security of imported memory,
 * particularly a user buf from SHM mapped into the process as RO. In that
 * case, write access must be checked if the intention is for the kernel to
 * write to the memory.
 *
 * The checks are also there to help catch access errors on memory where
 * security is not a concern: imported memory that is always RW, and memory
 * that was allocated and owned by the process attached to @kctx. In this case,
 * it helps to identify memory that was mapped with the wrong access type.
 *
 * Note: KBASE_REG_GPU_{RD,WR} flags are currently supported for legacy cases
 * where either the security of memory is solely dependent on those flags, or
 * when userspace code was expecting only the GPU to access the memory (e.g. HW
 * workarounds).
 */
void *kbase_vmap_prot(struct kbase_context *kctx, u64 gpu_addr, size_t size,
		      unsigned long prot_request, struct kbase_vmap_struct *map);
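
/*
 * Illustrative use of kbase_vmap_prot() when the kernel intends to write to
 * a buffer that may have been imported read-only. KBASE_REG_CPU_WR is the
 * permission flag described above; gpu_addr, size and the value written are
 * caller-supplied:
 *
 *	struct kbase_vmap_struct map;
 *	u32 *ptr;
 *
 *	ptr = kbase_vmap_prot(kctx, gpu_addr, size, KBASE_REG_CPU_WR, &map);
 *	if (!ptr)
 *		return -EINVAL;	// region missing or not CPU-writable
 *
 *	ptr[0] = value;		// safe: write permission was verified
 *	kbase_vunmap(kctx, &map);
 */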

/**
 * kbase_vmap - Map a GPU VA range into the kernel safely
 * @kctx:     Context the VA range belongs to
 * @gpu_addr: Start address of VA range
 * @size:     Size of VA range
 * @map:      Structure to be given to kbase_vunmap() on freeing
 *
 * Return: Kernel-accessible CPU pointer to the VA range, or NULL on error
 *
 * Map a GPU VA range into the kernel. The VA range must be contained within a
 * GPU memory region. Appropriate CPU cache-flushing operations are made as
 * required, dependent on the CPU mapping for the memory region.
 *
 * This is safer than using kmap() on the pages directly, because the pages
 * here are refcounted to prevent freeing (and hence reuse elsewhere in the
 * system) until a kbase_vunmap() call.
 *
 * kbase_vmap_prot() should be used in preference, since kbase_vmap() makes no
 * checks to ensure the security of e.g. imported user bufs from RO SHM.
 */
void *kbase_vmap(struct kbase_context *kctx, u64 gpu_addr, size_t size,
		struct kbase_vmap_struct *map);

/**
 * kbase_vunmap - Unmap a GPU VA range from the kernel
 * @kctx: Context the VA range belongs to
 * @map:  Structure describing the mapping from the corresponding kbase_vmap()
 *        call
 *
 * Unmaps a GPU VA range from the kernel, given its @map structure obtained
 * from kbase_vmap(). Appropriate CPU cache-flushing operations are made as
 * required, dependent on the CPU mapping for the memory region.
 *
 * The reference taken on pages during kbase_vmap() is released.
 */
void kbase_vunmap(struct kbase_context *kctx, struct kbase_vmap_struct *map);

/** @brief Allocate memory from kernel space and map it onto the GPU
 *
 * @param kctx   The context used for the allocation/mapping
 * @param size   The size of the allocation in bytes
 * @param handle An opaque structure used to contain the state needed to free the memory
 * @return the VA for kernel space and GPU MMU
 */
void *kbase_va_alloc(struct kbase_context *kctx, u32 size, struct kbase_hwc_dma_mapping *handle);

/** @brief Free/unmap memory allocated by kbase_va_alloc
 *
 * @param kctx   The context used for the allocation/mapping
 * @param handle An opaque structure returned by the kbase_va_alloc function.
 */
void kbase_va_free(struct kbase_context *kctx, struct kbase_hwc_dma_mapping *handle);
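
/*
 * A hedged sketch of the allocate/free pairing for a hardware counter dump
 * buffer; dump_size is a caller-chosen size in bytes and a NULL return is
 * assumed to indicate failure:
 *
 *	struct kbase_hwc_dma_mapping handle;
 *	void *cpu_va = kbase_va_alloc(kctx, dump_size, &handle);
 *
 *	if (!cpu_va)
 *		return -ENOMEM;
 *	...
 *	kbase_va_free(kctx, &handle);
 */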

extern const struct vm_operations_struct kbase_vm_ops;

#endif				/* _KBASE_MEM_LINUX_H_ */