xref: /OK3568_Linux_fs/kernel/drivers/rknpu/include/rknpu_gem.h (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 /*
3  * Copyright (C) Rockchip Electronics Co.Ltd
4  * Author: Felix Zeng <felix.zeng@rock-chips.com>
5  */
6 
7 #ifndef __LINUX_RKNPU_GEM_H
8 #define __LINUX_RKNPU_GEM_H
9 
10 #include <linux/mm_types.h>
11 #include <linux/version.h>
12 
13 #include <drm/drm_device.h>
14 #include <drm/drm_vma_manager.h>
15 #include <drm/drm_gem.h>
16 #include <drm/drm_mode.h>
17 
18 #if KERNEL_VERSION(4, 14, 0) > LINUX_VERSION_CODE
19 #include <drm/drm_mem_util.h>
20 #endif
21 
22 #include "rknpu_mm.h"
23 
24 #define to_rknpu_obj(x) container_of(x, struct rknpu_gem_object, base)
25 
/*
 * rknpu drm buffer structure.
 *
 * @base: a gem object.
 *	- a new handle to this gem object would be created
 *	by drm_gem_handle_create().
 * @flags: indicate memory type to allocated buffer and cache attribute
 *	(includes the enum rknpu_cache_type region bits).
 * @size: size requested from user, in bytes and this size is aligned
 *	in page unit.
 * @sram_size: bytes of the buffer backed by SRAM — presumably only
 *	meaningful when RKNPU_CACHE_SRAM is set in @flags; confirm
 *	against rknpu_gem.c.
 * @nbuf_size: bytes of the buffer backed by NBUF — presumably only
 *	meaningful when RKNPU_CACHE_NBUF is set in @flags; confirm
 *	against rknpu_gem.c.
 * @sram_obj: rknpu_mm allocation handle for the SRAM portion.
 * @iova_start: start of the I/O virtual address range for this buffer
 *	(dma_addr_t, i.e. a device-visible address).
 * @iova_size: length of the I/O virtual address range, in bytes.
 * @cookie: cookie returned by dma_alloc_attrs
 * @kv_addr: kernel virtual address to allocated memory region.
 * @dma_addr: bus address(accessed by dma) to allocated memory region.
 *	- this address could be physical address without IOMMU and
 *	device address with IOMMU.
 * @dma_attrs: DMA_ATTR_* attribute flags — presumably the attrs used
 *	at allocation time and needed again on free; confirm in
 *	rknpu_gem.c.
 * @num_pages: number of entries in @pages.
 * @pages: Array of backing pages.
 * @sgt: Imported sg_table.
 * @mm_node: drm_mm node — NOTE(review): presumably used for address
 *	range management of this buffer; confirm in rknpu_gem.c.
 *
 * P.S. this object would be transferred to user as kms_bo.handle so
 *	user can access the buffer through kms_bo.handle.
 */
struct rknpu_gem_object {
	struct drm_gem_object base;
	unsigned int flags;
	unsigned long size;
	unsigned long sram_size;
	unsigned long nbuf_size;
	struct rknpu_mm_obj *sram_obj;
	dma_addr_t iova_start;
	unsigned long iova_size;
	void *cookie;
	void __iomem *kv_addr;
	dma_addr_t dma_addr;
	unsigned long dma_attrs;
	unsigned long num_pages;
	struct page **pages;
	struct sg_table *sgt;
	struct drm_mm_node mm_node;
};
64 
/*
 * Bit flags marking which special on-chip buffer region backs (part of)
 * an allocation; combined into rknpu_gem_object.flags.
 */
enum rknpu_cache_type {
	RKNPU_CACHE_SRAM = 0x1, /* buffer uses the SRAM region (sram_size) */
	RKNPU_CACHE_NBUF = 0x2, /* buffer uses the NBUF region (nbuf_size) */
};
69 
/* create a new buffer with gem object */
struct rknpu_gem_object *rknpu_gem_object_create(struct drm_device *dev,
						 unsigned int flags,
						 unsigned long size,
						 unsigned long sram_size);

/* destroy a buffer with gem object */
void rknpu_gem_object_destroy(struct rknpu_gem_object *rknpu_obj);

/* request gem object creation and buffer allocation as the size */
int rknpu_gem_create_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv);

/* get fake-offset of gem object that can be used with mmap. */
int rknpu_gem_map_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv);

/* destroy the gem object referenced by the user-supplied handle */
int rknpu_gem_destroy_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv);
89 
90 /*
91  * get rknpu drm object,
92  * gem object reference count would be increased.
93  */
rknpu_gem_object_get(struct drm_gem_object * obj)94 static inline void rknpu_gem_object_get(struct drm_gem_object *obj)
95 {
96 #if KERNEL_VERSION(4, 13, 0) < LINUX_VERSION_CODE
97 	drm_gem_object_get(obj);
98 #else
99 	drm_gem_object_reference(obj);
100 #endif
101 }
102 
103 /*
104  * put rknpu drm object acquired from rknpu_gem_object_find() or rknpu_gem_object_get(),
105  * gem object reference count would be decreased.
106  */
rknpu_gem_object_put(struct drm_gem_object * obj)107 static inline void rknpu_gem_object_put(struct drm_gem_object *obj)
108 {
109 #if KERNEL_VERSION(5, 9, 0) <= LINUX_VERSION_CODE
110 	drm_gem_object_put(obj);
111 #elif KERNEL_VERSION(4, 13, 0) < LINUX_VERSION_CODE
112 	drm_gem_object_put_unlocked(obj);
113 #else
114 	drm_gem_object_unreference_unlocked(obj);
115 #endif
116 }
117 
/*
 * Look up the rknpu gem object behind a userspace gem handle.
 *
 * NOTE(review): drm_gem_object_lookup() takes a reference, but this
 * function drops it again immediately below — so, contrary to what the
 * original comment claimed, the returned pointer is NOT reference-counted
 * on behalf of the caller.  It stays valid only while the handle (or some
 * other reference) keeps the object alive, and the caller must NOT call
 * rknpu_gem_object_put() on the result.
 */
static inline struct rknpu_gem_object *
rknpu_gem_object_find(struct drm_file *filp, unsigned int handle)
{
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(filp, handle);
	if (!obj) {
		/* no gem object is bound to this handle */
		return NULL;
	}

	/* drop the lookup reference; rely on the handle to keep obj alive */
	rknpu_gem_object_put(obj);

	return to_rknpu_obj(obj);
}
138 
/* get buffer information to memory region allocated by gem. */
int rknpu_gem_get_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv);

/* free gem object. */
void rknpu_gem_free_object(struct drm_gem_object *obj);

/* create memory region for drm framebuffer. */
int rknpu_gem_dumb_create(struct drm_file *file_priv, struct drm_device *dev,
			  struct drm_mode_create_dumb *args);

#if KERNEL_VERSION(4, 19, 0) > LINUX_VERSION_CODE
/* map memory region for drm framebuffer to user space. */
int rknpu_gem_dumb_map_offset(struct drm_file *file_priv,
			      struct drm_device *dev, uint32_t handle,
			      uint64_t *offset);
#endif

/* page fault handler and mmap fault address(virtual) to physical memory. */
/*
 * The fault handler prototype changed twice upstream: 4.14 dropped the
 * vma argument, and 4.15 introduced the vm_fault_t return type.
 */
#if KERNEL_VERSION(4, 15, 0) <= LINUX_VERSION_CODE
vm_fault_t rknpu_gem_fault(struct vm_fault *vmf);
#elif KERNEL_VERSION(4, 14, 0) <= LINUX_VERSION_CODE
int rknpu_gem_fault(struct vm_fault *vmf);
#else
int rknpu_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
#endif

/* map the given gem object into the supplied vma */
int rknpu_gem_mmap_obj(struct drm_gem_object *obj, struct vm_area_struct *vma);

/* set vm_flags and we can change the vm attribute to other one at here. */
int rknpu_gem_mmap(struct file *filp, struct vm_area_struct *vma);
170 
/* low-level interface prime helpers */
#if KERNEL_VERSION(4, 13, 0) <= LINUX_VERSION_CODE
struct drm_gem_object *rknpu_gem_prime_import(struct drm_device *dev,
					      struct dma_buf *dma_buf);
#endif
struct sg_table *rknpu_gem_prime_get_sg_table(struct drm_gem_object *obj);
struct drm_gem_object *
rknpu_gem_prime_import_sg_table(struct drm_device *dev,
				struct dma_buf_attachment *attach,
				struct sg_table *sgt);
/* vmap/vunmap prototypes follow the DRM switch to struct iosys_map in 6.1 */
#if KERNEL_VERSION(6, 1, 0) > LINUX_VERSION_CODE
void *rknpu_gem_prime_vmap(struct drm_gem_object *obj);
void rknpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
#else
int rknpu_gem_prime_vmap(struct drm_gem_object *obj, struct iosys_map *map);
void rknpu_gem_prime_vunmap(struct drm_gem_object *obj, struct iosys_map *map);
#endif
int rknpu_gem_prime_mmap(struct drm_gem_object *obj,
			 struct vm_area_struct *vma);

/* cache-sync ioctl — presumably CPU cache flush/invalidate for a buffer;
 * confirm against the implementation in rknpu_gem.c.
 */
int rknpu_gem_sync_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv);
193 
rknpu_gem_alloc_page(size_t nr_pages)194 static inline void *rknpu_gem_alloc_page(size_t nr_pages)
195 {
196 #if KERNEL_VERSION(4, 13, 0) <= LINUX_VERSION_CODE
197 	return kvmalloc_array(nr_pages, sizeof(struct page *),
198 			      GFP_KERNEL | __GFP_ZERO);
199 #else
200 	return drm_calloc_large(nr_pages, sizeof(struct page *));
201 #endif
202 }
203 
rknpu_gem_free_page(void * pages)204 static inline void rknpu_gem_free_page(void *pages)
205 {
206 #if KERNEL_VERSION(4, 13, 0) <= LINUX_VERSION_CODE
207 	kvfree(pages);
208 #else
209 	drm_free_large(pages);
210 #endif
211 }
212 
213 #endif
214