/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) Rockchip Electronics Co.Ltd
 * Author: Felix Zeng <felix.zeng@rock-chips.com>
 */

#ifndef __LINUX_RKNPU_GEM_H
#define __LINUX_RKNPU_GEM_H

#include <linux/mm_types.h>
#include <linux/version.h>

#include <drm/drm_device.h>
#include <drm/drm_vma_manager.h>
#include <drm/drm_gem.h>
#include <drm/drm_mode.h>

#if KERNEL_VERSION(4, 14, 0) > LINUX_VERSION_CODE
#include <drm/drm_mem_util.h>
#endif

#include "rknpu_mm.h"

#define to_rknpu_obj(x) container_of(x, struct rknpu_gem_object, base)

/*
 * rknpu drm buffer structure.
 *
 * @base: a gem object.
 *	- a new handle to this gem object is created
 *	by drm_gem_handle_create().
 * @flags: memory type and cache attributes of the allocated buffer.
 * @size: size requested from user space, in bytes; the size is aligned
 *	to page granularity.
 * @cookie: cookie returned by dma_alloc_attrs().
 * @kv_addr: kernel virtual address of the allocated memory region.
 * @dma_addr: bus address (accessed by DMA) of the allocated memory region.
 *	- this is the physical address when no IOMMU is used and the
 *	device address when an IOMMU is used.
 * @pages: array of backing pages.
 * @sgt: imported sg_table.
 *
 * P.S. this object is exposed to user space as kms_bo.handle so
 *	user space can access the buffer through kms_bo.handle.
 */
struct rknpu_gem_object {
	struct drm_gem_object base;
	unsigned int flags;
	unsigned long size;
	unsigned long sram_size;
	unsigned long nbuf_size;
	struct rknpu_mm_obj *sram_obj;
	dma_addr_t iova_start;
	unsigned long iova_size;
	void *cookie;
	void __iomem *kv_addr;
	dma_addr_t dma_addr;
	unsigned long dma_attrs;
	unsigned long num_pages;
	struct page **pages;
	struct sg_table *sgt;
	struct drm_mm_node mm_node;
};
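
/*
 * Example (illustrative sketch only, not part of this header's API):
 * converting a generic gem object back to the driver-specific wrapper with
 * to_rknpu_obj() and reading a few of the fields above. The local variable
 * names are hypothetical.
 *
 *	struct rknpu_gem_object *rknpu_obj = to_rknpu_obj(obj);
 *
 *	pr_debug("buffer: size=%lu dma_addr=%pad pages=%lu\n",
 *		 rknpu_obj->size, &rknpu_obj->dma_addr, rknpu_obj->num_pages);
 */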

enum rknpu_cache_type {
	RKNPU_CACHE_SRAM = 1 << 0,
	RKNPU_CACHE_NBUF = 1 << 1,
};

/* create a new buffer with gem object */
struct rknpu_gem_object *rknpu_gem_object_create(struct drm_device *dev,
						 unsigned int flags,
						 unsigned long size,
						 unsigned long sram_size);

/* destroy a buffer with gem object */
void rknpu_gem_object_destroy(struct rknpu_gem_object *rknpu_obj);
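
/*
 * Example (illustrative sketch, assuming a valid struct drm_device *drm, a
 * buffer size in bytes and no SRAM request; it also assumes the create helper
 * reports failure via an ERR_PTR-encoded pointer):
 *
 *	struct rknpu_gem_object *rknpu_obj;
 *
 *	rknpu_obj = rknpu_gem_object_create(drm, flags, size, 0);
 *	if (IS_ERR(rknpu_obj))
 *		return PTR_ERR(rknpu_obj);
 *
 *	... use rknpu_obj->dma_addr / rknpu_obj->kv_addr ...
 *
 *	rknpu_gem_object_destroy(rknpu_obj);
 */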

/* request gem object creation and buffer allocation of the requested size */
int rknpu_gem_create_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv);

/* get the fake offset of a gem object that can be used with mmap. */
int rknpu_gem_map_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv);

int rknpu_gem_destroy_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv);

/*
 * get a reference to the rknpu drm object;
 * the gem object reference count is increased.
 */
static inline void rknpu_gem_object_get(struct drm_gem_object *obj)
{
#if KERNEL_VERSION(4, 13, 0) < LINUX_VERSION_CODE
	drm_gem_object_get(obj);
#else
	drm_gem_object_reference(obj);
#endif
}

/*
 * put an rknpu drm object acquired from rknpu_gem_object_find() or
 * rknpu_gem_object_get(); the gem object reference count is decreased.
 */
static inline void rknpu_gem_object_put(struct drm_gem_object *obj)
{
#if KERNEL_VERSION(5, 9, 0) <= LINUX_VERSION_CODE
	drm_gem_object_put(obj);
#elif KERNEL_VERSION(4, 13, 0) < LINUX_VERSION_CODE
	drm_gem_object_put_unlocked(obj);
#else
	drm_gem_object_unreference_unlocked(obj);
#endif
}
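
/*
 * Example (illustrative sketch): taking and releasing a reference around a
 * period in which the buffer must stay alive, e.g. while a job that uses it
 * is queued.
 *
 *	rknpu_gem_object_get(&rknpu_obj->base);
 *	... submit work that accesses rknpu_obj ...
 *	rknpu_gem_object_put(&rknpu_obj->base);
 */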

/*
 * get the rknpu drm object backing a gem handle; this function can be used
 * by other drivers such as 2d/3d acceleration drivers.
 * Note: the reference taken by the handle lookup is dropped again before
 * returning, so no extra reference is held on behalf of the caller.
 */
static inline struct rknpu_gem_object *
rknpu_gem_object_find(struct drm_file *filp, unsigned int handle)
{
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(filp, handle);
	if (!obj) {
		// DRM_ERROR("failed to lookup gem object.\n");
		return NULL;
	}

	rknpu_gem_object_put(obj);

	return to_rknpu_obj(obj);
}
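
/*
 * Example (illustrative sketch; the ioctl argument struct and its "handle"
 * member are hypothetical stand-ins for the driver's real uapi structures):
 *
 *	struct hypothetical_args *args = data;
 *	struct rknpu_gem_object *rknpu_obj;
 *
 *	rknpu_obj = rknpu_gem_object_find(file_priv, args->handle);
 *	if (!rknpu_obj)
 *		return -EINVAL;
 *
 *	... use rknpu_obj->dma_addr / rknpu_obj->size; remember that
 *	rknpu_gem_object_find() does not leave an extra reference held ...
 */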

/* get buffer information for the memory region allocated by gem. */
int rknpu_gem_get_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv);

/* free gem object. */
void rknpu_gem_free_object(struct drm_gem_object *obj);

/* create memory region for drm framebuffer. */
int rknpu_gem_dumb_create(struct drm_file *file_priv, struct drm_device *dev,
			  struct drm_mode_create_dumb *args);

#if KERNEL_VERSION(4, 19, 0) > LINUX_VERSION_CODE
/* map memory region for drm framebuffer to user space. */
int rknpu_gem_dumb_map_offset(struct drm_file *file_priv,
			      struct drm_device *dev, uint32_t handle,
			      uint64_t *offset);
#endif

/* page fault handler: maps the faulting (virtual) address to physical memory. */
#if KERNEL_VERSION(4, 15, 0) <= LINUX_VERSION_CODE
vm_fault_t rknpu_gem_fault(struct vm_fault *vmf);
#elif KERNEL_VERSION(4, 14, 0) <= LINUX_VERSION_CODE
int rknpu_gem_fault(struct vm_fault *vmf);
#else
int rknpu_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
#endif

int rknpu_gem_mmap_obj(struct drm_gem_object *obj, struct vm_area_struct *vma);

/* set vm_flags; the vm attributes can be changed here. */
int rknpu_gem_mmap(struct file *filp, struct vm_area_struct *vma);
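
/*
 * Example (illustrative sketch for kernels >= 4.15; everything except the
 * standard vm_operations_struct/file_operations fields and DRM helpers is a
 * hypothetical name):
 *
 *	static const struct vm_operations_struct hypothetical_gem_vm_ops = {
 *		.fault = rknpu_gem_fault,
 *		.open = drm_gem_vm_open,
 *		.close = drm_gem_vm_close,
 *	};
 *
 *	static const struct file_operations hypothetical_drm_fops = {
 *		.owner = THIS_MODULE,
 *		.open = drm_open,
 *		.release = drm_release,
 *		.mmap = rknpu_gem_mmap,
 *	};
 */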

/* low-level interface prime helpers */
#if KERNEL_VERSION(4, 13, 0) <= LINUX_VERSION_CODE
struct drm_gem_object *rknpu_gem_prime_import(struct drm_device *dev,
					      struct dma_buf *dma_buf);
#endif
struct sg_table *rknpu_gem_prime_get_sg_table(struct drm_gem_object *obj);
struct drm_gem_object *
rknpu_gem_prime_import_sg_table(struct drm_device *dev,
				struct dma_buf_attachment *attach,
				struct sg_table *sgt);
#if KERNEL_VERSION(6, 1, 0) > LINUX_VERSION_CODE
void *rknpu_gem_prime_vmap(struct drm_gem_object *obj);
void rknpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
#else
int rknpu_gem_prime_vmap(struct drm_gem_object *obj, struct iosys_map *map);
void rknpu_gem_prime_vunmap(struct drm_gem_object *obj, struct iosys_map *map);
#endif
int rknpu_gem_prime_mmap(struct drm_gem_object *obj,
			 struct vm_area_struct *vma);
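
/*
 * Example (illustrative sketch for older kernels where the PRIME hooks still
 * live in struct drm_driver; on newer kernels most of them have moved to
 * struct drm_gem_object_funcs, so treat this only as an orientation aid; the
 * driver struct name is hypothetical):
 *
 *	static struct drm_driver hypothetical_drm_driver = {
 *		...
 *		.gem_free_object_unlocked = rknpu_gem_free_object,
 *		.gem_prime_import = rknpu_gem_prime_import,
 *		.gem_prime_get_sg_table = rknpu_gem_prime_get_sg_table,
 *		.gem_prime_import_sg_table = rknpu_gem_prime_import_sg_table,
 *		.gem_prime_vmap = rknpu_gem_prime_vmap,
 *		.gem_prime_vunmap = rknpu_gem_prime_vunmap,
 *		.gem_prime_mmap = rknpu_gem_prime_mmap,
 *		.dumb_create = rknpu_gem_dumb_create,
 *		...
 *	};
 */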

int rknpu_gem_sync_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv);

static inline void *rknpu_gem_alloc_page(size_t nr_pages)
{
#if KERNEL_VERSION(4, 13, 0) <= LINUX_VERSION_CODE
	return kvmalloc_array(nr_pages, sizeof(struct page *),
			      GFP_KERNEL | __GFP_ZERO);
#else
	return drm_calloc_large(nr_pages, sizeof(struct page *));
#endif
}

static inline void rknpu_gem_free_page(void *pages)
{
#if KERNEL_VERSION(4, 13, 0) <= LINUX_VERSION_CODE
	kvfree(pages);
#else
	drm_free_large(pages);
#endif
}
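
/*
 * Example (illustrative sketch): allocating a zeroed array of page pointers
 * sized for rknpu_obj->num_pages and releasing it again.
 *
 *	rknpu_obj->pages = rknpu_gem_alloc_page(rknpu_obj->num_pages);
 *	if (!rknpu_obj->pages)
 *		return -ENOMEM;
 *
 *	... fill rknpu_obj->pages ...
 *
 *	rknpu_gem_free_page(rknpu_obj->pages);
 *	rknpu_obj->pages = NULL;
 */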

#endif