xref: /OK3568_Linux_fs/kernel/drivers/gpu/drm/vkms/vkms_gem.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0+
2*4882a593Smuzhiyun 
3*4882a593Smuzhiyun #include <linux/dma-buf.h>
4*4882a593Smuzhiyun #include <linux/shmem_fs.h>
5*4882a593Smuzhiyun #include <linux/vmalloc.h>
6*4882a593Smuzhiyun #include <drm/drm_prime.h>
7*4882a593Smuzhiyun 
8*4882a593Smuzhiyun #include "vkms_drv.h"
9*4882a593Smuzhiyun 
/*
 * Allocate and initialize a vkms GEM object backed by shmem.
 *
 * The requested size is rounded up to a whole number of pages before the
 * underlying GEM object is initialized.  Returns the new object on success
 * or an ERR_PTR() on allocation/initialization failure.
 */
static struct vkms_gem_object *__vkms_gem_create(struct drm_device *dev,
						 u64 size)
{
	struct vkms_gem_object *vkms_obj;
	int err;

	vkms_obj = kzalloc(sizeof(*vkms_obj), GFP_KERNEL);
	if (!vkms_obj)
		return ERR_PTR(-ENOMEM);

	err = drm_gem_object_init(dev, &vkms_obj->gem,
				  roundup(size, PAGE_SIZE));
	if (err) {
		kfree(vkms_obj);
		return ERR_PTR(err);
	}

	/* Serializes access to ->pages / ->vaddr across map/unmap/fault. */
	mutex_init(&vkms_obj->pages_lock);

	return vkms_obj;
}
31*4882a593Smuzhiyun 
vkms_gem_free_object(struct drm_gem_object * obj)32*4882a593Smuzhiyun void vkms_gem_free_object(struct drm_gem_object *obj)
33*4882a593Smuzhiyun {
34*4882a593Smuzhiyun 	struct vkms_gem_object *gem = container_of(obj, struct vkms_gem_object,
35*4882a593Smuzhiyun 						   gem);
36*4882a593Smuzhiyun 
37*4882a593Smuzhiyun 	WARN_ON(gem->pages);
38*4882a593Smuzhiyun 	WARN_ON(gem->vaddr);
39*4882a593Smuzhiyun 
40*4882a593Smuzhiyun 	mutex_destroy(&gem->pages_lock);
41*4882a593Smuzhiyun 	drm_gem_object_release(obj);
42*4882a593Smuzhiyun 	kfree(gem);
43*4882a593Smuzhiyun }
44*4882a593Smuzhiyun 
vkms_gem_fault(struct vm_fault * vmf)45*4882a593Smuzhiyun vm_fault_t vkms_gem_fault(struct vm_fault *vmf)
46*4882a593Smuzhiyun {
47*4882a593Smuzhiyun 	struct vm_area_struct *vma = vmf->vma;
48*4882a593Smuzhiyun 	struct vkms_gem_object *obj = vma->vm_private_data;
49*4882a593Smuzhiyun 	unsigned long vaddr = vmf->address;
50*4882a593Smuzhiyun 	pgoff_t page_offset;
51*4882a593Smuzhiyun 	loff_t num_pages;
52*4882a593Smuzhiyun 	vm_fault_t ret = VM_FAULT_SIGBUS;
53*4882a593Smuzhiyun 
54*4882a593Smuzhiyun 	page_offset = (vaddr - vma->vm_start) >> PAGE_SHIFT;
55*4882a593Smuzhiyun 	num_pages = DIV_ROUND_UP(obj->gem.size, PAGE_SIZE);
56*4882a593Smuzhiyun 
57*4882a593Smuzhiyun 	if (page_offset > num_pages)
58*4882a593Smuzhiyun 		return VM_FAULT_SIGBUS;
59*4882a593Smuzhiyun 
60*4882a593Smuzhiyun 	mutex_lock(&obj->pages_lock);
61*4882a593Smuzhiyun 	if (obj->pages) {
62*4882a593Smuzhiyun 		get_page(obj->pages[page_offset]);
63*4882a593Smuzhiyun 		vmf->page = obj->pages[page_offset];
64*4882a593Smuzhiyun 		ret = 0;
65*4882a593Smuzhiyun 	}
66*4882a593Smuzhiyun 	mutex_unlock(&obj->pages_lock);
67*4882a593Smuzhiyun 	if (ret) {
68*4882a593Smuzhiyun 		struct page *page;
69*4882a593Smuzhiyun 		struct address_space *mapping;
70*4882a593Smuzhiyun 
71*4882a593Smuzhiyun 		mapping = file_inode(obj->gem.filp)->i_mapping;
72*4882a593Smuzhiyun 		page = shmem_read_mapping_page(mapping, page_offset);
73*4882a593Smuzhiyun 
74*4882a593Smuzhiyun 		if (!IS_ERR(page)) {
75*4882a593Smuzhiyun 			vmf->page = page;
76*4882a593Smuzhiyun 			ret = 0;
77*4882a593Smuzhiyun 		} else {
78*4882a593Smuzhiyun 			switch (PTR_ERR(page)) {
79*4882a593Smuzhiyun 			case -ENOSPC:
80*4882a593Smuzhiyun 			case -ENOMEM:
81*4882a593Smuzhiyun 				ret = VM_FAULT_OOM;
82*4882a593Smuzhiyun 				break;
83*4882a593Smuzhiyun 			case -EBUSY:
84*4882a593Smuzhiyun 				ret = VM_FAULT_RETRY;
85*4882a593Smuzhiyun 				break;
86*4882a593Smuzhiyun 			case -EFAULT:
87*4882a593Smuzhiyun 			case -EINVAL:
88*4882a593Smuzhiyun 				ret = VM_FAULT_SIGBUS;
89*4882a593Smuzhiyun 				break;
90*4882a593Smuzhiyun 			default:
91*4882a593Smuzhiyun 				WARN_ON(PTR_ERR(page));
92*4882a593Smuzhiyun 				ret = VM_FAULT_SIGBUS;
93*4882a593Smuzhiyun 				break;
94*4882a593Smuzhiyun 			}
95*4882a593Smuzhiyun 		}
96*4882a593Smuzhiyun 	}
97*4882a593Smuzhiyun 	return ret;
98*4882a593Smuzhiyun }
99*4882a593Smuzhiyun 
/*
 * Create a vkms GEM object and a userspace handle for it.
 *
 * On success the handle owns one reference and the returned pointer carries
 * the initial allocation reference, which the caller must drop with
 * drm_gem_object_put() when done.  Returns an ERR_PTR() on failure.
 */
static struct drm_gem_object *vkms_gem_create(struct drm_device *dev,
					      struct drm_file *file,
					      u32 *handle,
					      u64 size)
{
	struct vkms_gem_object *obj;
	int ret;

	if (!file || !dev || !handle)
		return ERR_PTR(-EINVAL);

	obj = __vkms_gem_create(dev, size);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	ret = drm_gem_handle_create(file, &obj->gem, handle);
	if (ret) {
		/*
		 * Drop the allocation reference so the object is freed.
		 * The original code returned without this put, leaking the
		 * GEM object on handle-creation failure.
		 */
		drm_gem_object_put(&obj->gem);
		return ERR_PTR(ret);
	}

	return &obj->gem;
}
121*4882a593Smuzhiyun 
vkms_dumb_create(struct drm_file * file,struct drm_device * dev,struct drm_mode_create_dumb * args)122*4882a593Smuzhiyun int vkms_dumb_create(struct drm_file *file, struct drm_device *dev,
123*4882a593Smuzhiyun 		     struct drm_mode_create_dumb *args)
124*4882a593Smuzhiyun {
125*4882a593Smuzhiyun 	struct drm_gem_object *gem_obj;
126*4882a593Smuzhiyun 	u64 pitch, size;
127*4882a593Smuzhiyun 
128*4882a593Smuzhiyun 	if (!args || !dev || !file)
129*4882a593Smuzhiyun 		return -EINVAL;
130*4882a593Smuzhiyun 
131*4882a593Smuzhiyun 	pitch = args->width * DIV_ROUND_UP(args->bpp, 8);
132*4882a593Smuzhiyun 	size = pitch * args->height;
133*4882a593Smuzhiyun 
134*4882a593Smuzhiyun 	if (!size)
135*4882a593Smuzhiyun 		return -EINVAL;
136*4882a593Smuzhiyun 
137*4882a593Smuzhiyun 	gem_obj = vkms_gem_create(dev, file, &args->handle, size);
138*4882a593Smuzhiyun 	if (IS_ERR(gem_obj))
139*4882a593Smuzhiyun 		return PTR_ERR(gem_obj);
140*4882a593Smuzhiyun 
141*4882a593Smuzhiyun 	args->size = gem_obj->size;
142*4882a593Smuzhiyun 	args->pitch = pitch;
143*4882a593Smuzhiyun 
144*4882a593Smuzhiyun 	drm_gem_object_put(gem_obj);
145*4882a593Smuzhiyun 
146*4882a593Smuzhiyun 	DRM_DEBUG_DRIVER("Created object of size %lld\n", size);
147*4882a593Smuzhiyun 
148*4882a593Smuzhiyun 	return 0;
149*4882a593Smuzhiyun }
150*4882a593Smuzhiyun 
/*
 * Return the object's page array, pinning the shmem pages on first use.
 *
 * Lockless publication: concurrent callers may both allocate a page array;
 * the cmpxchg() ensures only one is installed and the loser releases its
 * copy.  Returns the installed array or an ERR_PTR() from
 * drm_gem_get_pages().
 */
static struct page **_get_pages(struct vkms_gem_object *vkms_obj)
{
	struct drm_gem_object *gem_obj = &vkms_obj->gem;

	if (!vkms_obj->pages) {
		struct page **pages = drm_gem_get_pages(gem_obj);

		if (IS_ERR(pages))
			return pages;

		/* Install only if nobody beat us to it; otherwise back out. */
		if (cmpxchg(&vkms_obj->pages, NULL, pages))
			drm_gem_put_pages(gem_obj, pages, false, true);
	}

	return vkms_obj->pages;
}
167*4882a593Smuzhiyun 
/*
 * Drop one kernel-mapping reference taken by vkms_gem_vmap().
 *
 * When the count reaches zero the vmap is torn down and the pinned pages
 * are released.  Unbalanced calls (count already below 1) are tolerated:
 * they only WARN and return without touching state.
 */
void vkms_gem_vunmap(struct drm_gem_object *obj)
{
	struct vkms_gem_object *vkms_obj = drm_gem_to_vkms_gem(obj);

	mutex_lock(&vkms_obj->pages_lock);
	if (vkms_obj->vmap_count < 1) {
		/* No outstanding vmap: nothing should still be mapped/pinned. */
		WARN_ON(vkms_obj->vaddr);
		WARN_ON(vkms_obj->pages);
		mutex_unlock(&vkms_obj->pages_lock);
		return;
	}

	vkms_obj->vmap_count--;

	/* Last reference gone: unmap and unpin the backing pages. */
	if (vkms_obj->vmap_count == 0) {
		vunmap(vkms_obj->vaddr);
		vkms_obj->vaddr = NULL;
		drm_gem_put_pages(obj, vkms_obj->pages, false, true);
		vkms_obj->pages = NULL;
	}

	mutex_unlock(&vkms_obj->pages_lock);
}
191*4882a593Smuzhiyun 
/*
 * Map the object's pages into kernel address space, refcounted.
 *
 * The first caller pins the pages (via _get_pages()) and builds the vmap;
 * subsequent callers just bump vmap_count.  Each successful call must be
 * balanced by vkms_gem_vunmap().  Returns 0 on success or a negative errno.
 */
int vkms_gem_vmap(struct drm_gem_object *obj)
{
	struct vkms_gem_object *vkms_obj = drm_gem_to_vkms_gem(obj);
	int ret = 0;

	mutex_lock(&vkms_obj->pages_lock);

	if (!vkms_obj->vaddr) {
		/* Object sizes are page-rounded at creation, so this is exact. */
		unsigned int n_pages = obj->size >> PAGE_SHIFT;
		struct page **pages = _get_pages(vkms_obj);

		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto out;
		}

		vkms_obj->vaddr = vmap(pages, n_pages, VM_MAP, PAGE_KERNEL);
		if (!vkms_obj->vaddr)
			goto err_vmap;
	}

	vkms_obj->vmap_count++;
	goto out;

err_vmap:
	/* vmap failed: unpin the pages we just acquired. */
	ret = -ENOMEM;
	drm_gem_put_pages(obj, vkms_obj->pages, false, true);
	vkms_obj->pages = NULL;
out:
	mutex_unlock(&vkms_obj->pages_lock);
	return ret;
}
224*4882a593Smuzhiyun 
/*
 * PRIME import: wrap an sg_table from another driver in a vkms GEM object.
 *
 * Allocates a vkms object sized to the dma-buf, builds a page-pointer array
 * from the scatter-gather table, and returns the new GEM object (or an
 * ERR_PTR() on failure).
 *
 * NOTE(review): ->pages here is a kvmalloc'd array the importer owns,
 * whereas vkms_gem_free_object() WARN_ONs on a non-NULL ->pages and never
 * frees it — from this file alone the release path for imported objects
 * looks leaky; verify against the rest of the driver.
 */
struct drm_gem_object *
vkms_prime_import_sg_table(struct drm_device *dev,
			   struct dma_buf_attachment *attach,
			   struct sg_table *sg)
{
	struct vkms_gem_object *obj;
	int npages;

	obj = __vkms_gem_create(dev, attach->dmabuf->size);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	npages = PAGE_ALIGN(attach->dmabuf->size) / PAGE_SIZE;
	DRM_DEBUG_PRIME("Importing %d pages\n", npages);

	obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!obj->pages) {
		vkms_gem_free_object(&obj->gem);
		return ERR_PTR(-ENOMEM);
	}

	/* Flatten the sg_table into per-page pointers (no DMA addresses). */
	drm_prime_sg_to_page_addr_arrays(sg, obj->pages, NULL, npages);
	return &obj->gem;
}
249