/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/dma-mapping.h>
#include <linux/moduleparam.h>

#include "virtgpu_drv.h"

static int virtio_gpu_virglrenderer_workaround = 1;
module_param_named(virglhack, virtio_gpu_virglrenderer_workaround, int, 0400);

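/*
 * Allocate a non-zero, host-visible resource ID for a new object.  With
 * the virglrenderer workaround enabled, IDs come from a plain sequence
 * counter and are intentionally never reused; otherwise they are taken
 * from (and later returned to) the per-device IDA.
 */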
static int virtio_gpu_resource_id_get(struct virtio_gpu_device *vgdev,
				       uint32_t *resid)
{
	if (virtio_gpu_virglrenderer_workaround) {
		/*
		 * Hack to avoid re-using resource IDs.
		 *
		 * virglrenderer versions up to (and including) 0.7.0
		 * can't deal with that.  virglrenderer commit
		 * "f91a9dd35715 Fix unlinking resources from hash
		 * table." (Feb 2019) fixes the bug.
		 */
		static atomic_t seqno = ATOMIC_INIT(0);
		int handle = atomic_inc_return(&seqno);

		*resid = handle + 1;
	} else {
		int handle = ida_alloc(&vgdev->resource_ida, GFP_KERNEL);

		if (handle < 0)
			return handle;
		*resid = handle + 1;
	}

	return 0;
}

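/*
 * Release a resource ID.  This is a no-op while the virglrenderer
 * workaround is active, since IDs are never recycled in that mode.
 */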
static void virtio_gpu_resource_id_put(struct virtio_gpu_device *vgdev, uint32_t id)
{
	if (!virtio_gpu_virglrenderer_workaround) {
		ida_free(&vgdev->resource_ida, id - 1);
	}
}

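/*
 * Tear down the backing of an object: drop its resource ID and, for
 * shmem-backed objects, DMA-unmap and free the sg table, unpin the
 * pages and free the underlying GEM shmem object.
 */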
void virtio_gpu_cleanup_object(struct virtio_gpu_object *bo)
{
	struct virtio_gpu_device *vgdev = bo->base.base.dev->dev_private;

	virtio_gpu_resource_id_put(vgdev, bo->hw_res_handle);
	if (virtio_gpu_is_shmem(bo)) {
		struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);

		if (shmem->pages) {
			if (shmem->mapped) {
				dma_unmap_sgtable(vgdev->vdev->dev.parent,
						  shmem->pages, DMA_TO_DEVICE, 0);
				shmem->mapped = 0;
			}

			sg_free_table(shmem->pages);
			kfree(shmem->pages);
			shmem->pages = NULL;
			drm_gem_shmem_unpin(&bo->base.base);
		}

		drm_gem_shmem_free_object(&bo->base.base);
	}
}

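/*
 * GEM .free callback.  If the resource exists on the host, queue an
 * unref command and let the completion handler call
 * virtio_gpu_cleanup_object(); otherwise clean up immediately.
 */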
static void virtio_gpu_free_object(struct drm_gem_object *obj)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
	struct virtio_gpu_device *vgdev = bo->base.base.dev->dev_private;

	if (bo->created) {
		virtio_gpu_cmd_unref_resource(vgdev, bo);
		virtio_gpu_notify(vgdev);
		/* completion handler calls virtio_gpu_cleanup_object() */
		return;
	}
	virtio_gpu_cleanup_object(bo);
}

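/* GEM object callbacks for shmem-backed virtio-gpu objects. */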
static const struct drm_gem_object_funcs virtio_gpu_shmem_funcs = {
	.free = virtio_gpu_free_object,
	.open = virtio_gpu_gem_object_open,
	.close = virtio_gpu_gem_object_close,

	.print_info = drm_gem_shmem_print_info,
	.pin = drm_gem_shmem_pin,
	.unpin = drm_gem_shmem_unpin,
	.get_sg_table = drm_gem_shmem_get_sg_table,
	.vmap = drm_gem_shmem_vmap,
	.vunmap = drm_gem_shmem_vunmap,
	.mmap = drm_gem_shmem_mmap,
};

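/* An object is shmem-backed iff it uses the shmem object funcs above. */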
bool virtio_gpu_is_shmem(struct virtio_gpu_object *bo)
{
	return bo->base.base.funcs == &virtio_gpu_shmem_funcs;
}

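/*
 * Allocate the driver-specific GEM object (a drm_gem_shmem_object
 * wrapper) and point it at the virtio-gpu object funcs.  The DRM core
 * invokes this whenever it needs a new GEM object for this device.
 */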
struct drm_gem_object *virtio_gpu_create_object(struct drm_device *dev,
						size_t size)
{
	struct virtio_gpu_object_shmem *shmem;
	struct drm_gem_shmem_object *dshmem;

	shmem = kzalloc(sizeof(*shmem), GFP_KERNEL);
	if (!shmem)
		return NULL;

	dshmem = &shmem->base.base;
	dshmem->base.funcs = &virtio_gpu_shmem_funcs;
	dshmem->map_cached = true;
	return &dshmem->base;
}

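/*
 * Pin the object's shmem pages, build an sg table for them (DMA-mapped
 * when the transport uses the DMA API) and translate it into an array
 * of virtio_gpu_mem_entry suitable for passing to the host.
 */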
static int virtio_gpu_object_shmem_init(struct virtio_gpu_device *vgdev,
					struct virtio_gpu_object *bo,
					struct virtio_gpu_mem_entry **ents,
					unsigned int *nents)
{
	bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev);
	struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);
	struct scatterlist *sg;
	int si, ret;

	ret = drm_gem_shmem_pin(&bo->base.base);
	if (ret < 0)
		return -EINVAL;

	/*
	 * virtio_gpu uses drm_gem_shmem_get_sg_table instead of
	 * drm_gem_shmem_get_pages_sgt because virtio has its own set of
	 * dma-ops. This is discouraged for other drivers, but should be fine
	 * since virtio_gpu doesn't support dma-buf import from other devices.
	 */
	shmem->pages = drm_gem_shmem_get_sg_table(&bo->base.base);
	if (!shmem->pages) {
		drm_gem_shmem_unpin(&bo->base.base);
		return -EINVAL;
	}

	if (use_dma_api) {
		ret = dma_map_sgtable(vgdev->vdev->dev.parent,
				      shmem->pages, DMA_TO_DEVICE, 0);
		if (ret)
			return ret;
		*nents = shmem->mapped = shmem->pages->nents;
	} else {
		*nents = shmem->pages->orig_nents;
	}

	*ents = kvmalloc_array(*nents,
			       sizeof(struct virtio_gpu_mem_entry),
			       GFP_KERNEL);
	if (!(*ents)) {
		DRM_ERROR("failed to allocate ent list\n");
		return -ENOMEM;
	}

	if (use_dma_api) {
		for_each_sgtable_dma_sg(shmem->pages, sg, si) {
			(*ents)[si].addr = cpu_to_le64(sg_dma_address(sg));
			(*ents)[si].length = cpu_to_le32(sg_dma_len(sg));
			(*ents)[si].padding = 0;
		}
	} else {
		for_each_sgtable_sg(shmem->pages, sg, si) {
			(*ents)[si].addr = cpu_to_le64(sg_phys(sg));
			(*ents)[si].length = cpu_to_le32(sg->length);
			(*ents)[si].padding = 0;
		}
	}

	return 0;
}

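/*
 * Create a virtio-gpu object: allocate a shmem-backed GEM object and a
 * resource ID, submit the (2D or 3D) resource-create command and attach
 * the backing pages.  When a fence is supplied, the object is placed in
 * an object array and its reservation is locked so the create command
 * can be fenced.
 */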
int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
			     struct virtio_gpu_object_params *params,
			     struct virtio_gpu_object **bo_ptr,
			     struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_object_array *objs = NULL;
	struct drm_gem_shmem_object *shmem_obj;
	struct virtio_gpu_object *bo;
	struct virtio_gpu_mem_entry *ents;
	unsigned int nents;
	int ret;

	*bo_ptr = NULL;

	params->size = roundup(params->size, PAGE_SIZE);
	shmem_obj = drm_gem_shmem_create(vgdev->ddev, params->size);
	if (IS_ERR(shmem_obj))
		return PTR_ERR(shmem_obj);
	bo = gem_to_virtio_gpu_obj(&shmem_obj->base);

	ret = virtio_gpu_resource_id_get(vgdev, &bo->hw_res_handle);
	if (ret < 0)
		goto err_free_gem;

	bo->dumb = params->dumb;

	if (fence) {
		ret = -ENOMEM;
		objs = virtio_gpu_array_alloc(1);
		if (!objs)
			goto err_put_id;
		virtio_gpu_array_add_obj(objs, &bo->base.base);

		ret = virtio_gpu_array_lock_resv(objs);
		if (ret != 0)
			goto err_put_objs;
	}

	if (params->virgl) {
		virtio_gpu_cmd_resource_create_3d(vgdev, bo, params,
						  objs, fence);
	} else {
		virtio_gpu_cmd_create_resource(vgdev, bo, params,
					       objs, fence);
	}

	ret = virtio_gpu_object_shmem_init(vgdev, bo, &ents, &nents);
	if (ret != 0) {
		virtio_gpu_free_object(&shmem_obj->base);
		return ret;
	}

	virtio_gpu_object_attach(vgdev, bo, ents, nents);

	*bo_ptr = bo;
	return 0;

err_put_objs:
	virtio_gpu_array_put_free(objs);
err_put_id:
	virtio_gpu_resource_id_put(vgdev, bo->hw_res_handle);
err_free_gem:
	drm_gem_shmem_free_object(&shmem_obj->base);
	return ret;
}