xref: /OK3568_Linux_fs/kernel/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0 OR MIT
2*4882a593Smuzhiyun /**************************************************************************
3*4882a593Smuzhiyun  *
4*4882a593Smuzhiyun  * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
5*4882a593Smuzhiyun  *
6*4882a593Smuzhiyun  * Permission is hereby granted, free of charge, to any person obtaining a
7*4882a593Smuzhiyun  * copy of this software and associated documentation files (the
8*4882a593Smuzhiyun  * "Software"), to deal in the Software without restriction, including
9*4882a593Smuzhiyun  * without limitation the rights to use, copy, modify, merge, publish,
10*4882a593Smuzhiyun  * distribute, sub license, and/or sell copies of the Software, and to
11*4882a593Smuzhiyun  * permit persons to whom the Software is furnished to do so, subject to
12*4882a593Smuzhiyun  * the following conditions:
13*4882a593Smuzhiyun  *
14*4882a593Smuzhiyun  * The above copyright notice and this permission notice (including the
15*4882a593Smuzhiyun  * next paragraph) shall be included in all copies or substantial portions
16*4882a593Smuzhiyun  * of the Software.
17*4882a593Smuzhiyun  *
18*4882a593Smuzhiyun  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19*4882a593Smuzhiyun  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20*4882a593Smuzhiyun  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21*4882a593Smuzhiyun  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22*4882a593Smuzhiyun  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23*4882a593Smuzhiyun  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24*4882a593Smuzhiyun  * USE OR OTHER DEALINGS IN THE SOFTWARE.
25*4882a593Smuzhiyun  *
26*4882a593Smuzhiyun  **************************************************************************/
27*4882a593Smuzhiyun 
28*4882a593Smuzhiyun #include "vmwgfx_drv.h"
29*4882a593Smuzhiyun #include <drm/vmwgfx_drm.h>
30*4882a593Smuzhiyun #include "vmwgfx_kms.h"
31*4882a593Smuzhiyun #include "device_include/svga3d_caps.h"
32*4882a593Smuzhiyun 
/*
 * Compatibility 3D-caps record handed to legacy (non guest-backed-aware)
 * user-space on guest-backed-object capable devices: a caps record header
 * followed by one (index, value) pair per device capability.
 * Filled in by vmw_fill_compat_cap().
 */
struct svga_3d_compat_cap {
	SVGA3dCapsRecordHeader header;
	SVGA3dCapPair pairs[SVGA3D_DEVCAP_MAX];
};
37*4882a593Smuzhiyun 
vmw_getparam_ioctl(struct drm_device * dev,void * data,struct drm_file * file_priv)38*4882a593Smuzhiyun int vmw_getparam_ioctl(struct drm_device *dev, void *data,
39*4882a593Smuzhiyun 		       struct drm_file *file_priv)
40*4882a593Smuzhiyun {
41*4882a593Smuzhiyun 	struct vmw_private *dev_priv = vmw_priv(dev);
42*4882a593Smuzhiyun 	struct drm_vmw_getparam_arg *param =
43*4882a593Smuzhiyun 	    (struct drm_vmw_getparam_arg *)data;
44*4882a593Smuzhiyun 	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
45*4882a593Smuzhiyun 
46*4882a593Smuzhiyun 	switch (param->param) {
47*4882a593Smuzhiyun 	case DRM_VMW_PARAM_NUM_STREAMS:
48*4882a593Smuzhiyun 		param->value = vmw_overlay_num_overlays(dev_priv);
49*4882a593Smuzhiyun 		break;
50*4882a593Smuzhiyun 	case DRM_VMW_PARAM_NUM_FREE_STREAMS:
51*4882a593Smuzhiyun 		param->value = vmw_overlay_num_free_overlays(dev_priv);
52*4882a593Smuzhiyun 		break;
53*4882a593Smuzhiyun 	case DRM_VMW_PARAM_3D:
54*4882a593Smuzhiyun 		param->value = vmw_fifo_have_3d(dev_priv) ? 1 : 0;
55*4882a593Smuzhiyun 		break;
56*4882a593Smuzhiyun 	case DRM_VMW_PARAM_HW_CAPS:
57*4882a593Smuzhiyun 		param->value = dev_priv->capabilities;
58*4882a593Smuzhiyun 		break;
59*4882a593Smuzhiyun 	case DRM_VMW_PARAM_HW_CAPS2:
60*4882a593Smuzhiyun 		param->value = dev_priv->capabilities2;
61*4882a593Smuzhiyun 		break;
62*4882a593Smuzhiyun 	case DRM_VMW_PARAM_FIFO_CAPS:
63*4882a593Smuzhiyun 		param->value = dev_priv->fifo.capabilities;
64*4882a593Smuzhiyun 		break;
65*4882a593Smuzhiyun 	case DRM_VMW_PARAM_MAX_FB_SIZE:
66*4882a593Smuzhiyun 		param->value = dev_priv->prim_bb_mem;
67*4882a593Smuzhiyun 		break;
68*4882a593Smuzhiyun 	case DRM_VMW_PARAM_FIFO_HW_VERSION:
69*4882a593Smuzhiyun 	{
70*4882a593Smuzhiyun 		u32 *fifo_mem = dev_priv->mmio_virt;
71*4882a593Smuzhiyun 		const struct vmw_fifo_state *fifo = &dev_priv->fifo;
72*4882a593Smuzhiyun 
73*4882a593Smuzhiyun 		if ((dev_priv->capabilities & SVGA_CAP_GBOBJECTS)) {
74*4882a593Smuzhiyun 			param->value = SVGA3D_HWVERSION_WS8_B1;
75*4882a593Smuzhiyun 			break;
76*4882a593Smuzhiyun 		}
77*4882a593Smuzhiyun 
78*4882a593Smuzhiyun 		param->value =
79*4882a593Smuzhiyun 			vmw_mmio_read(fifo_mem +
80*4882a593Smuzhiyun 				      ((fifo->capabilities &
81*4882a593Smuzhiyun 					SVGA_FIFO_CAP_3D_HWVERSION_REVISED) ?
82*4882a593Smuzhiyun 				       SVGA_FIFO_3D_HWVERSION_REVISED :
83*4882a593Smuzhiyun 				       SVGA_FIFO_3D_HWVERSION));
84*4882a593Smuzhiyun 		break;
85*4882a593Smuzhiyun 	}
86*4882a593Smuzhiyun 	case DRM_VMW_PARAM_MAX_SURF_MEMORY:
87*4882a593Smuzhiyun 		if ((dev_priv->capabilities & SVGA_CAP_GBOBJECTS) &&
88*4882a593Smuzhiyun 		    !vmw_fp->gb_aware)
89*4882a593Smuzhiyun 			param->value = dev_priv->max_mob_pages * PAGE_SIZE / 2;
90*4882a593Smuzhiyun 		else
91*4882a593Smuzhiyun 			param->value = dev_priv->memory_size;
92*4882a593Smuzhiyun 		break;
93*4882a593Smuzhiyun 	case DRM_VMW_PARAM_3D_CAPS_SIZE:
94*4882a593Smuzhiyun 		if ((dev_priv->capabilities & SVGA_CAP_GBOBJECTS) &&
95*4882a593Smuzhiyun 		    vmw_fp->gb_aware)
96*4882a593Smuzhiyun 			param->value = SVGA3D_DEVCAP_MAX * sizeof(uint32_t);
97*4882a593Smuzhiyun 		else if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS)
98*4882a593Smuzhiyun 			param->value = sizeof(struct svga_3d_compat_cap) +
99*4882a593Smuzhiyun 				sizeof(uint32_t);
100*4882a593Smuzhiyun 		else
101*4882a593Smuzhiyun 			param->value = (SVGA_FIFO_3D_CAPS_LAST -
102*4882a593Smuzhiyun 					SVGA_FIFO_3D_CAPS + 1) *
103*4882a593Smuzhiyun 				sizeof(uint32_t);
104*4882a593Smuzhiyun 		break;
105*4882a593Smuzhiyun 	case DRM_VMW_PARAM_MAX_MOB_MEMORY:
106*4882a593Smuzhiyun 		vmw_fp->gb_aware = true;
107*4882a593Smuzhiyun 		param->value = dev_priv->max_mob_pages * PAGE_SIZE;
108*4882a593Smuzhiyun 		break;
109*4882a593Smuzhiyun 	case DRM_VMW_PARAM_MAX_MOB_SIZE:
110*4882a593Smuzhiyun 		param->value = dev_priv->max_mob_size;
111*4882a593Smuzhiyun 		break;
112*4882a593Smuzhiyun 	case DRM_VMW_PARAM_SCREEN_TARGET:
113*4882a593Smuzhiyun 		param->value =
114*4882a593Smuzhiyun 			(dev_priv->active_display_unit == vmw_du_screen_target);
115*4882a593Smuzhiyun 		break;
116*4882a593Smuzhiyun 	case DRM_VMW_PARAM_DX:
117*4882a593Smuzhiyun 		param->value = has_sm4_context(dev_priv);
118*4882a593Smuzhiyun 		break;
119*4882a593Smuzhiyun 	case DRM_VMW_PARAM_SM4_1:
120*4882a593Smuzhiyun 		param->value = has_sm4_1_context(dev_priv);
121*4882a593Smuzhiyun 		break;
122*4882a593Smuzhiyun 	case DRM_VMW_PARAM_SM5:
123*4882a593Smuzhiyun 		param->value = has_sm5_context(dev_priv);
124*4882a593Smuzhiyun 		break;
125*4882a593Smuzhiyun 	default:
126*4882a593Smuzhiyun 		return -EINVAL;
127*4882a593Smuzhiyun 	}
128*4882a593Smuzhiyun 
129*4882a593Smuzhiyun 	return 0;
130*4882a593Smuzhiyun }
131*4882a593Smuzhiyun 
/*
 * vmw_mask_legacy_multisample - Filter a deprecated devcap value.
 *
 * @cap: Device capability index being reported.
 * @fmt_value: Raw value read from the device for that capability.
 *
 * Older user-space used MULTISAMPLE_MASKABLESAMPLES (now the dead cap
 * SVGA3D_DEVCAP_DEAD5) to probe supported sample counts.  Multisample
 * counts were never supported for backing MOBs, and the virtual device
 * marks the cap deprecated, so report 0 for it and pass every other
 * capability value through unchanged.
 */
static u32 vmw_mask_legacy_multisample(unsigned int cap, u32 fmt_value)
{
	return (cap == SVGA3D_DEVCAP_DEAD5) ? 0 : fmt_value;
}
147*4882a593Smuzhiyun 
/*
 * vmw_fill_compat_cap - Build a legacy-format 3D caps record.
 *
 * @dev_priv: Device private.
 * @bounce: Destination buffer, laid out as a struct svga_3d_compat_cap.
 * @size: Size of @bounce in bytes.
 *
 * Reads device capabilities through the SVGA_REG_DEV_CAP register pair
 * (under @dev_priv->cap_lock, since the index/value access is not atomic)
 * and stores them as (index, value) pairs, as many as fit in @bounce.
 *
 * Return: 0 on success, -EINVAL if @bounce cannot even hold the header.
 */
static int vmw_fill_compat_cap(struct vmw_private *dev_priv, void *bounce,
			       size_t size)
{
	struct svga_3d_compat_cap *compat_cap = bounce;
	const size_t pair_offset = offsetof(struct svga_3d_compat_cap, pairs);
	unsigned int num_pairs;
	unsigned int idx;

	if (size < pair_offset)
		return -EINVAL;

	/* Clamp to however many pairs the buffer (and the device) has. */
	num_pairs = (size - pair_offset) / sizeof(SVGA3dCapPair);
	if (num_pairs > SVGA3D_DEVCAP_MAX)
		num_pairs = SVGA3D_DEVCAP_MAX;

	compat_cap->header.type = SVGA3DCAPS_RECORD_DEVCAPS;
	/* Record length is expressed in 32-bit words, header included. */
	compat_cap->header.length =
		(pair_offset + num_pairs * sizeof(SVGA3dCapPair)) / sizeof(u32);

	spin_lock(&dev_priv->cap_lock);
	for (idx = 0; idx < num_pairs; ++idx) {
		vmw_write(dev_priv, SVGA_REG_DEV_CAP, idx);
		compat_cap->pairs[idx][0] = idx;
		compat_cap->pairs[idx][1] =
			vmw_mask_legacy_multisample
			(idx, vmw_read(dev_priv, SVGA_REG_DEV_CAP));
	}
	spin_unlock(&dev_priv->cap_lock);

	return 0;
}
180*4882a593Smuzhiyun 
181*4882a593Smuzhiyun 
/**
 * vmw_get_cap_3d_ioctl - Copy the device's 3D capability data to user-space.
 *
 * @dev: DRM device.
 * @data: Pointer to a struct drm_vmw_get_3d_cap_arg giving the user-space
 * destination buffer and its maximum size.
 * @file_priv: DRM file of the calling client.
 *
 * The reply layout depends on the device and the client:
 * - guest-backed device, gb-aware client: flat array of devcap values read
 *   through the SVGA_REG_DEV_CAP register pair;
 * - guest-backed device, legacy client: a struct svga_3d_compat_cap record
 *   built by vmw_fill_compat_cap();
 * - non-guest-backed device: a raw copy of the FIFO 3D caps range.
 * These sizes must stay in sync with the DRM_VMW_PARAM_3D_CAPS_SIZE case
 * of vmw_getparam_ioctl().
 *
 * Return: 0 on success, -EINVAL on bad arguments, -ENOMEM on allocation
 * failure, -EFAULT if the copy to user-space faults.
 */
int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	struct drm_vmw_get_3d_cap_arg *arg =
		(struct drm_vmw_get_3d_cap_arg *) data;
	struct vmw_private *dev_priv = vmw_priv(dev);
	uint32_t size;
	u32 *fifo_mem;
	void __user *buffer = (void __user *)((unsigned long)(arg->buffer));
	void *bounce;
	int ret;
	bool gb_objects = !!(dev_priv->capabilities & SVGA_CAP_GBOBJECTS);
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);

	if (unlikely(arg->pad64 != 0 || arg->max_size == 0)) {
		VMW_DEBUG_USER("Illegal GET_3D_CAP argument.\n");
		return -EINVAL;
	}

	/* Full size of the reply for the applicable layout (see above). */
	if (gb_objects && vmw_fp->gb_aware)
		size = SVGA3D_DEVCAP_MAX * sizeof(uint32_t);
	else if (gb_objects)
		size = sizeof(struct svga_3d_compat_cap) + sizeof(uint32_t);
	else
		size = (SVGA_FIFO_3D_CAPS_LAST - SVGA_FIFO_3D_CAPS + 1) *
			sizeof(uint32_t);

	/* Never copy more than the caller's buffer can hold. */
	if (arg->max_size < size)
		size = arg->max_size;

	/* vzalloc: zeroed, so a short fill can't leak kernel memory. */
	bounce = vzalloc(size);
	if (unlikely(bounce == NULL)) {
		DRM_ERROR("Failed to allocate bounce buffer for 3D caps.\n");
		return -ENOMEM;
	}

	if (gb_objects && vmw_fp->gb_aware) {
		int i, num;
		uint32_t *bounce32 = (uint32_t *) bounce;

		num = size / sizeof(uint32_t);
		if (num > SVGA3D_DEVCAP_MAX)
			num = SVGA3D_DEVCAP_MAX;

		/* cap_lock serializes the index/value register pair. */
		spin_lock(&dev_priv->cap_lock);
		for (i = 0; i < num; ++i) {
			vmw_write(dev_priv, SVGA_REG_DEV_CAP, i);
			*bounce32++ = vmw_mask_legacy_multisample
				(i, vmw_read(dev_priv, SVGA_REG_DEV_CAP));
		}
		spin_unlock(&dev_priv->cap_lock);
	} else if (gb_objects) {
		ret = vmw_fill_compat_cap(dev_priv, bounce, size);
		if (unlikely(ret != 0))
			goto out_err;
	} else {
		/* Pre-GB device: caps live directly in the FIFO MMIO area. */
		fifo_mem = dev_priv->mmio_virt;
		memcpy(bounce, &fifo_mem[SVGA_FIFO_3D_CAPS], size);
	}

	ret = copy_to_user(buffer, bounce, size);
	if (ret)
		ret = -EFAULT;
out_err:
	vfree(bounce);

	if (unlikely(ret != 0))
		DRM_ERROR("Failed to report 3D caps info.\n");

	return ret;
}
253*4882a593Smuzhiyun 
/**
 * vmw_present_ioctl - Present a surface into a framebuffer's output.
 *
 * @dev: DRM device.
 * @data: Pointer to a struct drm_vmw_present_arg naming the framebuffer,
 * the source surface, a destination offset and a user-space clip list.
 * @file_priv: DRM file of the calling client.
 *
 * Copies the clip rectangles from user-space, looks up the framebuffer and
 * the surface, and hands the actual blit to vmw_kms_present().  Locks are
 * taken in the order modeset lock -> ttm read lock, and the goto ladder
 * releases resources in strict reverse order of acquisition.
 *
 * Return: 0 on success (also when @num_clips is zero, which is a no-op),
 * negative errno otherwise.
 */
int vmw_present_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_vmw_present_arg *arg =
		(struct drm_vmw_present_arg *)data;
	struct vmw_surface *surface;
	struct drm_vmw_rect __user *clips_ptr;
	struct drm_vmw_rect *clips = NULL;
	struct drm_framebuffer *fb;
	struct vmw_framebuffer *vfb;
	struct vmw_resource *res;
	uint32_t num_clips;
	int ret;

	num_clips = arg->num_clips;
	clips_ptr = (struct drm_vmw_rect __user *)(unsigned long)arg->clips_ptr;

	/* Nothing to present without clip rectangles. */
	if (unlikely(num_clips == 0))
		return 0;

	if (clips_ptr == NULL) {
		VMW_DEBUG_USER("Variable clips_ptr must be specified.\n");
		ret = -EINVAL;
		goto out_clips;
	}

	/* kcalloc checks the num_clips * sizeof(*clips) multiply for overflow. */
	clips = kcalloc(num_clips, sizeof(*clips), GFP_KERNEL);
	if (clips == NULL) {
		DRM_ERROR("Failed to allocate clip rect list.\n");
		ret = -ENOMEM;
		goto out_clips;
	}

	ret = copy_from_user(clips, clips_ptr, num_clips * sizeof(*clips));
	if (ret) {
		DRM_ERROR("Failed to copy clip rects from userspace.\n");
		ret = -EFAULT;
		goto out_no_copy;
	}

	drm_modeset_lock_all(dev);

	fb = drm_framebuffer_lookup(dev, file_priv, arg->fb_id);
	if (!fb) {
		VMW_DEBUG_USER("Invalid framebuffer id.\n");
		ret = -ENOENT;
		goto out_no_fb;
	}
	vfb = vmw_framebuffer_to_vfb(fb);

	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
	if (unlikely(ret != 0))
		goto out_no_ttm_lock;

	ret = vmw_user_resource_lookup_handle(dev_priv, tfile, arg->sid,
					      user_surface_converter,
					      &res);
	if (ret)
		goto out_no_surface;

	surface = vmw_res_to_srf(res);
	ret = vmw_kms_present(dev_priv, file_priv,
			      vfb, surface, arg->sid,
			      arg->dest_x, arg->dest_y,
			      clips, num_clips);

	/* vmw_user_surface_lookup takes one ref so does new_fb */
	vmw_surface_unreference(&surface);

out_no_surface:
	ttm_read_unlock(&dev_priv->reservation_sem);
out_no_ttm_lock:
	drm_framebuffer_put(fb);	/* drops the lookup reference */
out_no_fb:
	drm_modeset_unlock_all(dev);
out_no_copy:
	kfree(clips);
out_clips:
	return ret;
}
336*4882a593Smuzhiyun 
/**
 * vmw_present_readback_ioctl - Read screen contents back into a
 * buffer-backed framebuffer.
 *
 * @dev: DRM device.
 * @data: Pointer to a struct drm_vmw_present_readback_arg naming the
 * framebuffer, an optional fence-reply address and a user-space clip list.
 * @file_priv: DRM file of the calling client.
 *
 * Mirrors vmw_present_ioctl() but in the other direction: copies the clip
 * list in, looks up the framebuffer (which must be buffer backed, not
 * surface backed), and delegates to vmw_kms_readback().  The goto ladder
 * releases resources in reverse order of acquisition.
 *
 * Return: 0 on success (also when @num_clips is zero, which is a no-op),
 * negative errno otherwise.
 */
int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_vmw_present_readback_arg *arg =
		(struct drm_vmw_present_readback_arg *)data;
	struct drm_vmw_fence_rep __user *user_fence_rep =
		(struct drm_vmw_fence_rep __user *)
		(unsigned long)arg->fence_rep;
	struct drm_vmw_rect __user *clips_ptr;
	struct drm_vmw_rect *clips = NULL;
	struct drm_framebuffer *fb;
	struct vmw_framebuffer *vfb;
	uint32_t num_clips;
	int ret;

	num_clips = arg->num_clips;
	clips_ptr = (struct drm_vmw_rect __user *)(unsigned long)arg->clips_ptr;

	/* Nothing to read back without clip rectangles. */
	if (unlikely(num_clips == 0))
		return 0;

	if (clips_ptr == NULL) {
		VMW_DEBUG_USER("Argument clips_ptr must be specified.\n");
		ret = -EINVAL;
		goto out_clips;
	}

	/* kcalloc checks the num_clips * sizeof(*clips) multiply for overflow. */
	clips = kcalloc(num_clips, sizeof(*clips), GFP_KERNEL);
	if (clips == NULL) {
		DRM_ERROR("Failed to allocate clip rect list.\n");
		ret = -ENOMEM;
		goto out_clips;
	}

	ret = copy_from_user(clips, clips_ptr, num_clips * sizeof(*clips));
	if (ret) {
		DRM_ERROR("Failed to copy clip rects from userspace.\n");
		ret = -EFAULT;
		goto out_no_copy;
	}

	drm_modeset_lock_all(dev);

	fb = drm_framebuffer_lookup(dev, file_priv, arg->fb_id);
	if (!fb) {
		VMW_DEBUG_USER("Invalid framebuffer id.\n");
		ret = -ENOENT;
		goto out_no_fb;
	}

	/* Readback only makes sense for buffer-backed framebuffers. */
	vfb = vmw_framebuffer_to_vfb(fb);
	if (!vfb->bo) {
		VMW_DEBUG_USER("Framebuffer not buffer backed.\n");
		ret = -EINVAL;
		goto out_no_ttm_lock;
	}

	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
	if (unlikely(ret != 0))
		goto out_no_ttm_lock;

	ret = vmw_kms_readback(dev_priv, file_priv,
			       vfb, user_fence_rep,
			       clips, num_clips);

	ttm_read_unlock(&dev_priv->reservation_sem);
out_no_ttm_lock:
	drm_framebuffer_put(fb);	/* drops the lookup reference */
out_no_fb:
	drm_modeset_unlock_all(dev);
out_no_copy:
	kfree(clips);
out_clips:
	return ret;
}
413*4882a593Smuzhiyun 
414*4882a593Smuzhiyun 
415*4882a593Smuzhiyun /**
416*4882a593Smuzhiyun  * vmw_fops_poll - wrapper around the drm_poll function
417*4882a593Smuzhiyun  *
418*4882a593Smuzhiyun  * @filp: See the linux fops poll documentation.
419*4882a593Smuzhiyun  * @wait: See the linux fops poll documentation.
420*4882a593Smuzhiyun  *
421*4882a593Smuzhiyun  * Wrapper around the drm_poll function that makes sure the device is
422*4882a593Smuzhiyun  * processing the fifo if drm_poll decides to wait.
423*4882a593Smuzhiyun  */
vmw_fops_poll(struct file * filp,struct poll_table_struct * wait)424*4882a593Smuzhiyun __poll_t vmw_fops_poll(struct file *filp, struct poll_table_struct *wait)
425*4882a593Smuzhiyun {
426*4882a593Smuzhiyun 	struct drm_file *file_priv = filp->private_data;
427*4882a593Smuzhiyun 	struct vmw_private *dev_priv =
428*4882a593Smuzhiyun 		vmw_priv(file_priv->minor->dev);
429*4882a593Smuzhiyun 
430*4882a593Smuzhiyun 	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
431*4882a593Smuzhiyun 	return drm_poll(filp, wait);
432*4882a593Smuzhiyun }
433*4882a593Smuzhiyun 
434*4882a593Smuzhiyun 
435*4882a593Smuzhiyun /**
436*4882a593Smuzhiyun  * vmw_fops_read - wrapper around the drm_read function
437*4882a593Smuzhiyun  *
438*4882a593Smuzhiyun  * @filp: See the linux fops read documentation.
439*4882a593Smuzhiyun  * @buffer: See the linux fops read documentation.
440*4882a593Smuzhiyun  * @count: See the linux fops read documentation.
441*4882a593Smuzhiyun  * @offset: See the linux fops read documentation.
442*4882a593Smuzhiyun  *
443*4882a593Smuzhiyun  * Wrapper around the drm_read function that makes sure the device is
444*4882a593Smuzhiyun  * processing the fifo if drm_read decides to wait.
445*4882a593Smuzhiyun  */
vmw_fops_read(struct file * filp,char __user * buffer,size_t count,loff_t * offset)446*4882a593Smuzhiyun ssize_t vmw_fops_read(struct file *filp, char __user *buffer,
447*4882a593Smuzhiyun 		      size_t count, loff_t *offset)
448*4882a593Smuzhiyun {
449*4882a593Smuzhiyun 	struct drm_file *file_priv = filp->private_data;
450*4882a593Smuzhiyun 	struct vmw_private *dev_priv =
451*4882a593Smuzhiyun 		vmw_priv(file_priv->minor->dev);
452*4882a593Smuzhiyun 
453*4882a593Smuzhiyun 	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
454*4882a593Smuzhiyun 	return drm_read(filp, buffer, count, offset);
455*4882a593Smuzhiyun }
456