xref: /OK3568_Linux_fs/kernel/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun /**************************************************************************
2*4882a593Smuzhiyun  *
3*4882a593Smuzhiyun  * Copyright © 2007 David Airlie
4*4882a593Smuzhiyun  * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
5*4882a593Smuzhiyun  * All Rights Reserved.
6*4882a593Smuzhiyun  *
7*4882a593Smuzhiyun  * Permission is hereby granted, free of charge, to any person obtaining a
8*4882a593Smuzhiyun  * copy of this software and associated documentation files (the
9*4882a593Smuzhiyun  * "Software"), to deal in the Software without restriction, including
10*4882a593Smuzhiyun  * without limitation the rights to use, copy, modify, merge, publish,
11*4882a593Smuzhiyun  * distribute, sub license, and/or sell copies of the Software, and to
12*4882a593Smuzhiyun  * permit persons to whom the Software is furnished to do so, subject to
13*4882a593Smuzhiyun  * the following conditions:
14*4882a593Smuzhiyun  *
15*4882a593Smuzhiyun  * The above copyright notice and this permission notice (including the
16*4882a593Smuzhiyun  * next paragraph) shall be included in all copies or substantial portions
17*4882a593Smuzhiyun  * of the Software.
18*4882a593Smuzhiyun  *
19*4882a593Smuzhiyun  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20*4882a593Smuzhiyun  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21*4882a593Smuzhiyun  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
22*4882a593Smuzhiyun  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
23*4882a593Smuzhiyun  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
24*4882a593Smuzhiyun  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
25*4882a593Smuzhiyun  * USE OR OTHER DEALINGS IN THE SOFTWARE.
26*4882a593Smuzhiyun  *
27*4882a593Smuzhiyun  **************************************************************************/
28*4882a593Smuzhiyun 
29*4882a593Smuzhiyun #include <linux/pci.h>
30*4882a593Smuzhiyun 
31*4882a593Smuzhiyun #include <drm/drm_fourcc.h>
32*4882a593Smuzhiyun #include <drm/ttm/ttm_placement.h>
33*4882a593Smuzhiyun 
34*4882a593Smuzhiyun #include "vmwgfx_drv.h"
35*4882a593Smuzhiyun #include "vmwgfx_kms.h"
36*4882a593Smuzhiyun 
37*4882a593Smuzhiyun #define VMW_DIRTY_DELAY (HZ / 30)
38*4882a593Smuzhiyun 
/*
 * Per-device fbdev state for the vmwgfx fbdev emulation.
 */
struct vmw_fb_par {
	struct vmw_private *vmw_priv;

	/* System-memory shadow buffer that fbdev drawing ops write into. */
	void *vmalloc;

	/* Protects the buffer-object / framebuffer / mode state below. */
	struct mutex bo_mutex;
	struct vmw_buffer_object *vmw_bo;
	unsigned bo_size;
	struct drm_framebuffer *set_fb;
	struct drm_display_mode *set_mode;
	/* Panning offset of the visible area, set by fb_pan_display. */
	u32 fb_x;
	u32 fb_y;
	bool bo_iowrite;

	/* Palette for truecolor consoles, filled by vmw_fb_setcolreg. */
	u32 pseudo_palette[17];

	/* Maximum framebuffer geometry accepted by vmw_fb_check_var. */
	unsigned max_width;
	unsigned max_height;

	/*
	 * Accumulated dirty rectangle, flushed by vmw_fb_dirty_flush.
	 * x1 == x2 means "empty". Guarded by @lock; @active is cleared
	 * to suspend flushing.
	 */
	struct {
		spinlock_t lock;
		bool active;
		unsigned x1;
		unsigned y1;
		unsigned x2;
		unsigned y2;
	} dirty;

	struct drm_crtc *crtc;
	struct drm_connector *con;
	/* Delayed work running vmw_fb_dirty_flush. */
	struct delayed_work local_work;
};
71*4882a593Smuzhiyun 
vmw_fb_setcolreg(unsigned regno,unsigned red,unsigned green,unsigned blue,unsigned transp,struct fb_info * info)72*4882a593Smuzhiyun static int vmw_fb_setcolreg(unsigned regno, unsigned red, unsigned green,
73*4882a593Smuzhiyun 			    unsigned blue, unsigned transp,
74*4882a593Smuzhiyun 			    struct fb_info *info)
75*4882a593Smuzhiyun {
76*4882a593Smuzhiyun 	struct vmw_fb_par *par = info->par;
77*4882a593Smuzhiyun 	u32 *pal = par->pseudo_palette;
78*4882a593Smuzhiyun 
79*4882a593Smuzhiyun 	if (regno > 15) {
80*4882a593Smuzhiyun 		DRM_ERROR("Bad regno %u.\n", regno);
81*4882a593Smuzhiyun 		return 1;
82*4882a593Smuzhiyun 	}
83*4882a593Smuzhiyun 
84*4882a593Smuzhiyun 	switch (par->set_fb->format->depth) {
85*4882a593Smuzhiyun 	case 24:
86*4882a593Smuzhiyun 	case 32:
87*4882a593Smuzhiyun 		pal[regno] = ((red & 0xff00) << 8) |
88*4882a593Smuzhiyun 			      (green & 0xff00) |
89*4882a593Smuzhiyun 			     ((blue  & 0xff00) >> 8);
90*4882a593Smuzhiyun 		break;
91*4882a593Smuzhiyun 	default:
92*4882a593Smuzhiyun 		DRM_ERROR("Bad depth %u, bpp %u.\n",
93*4882a593Smuzhiyun 			  par->set_fb->format->depth,
94*4882a593Smuzhiyun 			  par->set_fb->format->cpp[0] * 8);
95*4882a593Smuzhiyun 		return 1;
96*4882a593Smuzhiyun 	}
97*4882a593Smuzhiyun 
98*4882a593Smuzhiyun 	return 0;
99*4882a593Smuzhiyun }
100*4882a593Smuzhiyun 
vmw_fb_check_var(struct fb_var_screeninfo * var,struct fb_info * info)101*4882a593Smuzhiyun static int vmw_fb_check_var(struct fb_var_screeninfo *var,
102*4882a593Smuzhiyun 			    struct fb_info *info)
103*4882a593Smuzhiyun {
104*4882a593Smuzhiyun 	int depth = var->bits_per_pixel;
105*4882a593Smuzhiyun 	struct vmw_fb_par *par = info->par;
106*4882a593Smuzhiyun 	struct vmw_private *vmw_priv = par->vmw_priv;
107*4882a593Smuzhiyun 
108*4882a593Smuzhiyun 	switch (var->bits_per_pixel) {
109*4882a593Smuzhiyun 	case 32:
110*4882a593Smuzhiyun 		depth = (var->transp.length > 0) ? 32 : 24;
111*4882a593Smuzhiyun 		break;
112*4882a593Smuzhiyun 	default:
113*4882a593Smuzhiyun 		DRM_ERROR("Bad bpp %u.\n", var->bits_per_pixel);
114*4882a593Smuzhiyun 		return -EINVAL;
115*4882a593Smuzhiyun 	}
116*4882a593Smuzhiyun 
117*4882a593Smuzhiyun 	switch (depth) {
118*4882a593Smuzhiyun 	case 24:
119*4882a593Smuzhiyun 		var->red.offset = 16;
120*4882a593Smuzhiyun 		var->green.offset = 8;
121*4882a593Smuzhiyun 		var->blue.offset = 0;
122*4882a593Smuzhiyun 		var->red.length = 8;
123*4882a593Smuzhiyun 		var->green.length = 8;
124*4882a593Smuzhiyun 		var->blue.length = 8;
125*4882a593Smuzhiyun 		var->transp.length = 0;
126*4882a593Smuzhiyun 		var->transp.offset = 0;
127*4882a593Smuzhiyun 		break;
128*4882a593Smuzhiyun 	case 32:
129*4882a593Smuzhiyun 		var->red.offset = 16;
130*4882a593Smuzhiyun 		var->green.offset = 8;
131*4882a593Smuzhiyun 		var->blue.offset = 0;
132*4882a593Smuzhiyun 		var->red.length = 8;
133*4882a593Smuzhiyun 		var->green.length = 8;
134*4882a593Smuzhiyun 		var->blue.length = 8;
135*4882a593Smuzhiyun 		var->transp.length = 8;
136*4882a593Smuzhiyun 		var->transp.offset = 24;
137*4882a593Smuzhiyun 		break;
138*4882a593Smuzhiyun 	default:
139*4882a593Smuzhiyun 		DRM_ERROR("Bad depth %u.\n", depth);
140*4882a593Smuzhiyun 		return -EINVAL;
141*4882a593Smuzhiyun 	}
142*4882a593Smuzhiyun 
143*4882a593Smuzhiyun 	if ((var->xoffset + var->xres) > par->max_width ||
144*4882a593Smuzhiyun 	    (var->yoffset + var->yres) > par->max_height) {
145*4882a593Smuzhiyun 		DRM_ERROR("Requested geom can not fit in framebuffer\n");
146*4882a593Smuzhiyun 		return -EINVAL;
147*4882a593Smuzhiyun 	}
148*4882a593Smuzhiyun 
149*4882a593Smuzhiyun 	if (!vmw_kms_validate_mode_vram(vmw_priv,
150*4882a593Smuzhiyun 					var->xres * var->bits_per_pixel/8,
151*4882a593Smuzhiyun 					var->yoffset + var->yres)) {
152*4882a593Smuzhiyun 		DRM_ERROR("Requested geom can not fit in framebuffer\n");
153*4882a593Smuzhiyun 		return -EINVAL;
154*4882a593Smuzhiyun 	}
155*4882a593Smuzhiyun 
156*4882a593Smuzhiyun 	return 0;
157*4882a593Smuzhiyun }
158*4882a593Smuzhiyun 
/* Blanking is a no-op for the emulated display; always succeed. */
static int vmw_fb_blank(int blank, struct fb_info *info)
{
	return 0;
}
163*4882a593Smuzhiyun 
/**
 * vmw_fb_dirty_flush - flush dirty regions to the kms framebuffer
 *
 * @work: The struct work_struct associated with this task.
 *
 * This function flushes the dirty regions of the vmalloc framebuffer to the
 * kms framebuffer, and if the kms framebuffer is visible, also updates the
 * corresponding displays. Note that this function runs even if the kms
 * framebuffer is not bound to a crtc and thus not visible, but it's turned
 * off during hibernation using the par->dirty.active bool.
 */
static void vmw_fb_dirty_flush(struct work_struct *work)
{
	struct vmw_fb_par *par = container_of(work, struct vmw_fb_par,
					      local_work.work);
	struct vmw_private *vmw_priv = par->vmw_priv;
	struct fb_info *info = vmw_priv->fb_info;
	unsigned long irq_flags;
	s32 dst_x1, dst_x2, dst_y1, dst_y2, w = 0, h = 0;
	u32 cpp, max_x, max_y;
	struct drm_clip_rect clip;
	struct drm_framebuffer *cur_fb;
	u8 *src_ptr, *dst_ptr;
	struct vmw_buffer_object *vbo = par->vmw_bo;
	void *virtual;

	/* Cheap unlocked early-out; re-checked under the spinlock below. */
	if (!READ_ONCE(par->dirty.active))
		return;

	/* Lock order: bo_mutex -> ttm read lock -> bo reservation. */
	mutex_lock(&par->bo_mutex);
	cur_fb = par->set_fb;
	if (!cur_fb)
		goto out_unlock;

	(void) ttm_read_lock(&vmw_priv->reservation_sem, false);
	(void) ttm_bo_reserve(&vbo->base, false, false, NULL);
	virtual = vmw_bo_map_and_cache(vbo);
	if (!virtual)
		goto out_unreserve;

	spin_lock_irqsave(&par->dirty.lock, irq_flags);
	if (!par->dirty.active) {
		spin_unlock_irqrestore(&par->dirty.lock, irq_flags);
		goto out_unreserve;
	}

	/*
	 * Handle panning when copying from vmalloc to framebuffer.
	 * Clip dirty area to framebuffer.
	 */
	cpp = cur_fb->format->cpp[0];
	max_x = par->fb_x + cur_fb->width;
	max_y = par->fb_y + cur_fb->height;

	dst_x1 = par->dirty.x1 - par->fb_x;
	dst_y1 = par->dirty.y1 - par->fb_y;
	dst_x1 = max_t(s32, dst_x1, 0);
	dst_y1 = max_t(s32, dst_y1, 0);

	dst_x2 = par->dirty.x2 - par->fb_x;
	dst_y2 = par->dirty.y2 - par->fb_y;
	dst_x2 = min_t(s32, dst_x2, max_x);
	dst_y2 = min_t(s32, dst_y2, max_y);
	w = dst_x2 - dst_x1;
	h = dst_y2 - dst_y1;
	w = max_t(s32, 0, w);
	h = max_t(s32, 0, h);

	/* Reset the accumulated rect to "empty" (x1 == x2). */
	par->dirty.x1 = par->dirty.x2 = 0;
	par->dirty.y1 = par->dirty.y2 = 0;
	spin_unlock_irqrestore(&par->dirty.lock, irq_flags);

	if (w && h) {
		/* Copy the clipped region from the vmalloc shadow to the bo. */
		dst_ptr = (u8 *)virtual  +
			(dst_y1 * par->set_fb->pitches[0] + dst_x1 * cpp);
		src_ptr = (u8 *)par->vmalloc +
			((dst_y1 + par->fb_y) * info->fix.line_length +
			 (dst_x1 + par->fb_x) * cpp);

		while (h-- > 0) {
			memcpy(dst_ptr, src_ptr, w*cpp);
			dst_ptr += par->set_fb->pitches[0];
			src_ptr += info->fix.line_length;
		}

		clip.x1 = dst_x1;
		clip.x2 = dst_x2;
		clip.y1 = dst_y1;
		clip.y2 = dst_y2;
	}

out_unreserve:
	ttm_bo_unreserve(&vbo->base);
	ttm_read_unlock(&vmw_priv->reservation_sem);
	/* Report the dirty rect to KMS outside the reservation. */
	if (w && h) {
		WARN_ON_ONCE(par->set_fb->funcs->dirty(cur_fb, NULL, 0, 0,
						       &clip, 1));
		vmw_fifo_flush(vmw_priv, false);
	}
out_unlock:
	mutex_unlock(&par->bo_mutex);
}
266*4882a593Smuzhiyun 
vmw_fb_dirty_mark(struct vmw_fb_par * par,unsigned x1,unsigned y1,unsigned width,unsigned height)267*4882a593Smuzhiyun static void vmw_fb_dirty_mark(struct vmw_fb_par *par,
268*4882a593Smuzhiyun 			      unsigned x1, unsigned y1,
269*4882a593Smuzhiyun 			      unsigned width, unsigned height)
270*4882a593Smuzhiyun {
271*4882a593Smuzhiyun 	unsigned long flags;
272*4882a593Smuzhiyun 	unsigned x2 = x1 + width;
273*4882a593Smuzhiyun 	unsigned y2 = y1 + height;
274*4882a593Smuzhiyun 
275*4882a593Smuzhiyun 	spin_lock_irqsave(&par->dirty.lock, flags);
276*4882a593Smuzhiyun 	if (par->dirty.x1 == par->dirty.x2) {
277*4882a593Smuzhiyun 		par->dirty.x1 = x1;
278*4882a593Smuzhiyun 		par->dirty.y1 = y1;
279*4882a593Smuzhiyun 		par->dirty.x2 = x2;
280*4882a593Smuzhiyun 		par->dirty.y2 = y2;
281*4882a593Smuzhiyun 		/* if we are active start the dirty work
282*4882a593Smuzhiyun 		 * we share the work with the defio system */
283*4882a593Smuzhiyun 		if (par->dirty.active)
284*4882a593Smuzhiyun 			schedule_delayed_work(&par->local_work,
285*4882a593Smuzhiyun 					      VMW_DIRTY_DELAY);
286*4882a593Smuzhiyun 	} else {
287*4882a593Smuzhiyun 		if (x1 < par->dirty.x1)
288*4882a593Smuzhiyun 			par->dirty.x1 = x1;
289*4882a593Smuzhiyun 		if (y1 < par->dirty.y1)
290*4882a593Smuzhiyun 			par->dirty.y1 = y1;
291*4882a593Smuzhiyun 		if (x2 > par->dirty.x2)
292*4882a593Smuzhiyun 			par->dirty.x2 = x2;
293*4882a593Smuzhiyun 		if (y2 > par->dirty.y2)
294*4882a593Smuzhiyun 			par->dirty.y2 = y2;
295*4882a593Smuzhiyun 	}
296*4882a593Smuzhiyun 	spin_unlock_irqrestore(&par->dirty.lock, flags);
297*4882a593Smuzhiyun }
298*4882a593Smuzhiyun 
vmw_fb_pan_display(struct fb_var_screeninfo * var,struct fb_info * info)299*4882a593Smuzhiyun static int vmw_fb_pan_display(struct fb_var_screeninfo *var,
300*4882a593Smuzhiyun 			      struct fb_info *info)
301*4882a593Smuzhiyun {
302*4882a593Smuzhiyun 	struct vmw_fb_par *par = info->par;
303*4882a593Smuzhiyun 
304*4882a593Smuzhiyun 	if ((var->xoffset + var->xres) > var->xres_virtual ||
305*4882a593Smuzhiyun 	    (var->yoffset + var->yres) > var->yres_virtual) {
306*4882a593Smuzhiyun 		DRM_ERROR("Requested panning can not fit in framebuffer\n");
307*4882a593Smuzhiyun 		return -EINVAL;
308*4882a593Smuzhiyun 	}
309*4882a593Smuzhiyun 
310*4882a593Smuzhiyun 	mutex_lock(&par->bo_mutex);
311*4882a593Smuzhiyun 	par->fb_x = var->xoffset;
312*4882a593Smuzhiyun 	par->fb_y = var->yoffset;
313*4882a593Smuzhiyun 	if (par->set_fb)
314*4882a593Smuzhiyun 		vmw_fb_dirty_mark(par, par->fb_x, par->fb_y, par->set_fb->width,
315*4882a593Smuzhiyun 				  par->set_fb->height);
316*4882a593Smuzhiyun 	mutex_unlock(&par->bo_mutex);
317*4882a593Smuzhiyun 
318*4882a593Smuzhiyun 	return 0;
319*4882a593Smuzhiyun }
320*4882a593Smuzhiyun 
vmw_deferred_io(struct fb_info * info,struct list_head * pagelist)321*4882a593Smuzhiyun static void vmw_deferred_io(struct fb_info *info,
322*4882a593Smuzhiyun 			    struct list_head *pagelist)
323*4882a593Smuzhiyun {
324*4882a593Smuzhiyun 	struct vmw_fb_par *par = info->par;
325*4882a593Smuzhiyun 	unsigned long start, end, min, max;
326*4882a593Smuzhiyun 	unsigned long flags;
327*4882a593Smuzhiyun 	struct page *page;
328*4882a593Smuzhiyun 	int y1, y2;
329*4882a593Smuzhiyun 
330*4882a593Smuzhiyun 	min = ULONG_MAX;
331*4882a593Smuzhiyun 	max = 0;
332*4882a593Smuzhiyun 	list_for_each_entry(page, pagelist, lru) {
333*4882a593Smuzhiyun 		start = page->index << PAGE_SHIFT;
334*4882a593Smuzhiyun 		end = start + PAGE_SIZE - 1;
335*4882a593Smuzhiyun 		min = min(min, start);
336*4882a593Smuzhiyun 		max = max(max, end);
337*4882a593Smuzhiyun 	}
338*4882a593Smuzhiyun 
339*4882a593Smuzhiyun 	if (min < max) {
340*4882a593Smuzhiyun 		y1 = min / info->fix.line_length;
341*4882a593Smuzhiyun 		y2 = (max / info->fix.line_length) + 1;
342*4882a593Smuzhiyun 
343*4882a593Smuzhiyun 		spin_lock_irqsave(&par->dirty.lock, flags);
344*4882a593Smuzhiyun 		par->dirty.x1 = 0;
345*4882a593Smuzhiyun 		par->dirty.y1 = y1;
346*4882a593Smuzhiyun 		par->dirty.x2 = info->var.xres;
347*4882a593Smuzhiyun 		par->dirty.y2 = y2;
348*4882a593Smuzhiyun 		spin_unlock_irqrestore(&par->dirty.lock, flags);
349*4882a593Smuzhiyun 
350*4882a593Smuzhiyun 		/*
351*4882a593Smuzhiyun 		 * Since we've already waited on this work once, try to
352*4882a593Smuzhiyun 		 * execute asap.
353*4882a593Smuzhiyun 		 */
354*4882a593Smuzhiyun 		cancel_delayed_work(&par->local_work);
355*4882a593Smuzhiyun 		schedule_delayed_work(&par->local_work, 0);
356*4882a593Smuzhiyun 	}
357*4882a593Smuzhiyun };
358*4882a593Smuzhiyun 
/* Deferred-I/O descriptor: mmap'ed writes are flushed via vmw_deferred_io. */
static struct fb_deferred_io vmw_defio = {
	.delay		= VMW_DIRTY_DELAY,
	.deferred_io	= vmw_deferred_io,
};
363*4882a593Smuzhiyun 
364*4882a593Smuzhiyun /*
365*4882a593Smuzhiyun  * Draw code
366*4882a593Smuzhiyun  */
367*4882a593Smuzhiyun 
vmw_fb_fillrect(struct fb_info * info,const struct fb_fillrect * rect)368*4882a593Smuzhiyun static void vmw_fb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
369*4882a593Smuzhiyun {
370*4882a593Smuzhiyun 	cfb_fillrect(info, rect);
371*4882a593Smuzhiyun 	vmw_fb_dirty_mark(info->par, rect->dx, rect->dy,
372*4882a593Smuzhiyun 			  rect->width, rect->height);
373*4882a593Smuzhiyun }
374*4882a593Smuzhiyun 
vmw_fb_copyarea(struct fb_info * info,const struct fb_copyarea * region)375*4882a593Smuzhiyun static void vmw_fb_copyarea(struct fb_info *info, const struct fb_copyarea *region)
376*4882a593Smuzhiyun {
377*4882a593Smuzhiyun 	cfb_copyarea(info, region);
378*4882a593Smuzhiyun 	vmw_fb_dirty_mark(info->par, region->dx, region->dy,
379*4882a593Smuzhiyun 			  region->width, region->height);
380*4882a593Smuzhiyun }
381*4882a593Smuzhiyun 
vmw_fb_imageblit(struct fb_info * info,const struct fb_image * image)382*4882a593Smuzhiyun static void vmw_fb_imageblit(struct fb_info *info, const struct fb_image *image)
383*4882a593Smuzhiyun {
384*4882a593Smuzhiyun 	cfb_imageblit(info, image);
385*4882a593Smuzhiyun 	vmw_fb_dirty_mark(info->par, image->dx, image->dy,
386*4882a593Smuzhiyun 			  image->width, image->height);
387*4882a593Smuzhiyun }
388*4882a593Smuzhiyun 
389*4882a593Smuzhiyun /*
390*4882a593Smuzhiyun  * Bring up code
391*4882a593Smuzhiyun  */
392*4882a593Smuzhiyun 
/*
 * Allocate and initialize a system-placed buffer object of @size bytes
 * for the fbdev framebuffer, returned in @out.
 *
 * Returns 0 on success or a negative error code. On vmw_bo_init()
 * failure the buffer is freed by the init function itself.
 */
static int vmw_fb_create_bo(struct vmw_private *vmw_priv,
			    size_t size, struct vmw_buffer_object **out)
{
	struct vmw_buffer_object *bo;
	int ret = 0;

	(void) ttm_write_lock(&vmw_priv->reservation_sem, false);

	bo = kmalloc(sizeof(*bo), GFP_KERNEL);
	if (!bo) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	ret = vmw_bo_init(vmw_priv, bo, size,
			  &vmw_sys_placement,
			  false,
			  &vmw_bo_bo_free);
	if (unlikely(ret != 0))
		goto out_unlock; /* init frees the buffer on failure */

	*out = bo;

out_unlock:
	ttm_write_unlock(&vmw_priv->reservation_sem);
	return ret;
}
423*4882a593Smuzhiyun 
vmw_fb_compute_depth(struct fb_var_screeninfo * var,int * depth)424*4882a593Smuzhiyun static int vmw_fb_compute_depth(struct fb_var_screeninfo *var,
425*4882a593Smuzhiyun 				int *depth)
426*4882a593Smuzhiyun {
427*4882a593Smuzhiyun 	switch (var->bits_per_pixel) {
428*4882a593Smuzhiyun 	case 32:
429*4882a593Smuzhiyun 		*depth = (var->transp.length > 0) ? 32 : 24;
430*4882a593Smuzhiyun 		break;
431*4882a593Smuzhiyun 	default:
432*4882a593Smuzhiyun 		DRM_ERROR("Bad bpp %u.\n", var->bits_per_pixel);
433*4882a593Smuzhiyun 		return -EINVAL;
434*4882a593Smuzhiyun 	}
435*4882a593Smuzhiyun 
436*4882a593Smuzhiyun 	return 0;
437*4882a593Smuzhiyun }
438*4882a593Smuzhiyun 
vmwgfx_set_config_internal(struct drm_mode_set * set)439*4882a593Smuzhiyun static int vmwgfx_set_config_internal(struct drm_mode_set *set)
440*4882a593Smuzhiyun {
441*4882a593Smuzhiyun 	struct drm_crtc *crtc = set->crtc;
442*4882a593Smuzhiyun 	struct drm_modeset_acquire_ctx ctx;
443*4882a593Smuzhiyun 	int ret;
444*4882a593Smuzhiyun 
445*4882a593Smuzhiyun 	drm_modeset_acquire_init(&ctx, 0);
446*4882a593Smuzhiyun 
447*4882a593Smuzhiyun restart:
448*4882a593Smuzhiyun 	ret = crtc->funcs->set_config(set, &ctx);
449*4882a593Smuzhiyun 
450*4882a593Smuzhiyun 	if (ret == -EDEADLK) {
451*4882a593Smuzhiyun 		drm_modeset_backoff(&ctx);
452*4882a593Smuzhiyun 		goto restart;
453*4882a593Smuzhiyun 	}
454*4882a593Smuzhiyun 
455*4882a593Smuzhiyun 	drm_modeset_drop_locks(&ctx);
456*4882a593Smuzhiyun 	drm_modeset_acquire_fini(&ctx);
457*4882a593Smuzhiyun 
458*4882a593Smuzhiyun 	return ret;
459*4882a593Smuzhiyun }
460*4882a593Smuzhiyun 
/*
 * Tear down the fbdev KMS state: unset the mode on the crtc, drop the
 * framebuffer reference, and optionally release the buffer object.
 *
 * Returns 0 on success or the error from unsetting the mode.
 */
static int vmw_fb_kms_detach(struct vmw_fb_par *par,
			     bool detach_bo,
			     bool unref_bo)
{
	struct drm_framebuffer *old_fb = par->set_fb;
	int ret;

	/* Detach the KMS framebuffer from crtcs */
	if (par->set_mode) {
		struct drm_mode_set set = {
			.crtc = par->crtc,
			.x = 0,
			.y = 0,
			.mode = NULL,
			.fb = NULL,
			.num_connectors = 0,
			.connectors = &par->con,
		};

		ret = vmwgfx_set_config_internal(&set);
		if (ret) {
			DRM_ERROR("Could not unset a mode.\n");
			return ret;
		}
		drm_mode_destroy(par->vmw_priv->dev, par->set_mode);
		par->set_mode = NULL;
	}

	if (old_fb) {
		drm_framebuffer_put(old_fb);
		par->set_fb = NULL;
	}

	if (par->vmw_bo && detach_bo && unref_bo)
		vmw_bo_unreference(&par->vmw_bo);

	return 0;
}
498*4882a593Smuzhiyun 
/*
 * (Re)create the KMS framebuffer backing the fbdev, sized from
 * info->var. Reuses the current framebuffer when the geometry and
 * format are unchanged, and reuses the buffer object when its size
 * is within [new_size, 2*new_size). Caller holds par->bo_mutex
 * (see vmw_fb_set_par).
 *
 * Returns 0 on success or a negative error code.
 */
static int vmw_fb_kms_framebuffer(struct fb_info *info)
{
	struct drm_mode_fb_cmd2 mode_cmd = {0};
	struct vmw_fb_par *par = info->par;
	struct fb_var_screeninfo *var = &info->var;
	struct drm_framebuffer *cur_fb;
	struct vmw_framebuffer *vfb;
	int ret = 0, depth;
	size_t new_bo_size;

	ret = vmw_fb_compute_depth(var, &depth);
	if (ret)
		return ret;

	mode_cmd.width = var->xres;
	mode_cmd.height = var->yres;
	/* Pitch in bytes: round bpp up to whole bytes per pixel. */
	mode_cmd.pitches[0] = ((var->bits_per_pixel + 7) / 8) * mode_cmd.width;
	mode_cmd.pixel_format =
		drm_mode_legacy_fb_format(var->bits_per_pixel, depth);

	/* Nothing to do if the current fb already matches the request. */
	cur_fb = par->set_fb;
	if (cur_fb && cur_fb->width == mode_cmd.width &&
	    cur_fb->height == mode_cmd.height &&
	    cur_fb->format->format == mode_cmd.pixel_format &&
	    cur_fb->pitches[0] == mode_cmd.pitches[0])
		return 0;

	/* Need new buffer object ? */
	new_bo_size = (size_t) mode_cmd.pitches[0] * (size_t) mode_cmd.height;
	/* Drop the bo only when it is too small or wastefully large. */
	ret = vmw_fb_kms_detach(par,
				par->bo_size < new_bo_size ||
				par->bo_size > 2*new_bo_size,
				true);
	if (ret)
		return ret;

	if (!par->vmw_bo) {
		ret = vmw_fb_create_bo(par->vmw_priv, new_bo_size,
				       &par->vmw_bo);
		if (ret) {
			DRM_ERROR("Failed creating a buffer object for "
				  "fbdev.\n");
			return ret;
		}
		par->bo_size = new_bo_size;
	}

	vfb = vmw_kms_new_framebuffer(par->vmw_priv, par->vmw_bo, NULL,
				      true, &mode_cmd);
	if (IS_ERR(vfb))
		return PTR_ERR(vfb);

	par->set_fb = &vfb->base;

	return 0;
}
555*4882a593Smuzhiyun 
/*
 * fbdev set_par: build a display mode from info->var, recreate the KMS
 * framebuffer if needed, program the crtc and schedule a full repaint.
 *
 * Returns 0 on success or a negative error code. Note that @mode is
 * installed as par->set_mode even on the error paths reached after its
 * creation, so it is always owned (and later freed) via par->set_mode.
 */
static int vmw_fb_set_par(struct fb_info *info)
{
	struct vmw_fb_par *par = info->par;
	struct vmw_private *vmw_priv = par->vmw_priv;
	struct drm_mode_set set;
	struct fb_var_screeninfo *var = &info->var;
	struct drm_display_mode new_mode = { DRM_MODE("fb_mode",
		DRM_MODE_TYPE_DRIVER,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC)
	};
	struct drm_display_mode *mode;
	int ret;

	mode = drm_mode_duplicate(vmw_priv->dev, &new_mode);
	if (!mode) {
		DRM_ERROR("Could not create new fb mode.\n");
		return -ENOMEM;
	}

	/* Only the resolution comes from var; timings are synthesized. */
	mode->hdisplay = var->xres;
	mode->vdisplay = var->yres;
	vmw_guess_mode_timing(mode);

	if (!vmw_kms_validate_mode_vram(vmw_priv,
					mode->hdisplay *
					DIV_ROUND_UP(var->bits_per_pixel, 8),
					mode->vdisplay)) {
		drm_mode_destroy(vmw_priv->dev, mode);
		return -EINVAL;
	}

	mutex_lock(&par->bo_mutex);
	ret = vmw_fb_kms_framebuffer(info);
	if (ret)
		goto out_unlock;

	par->fb_x = var->xoffset;
	par->fb_y = var->yoffset;

	set.crtc = par->crtc;
	set.x = 0;
	set.y = 0;
	set.mode = mode;
	set.fb = par->set_fb;
	set.num_connectors = 1;
	set.connectors = &par->con;

	ret = vmwgfx_set_config_internal(&set);
	if (ret)
		goto out_unlock;

	/* Repaint everything at the new mode/offset. */
	vmw_fb_dirty_mark(par, par->fb_x, par->fb_y,
			  par->set_fb->width, par->set_fb->height);

	/* If there already was stuff dirty we wont
	 * schedule a new work, so lets do it now */

	schedule_delayed_work(&par->local_work, 0);

out_unlock:
	/* Replace the previous mode; @mode ownership moves to set_mode. */
	if (par->set_mode)
		drm_mode_destroy(vmw_priv->dev, par->set_mode);
	par->set_mode = mode;

	mutex_unlock(&par->bo_mutex);

	return ret;
}
625*4882a593Smuzhiyun 
626*4882a593Smuzhiyun 
/* fbdev entry points for the emulated vmwgfx framebuffer device. */
static const struct fb_ops vmw_fb_ops = {
	.owner = THIS_MODULE,
	.fb_check_var = vmw_fb_check_var,
	.fb_set_par = vmw_fb_set_par,
	.fb_setcolreg = vmw_fb_setcolreg,
	.fb_fillrect = vmw_fb_fillrect,
	.fb_copyarea = vmw_fb_copyarea,
	.fb_imageblit = vmw_fb_imageblit,
	.fb_pan_display = vmw_fb_pan_display,
	.fb_blank = vmw_fb_blank,
};
638*4882a593Smuzhiyun 
/*
 * vmw_fb_init - Create and register the fbdev emulation framebuffer.
 * @vmw_priv: The driver-private structure.
 *
 * Allocates an fb_info carrying a struct vmw_fb_par, sizes a vmalloc'd
 * system-memory shadow buffer from the (clamped) maximum fb dimensions,
 * fills in the fixed and variable screen info for a 32 bpp XRGB layout,
 * sets up deferred I/O for dirty tracking and registers the framebuffer.
 *
 * Returns 0 on success or a negative error code on failure.  On failure
 * vmw_priv->fb_info is reset to NULL.
 */
int vmw_fb_init(struct vmw_private *vmw_priv)
{
	struct device *device = &vmw_priv->dev->pdev->dev;
	struct vmw_fb_par *par;
	struct fb_info *info;
	unsigned fb_width, fb_height;
	unsigned int fb_bpp, fb_pitch, fb_size;
	struct drm_display_mode *init_mode;
	int ret;

	fb_bpp = 32;

	/* XXX: arbitrary 2048x2048 cap on the fbdev size; the device may
	 * report larger fb_max_width/fb_max_height. */
	fb_width = min(vmw_priv->fb_max_width, (unsigned)2048);
	fb_height = min(vmw_priv->fb_max_height, (unsigned)2048);

	fb_pitch = fb_width * fb_bpp / 8;
	fb_size = fb_pitch * fb_height;

	info = framebuffer_alloc(sizeof(*par), device);
	if (!info)
		return -ENOMEM;

	/*
	 * Par: per-framebuffer private state, embedded in the fb_info
	 * allocation above.
	 */
	vmw_priv->fb_info = info;
	par = info->par;
	memset(par, 0, sizeof(*par));
	INIT_DELAYED_WORK(&par->local_work, &vmw_fb_dirty_flush);
	par->vmw_priv = vmw_priv;
	par->vmalloc = NULL;
	par->max_width = fb_width;
	par->max_height = fb_height;

	/* Pick an initial connector/crtc/mode for the fbdev console. */
	ret = vmw_kms_fbdev_init_data(vmw_priv, 0, par->max_width,
				      par->max_height, &par->con,
				      &par->crtc, &init_mode);
	if (ret)
		goto err_kms;

	info->var.xres = init_mode->hdisplay;
	info->var.yres = init_mode->vdisplay;

	/*
	 * Create buffers and alloc memory: the shadow framebuffer lives in
	 * vmalloc'd system memory and is flushed to the device by the
	 * deferred-I/O dirty worker.
	 */
	par->vmalloc = vzalloc(fb_size);
	if (unlikely(par->vmalloc == NULL)) {
		ret = -ENOMEM;
		goto err_free;
	}

	/*
	 * Fixed and var screen info.
	 */
	strcpy(info->fix.id, "svgadrmfb");
	info->fix.type = FB_TYPE_PACKED_PIXELS;
	info->fix.visual = FB_VISUAL_TRUECOLOR;
	info->fix.type_aux = 0;
	info->fix.xpanstep = 1; /* doing it in hw */
	info->fix.ypanstep = 1; /* doing it in hw */
	info->fix.ywrapstep = 0;
	info->fix.accel = FB_ACCEL_NONE;
	info->fix.line_length = fb_pitch;

	/* No physical aperture exposed; userspace sees the vmalloc shadow. */
	info->fix.smem_start = 0;
	info->fix.smem_len = fb_size;

	info->pseudo_palette = par->pseudo_palette;
	info->screen_base = (char __iomem *)par->vmalloc;
	info->screen_size = fb_size;

	info->fbops = &vmw_fb_ops;

	/* 24 depth per default: XRGB8888, no alpha channel. */
	info->var.red.offset = 16;
	info->var.green.offset = 8;
	info->var.blue.offset = 0;
	info->var.red.length = 8;
	info->var.green.length = 8;
	info->var.blue.length = 8;
	info->var.transp.offset = 0;
	info->var.transp.length = 0;

	info->var.xres_virtual = fb_width;
	info->var.yres_virtual = fb_height;
	info->var.bits_per_pixel = fb_bpp;
	info->var.xoffset = 0;
	info->var.yoffset = 0;
	info->var.activate = FB_ACTIVATE_NOW;
	info->var.height = -1;
	info->var.width = -1;

	/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
	/* Advertise the VRAM range so conflicting generic drivers are kicked. */
	info->apertures = alloc_apertures(1);
	if (!info->apertures) {
		ret = -ENOMEM;
		goto err_aper;
	}
	info->apertures->ranges[0].base = vmw_priv->vram_start;
	info->apertures->ranges[0].size = vmw_priv->vram_size;

	/*
	 * Dirty & Deferred IO: start with an empty dirty rect and tracking
	 * enabled; vmw_defio drives periodic flushes of touched pages.
	 */
	par->dirty.x1 = par->dirty.x2 = 0;
	par->dirty.y1 = par->dirty.y2 = 0;
	par->dirty.active = true;
	spin_lock_init(&par->dirty.lock);
	mutex_init(&par->bo_mutex);
	info->fbdefio = &vmw_defio;
	fb_deferred_io_init(info);

	ret = register_framebuffer(info);
	if (unlikely(ret != 0))
		goto err_defio;

	vmw_fb_set_par(info);

	return 0;

	/*
	 * Unwind in reverse order of setup.  err_aper and err_free share the
	 * vfree (vfree(NULL) is a no-op on the err_kms path is avoided by
	 * the label placement).
	 * NOTE(review): the err_defio path does not undo
	 * vmw_kms_fbdev_init_data(); whether that leaks a connector/crtc
	 * reference cannot be determined from this file — verify against
	 * vmwgfx_kms.c.
	 */
err_defio:
	fb_deferred_io_cleanup(info);
err_aper:
err_free:
	vfree(par->vmalloc);
err_kms:
	framebuffer_release(info);
	vmw_priv->fb_info = NULL;

	return ret;
}
772*4882a593Smuzhiyun 
/*
 * vmw_fb_close - Tear down the fbdev emulation framebuffer.
 * @vmw_priv: The driver-private structure.
 *
 * Reverses vmw_fb_init(): stops deferred I/O and the dirty-flush worker,
 * unregisters the framebuffer, detaches the KMS state and frees the
 * shadow buffer and fb_info.  A no-op if no fbdev was ever set up.
 *
 * Always returns 0.
 */
int vmw_fb_close(struct vmw_private *vmw_priv)
{
	struct fb_info *info;
	struct vmw_fb_par *par;

	if (!vmw_priv->fb_info)
		return 0;

	info = vmw_priv->fb_info;
	par = info->par;

	/* ??? order — original author was unsure whether deferred-I/O
	 * cleanup must precede unregister_framebuffer(); preserved as-is. */
	fb_deferred_io_cleanup(info);
	cancel_delayed_work_sync(&par->local_work);
	unregister_framebuffer(info);

	mutex_lock(&par->bo_mutex);
	(void) vmw_fb_kms_detach(par, true, true);
	mutex_unlock(&par->bo_mutex);

	vfree(par->vmalloc);
	framebuffer_release(info);

	return 0;
}
798*4882a593Smuzhiyun 
vmw_fb_off(struct vmw_private * vmw_priv)799*4882a593Smuzhiyun int vmw_fb_off(struct vmw_private *vmw_priv)
800*4882a593Smuzhiyun {
801*4882a593Smuzhiyun 	struct fb_info *info;
802*4882a593Smuzhiyun 	struct vmw_fb_par *par;
803*4882a593Smuzhiyun 	unsigned long flags;
804*4882a593Smuzhiyun 
805*4882a593Smuzhiyun 	if (!vmw_priv->fb_info)
806*4882a593Smuzhiyun 		return -EINVAL;
807*4882a593Smuzhiyun 
808*4882a593Smuzhiyun 	info = vmw_priv->fb_info;
809*4882a593Smuzhiyun 	par = info->par;
810*4882a593Smuzhiyun 
811*4882a593Smuzhiyun 	spin_lock_irqsave(&par->dirty.lock, flags);
812*4882a593Smuzhiyun 	par->dirty.active = false;
813*4882a593Smuzhiyun 	spin_unlock_irqrestore(&par->dirty.lock, flags);
814*4882a593Smuzhiyun 
815*4882a593Smuzhiyun 	flush_delayed_work(&info->deferred_work);
816*4882a593Smuzhiyun 	flush_delayed_work(&par->local_work);
817*4882a593Smuzhiyun 
818*4882a593Smuzhiyun 	return 0;
819*4882a593Smuzhiyun }
820*4882a593Smuzhiyun 
vmw_fb_on(struct vmw_private * vmw_priv)821*4882a593Smuzhiyun int vmw_fb_on(struct vmw_private *vmw_priv)
822*4882a593Smuzhiyun {
823*4882a593Smuzhiyun 	struct fb_info *info;
824*4882a593Smuzhiyun 	struct vmw_fb_par *par;
825*4882a593Smuzhiyun 	unsigned long flags;
826*4882a593Smuzhiyun 
827*4882a593Smuzhiyun 	if (!vmw_priv->fb_info)
828*4882a593Smuzhiyun 		return -EINVAL;
829*4882a593Smuzhiyun 
830*4882a593Smuzhiyun 	info = vmw_priv->fb_info;
831*4882a593Smuzhiyun 	par = info->par;
832*4882a593Smuzhiyun 
833*4882a593Smuzhiyun 	spin_lock_irqsave(&par->dirty.lock, flags);
834*4882a593Smuzhiyun 	par->dirty.active = true;
835*4882a593Smuzhiyun 	spin_unlock_irqrestore(&par->dirty.lock, flags);
836*4882a593Smuzhiyun 
837*4882a593Smuzhiyun 	/*
838*4882a593Smuzhiyun 	 * Need to reschedule a dirty update, because otherwise that's
839*4882a593Smuzhiyun 	 * only done in dirty_mark() if the previous coalesced
840*4882a593Smuzhiyun 	 * dirty region was empty.
841*4882a593Smuzhiyun 	 */
842*4882a593Smuzhiyun 	schedule_delayed_work(&par->local_work, 0);
843*4882a593Smuzhiyun 
844*4882a593Smuzhiyun 	return 0;
845*4882a593Smuzhiyun }
846