// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2009-2014 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include <drm/ttm/ttm_placement.h>

#include "device_include/svga_overlay.h"
#include "device_include/svga_escape.h"

#include "vmwgfx_drv.h"

#define VMW_MAX_NUM_STREAMS 1
#define VMW_OVERLAY_CAP_MASK (SVGA_FIFO_CAP_VIDEO | SVGA_FIFO_CAP_ESCAPE)

38*4882a593Smuzhiyun struct vmw_stream {
39*4882a593Smuzhiyun struct vmw_buffer_object *buf;
40*4882a593Smuzhiyun bool claimed;
41*4882a593Smuzhiyun bool paused;
42*4882a593Smuzhiyun struct drm_vmw_control_stream_arg saved;
43*4882a593Smuzhiyun };
44*4882a593Smuzhiyun
45*4882a593Smuzhiyun /**
46*4882a593Smuzhiyun * Overlay control
47*4882a593Smuzhiyun */
48*4882a593Smuzhiyun struct vmw_overlay {
49*4882a593Smuzhiyun /*
50*4882a593Smuzhiyun * Each stream is a single overlay. In Xv these are called ports.
51*4882a593Smuzhiyun */
52*4882a593Smuzhiyun struct mutex mutex;
53*4882a593Smuzhiyun struct vmw_stream stream[VMW_MAX_NUM_STREAMS];
54*4882a593Smuzhiyun };
55*4882a593Smuzhiyun
vmw_overlay(struct drm_device * dev)56*4882a593Smuzhiyun static inline struct vmw_overlay *vmw_overlay(struct drm_device *dev)
57*4882a593Smuzhiyun {
58*4882a593Smuzhiyun struct vmw_private *dev_priv = vmw_priv(dev);
59*4882a593Smuzhiyun return dev_priv ? dev_priv->overlay_priv : NULL;
60*4882a593Smuzhiyun }
61*4882a593Smuzhiyun
62*4882a593Smuzhiyun struct vmw_escape_header {
63*4882a593Smuzhiyun uint32_t cmd;
64*4882a593Smuzhiyun SVGAFifoCmdEscape body;
65*4882a593Smuzhiyun };
66*4882a593Smuzhiyun
67*4882a593Smuzhiyun struct vmw_escape_video_flush {
68*4882a593Smuzhiyun struct vmw_escape_header escape;
69*4882a593Smuzhiyun SVGAEscapeVideoFlush flush;
70*4882a593Smuzhiyun };
71*4882a593Smuzhiyun
/*
 * fill_escape - Initialize an escape command header.
 * @hdr:  Header to fill in.
 * @size: Size in bytes of the escape payload that follows the header.
 */
static inline void fill_escape(struct vmw_escape_header *hdr,
			       uint32_t size)
{
	hdr->cmd = SVGA_CMD_ESCAPE;
	hdr->body.nsid = SVGA_ESCAPE_NSID_VMWARE;
	hdr->body.size = size;
}

/*
 * fill_flush - Initialize an escape-wrapped video flush command.
 * @cmd:       Flush command to fill in.
 * @stream_id: Stream the flush applies to.
 */
static inline void fill_flush(struct vmw_escape_video_flush *cmd,
			      uint32_t stream_id)
{
	fill_escape(&cmd->escape, sizeof(cmd->flush));
	cmd->flush.cmdType = SVGA_ESCAPE_VMWARE_VIDEO_FLUSH;
	cmd->flush.streamId = stream_id;
}

88*4882a593Smuzhiyun /**
89*4882a593Smuzhiyun * Send put command to hw.
90*4882a593Smuzhiyun *
91*4882a593Smuzhiyun * Returns
92*4882a593Smuzhiyun * -ERESTARTSYS if interrupted by a signal.
93*4882a593Smuzhiyun */
vmw_overlay_send_put(struct vmw_private * dev_priv,struct vmw_buffer_object * buf,struct drm_vmw_control_stream_arg * arg,bool interruptible)94*4882a593Smuzhiyun static int vmw_overlay_send_put(struct vmw_private *dev_priv,
95*4882a593Smuzhiyun struct vmw_buffer_object *buf,
96*4882a593Smuzhiyun struct drm_vmw_control_stream_arg *arg,
97*4882a593Smuzhiyun bool interruptible)
98*4882a593Smuzhiyun {
99*4882a593Smuzhiyun struct vmw_escape_video_flush *flush;
100*4882a593Smuzhiyun size_t fifo_size;
101*4882a593Smuzhiyun bool have_so = (dev_priv->active_display_unit == vmw_du_screen_object);
102*4882a593Smuzhiyun int i, num_items;
103*4882a593Smuzhiyun SVGAGuestPtr ptr;
104*4882a593Smuzhiyun
105*4882a593Smuzhiyun struct {
106*4882a593Smuzhiyun struct vmw_escape_header escape;
107*4882a593Smuzhiyun struct {
108*4882a593Smuzhiyun uint32_t cmdType;
109*4882a593Smuzhiyun uint32_t streamId;
110*4882a593Smuzhiyun } header;
111*4882a593Smuzhiyun } *cmds;
112*4882a593Smuzhiyun struct {
113*4882a593Smuzhiyun uint32_t registerId;
114*4882a593Smuzhiyun uint32_t value;
115*4882a593Smuzhiyun } *items;
116*4882a593Smuzhiyun
117*4882a593Smuzhiyun /* defines are a index needs + 1 */
118*4882a593Smuzhiyun if (have_so)
119*4882a593Smuzhiyun num_items = SVGA_VIDEO_DST_SCREEN_ID + 1;
120*4882a593Smuzhiyun else
121*4882a593Smuzhiyun num_items = SVGA_VIDEO_PITCH_3 + 1;
122*4882a593Smuzhiyun
123*4882a593Smuzhiyun fifo_size = sizeof(*cmds) + sizeof(*flush) + sizeof(*items) * num_items;
124*4882a593Smuzhiyun
125*4882a593Smuzhiyun cmds = VMW_FIFO_RESERVE(dev_priv, fifo_size);
126*4882a593Smuzhiyun /* hardware has hung, can't do anything here */
127*4882a593Smuzhiyun if (!cmds)
128*4882a593Smuzhiyun return -ENOMEM;
129*4882a593Smuzhiyun
130*4882a593Smuzhiyun items = (typeof(items))&cmds[1];
131*4882a593Smuzhiyun flush = (struct vmw_escape_video_flush *)&items[num_items];
132*4882a593Smuzhiyun
133*4882a593Smuzhiyun /* the size is header + number of items */
134*4882a593Smuzhiyun fill_escape(&cmds->escape, sizeof(*items) * (num_items + 1));
135*4882a593Smuzhiyun
136*4882a593Smuzhiyun cmds->header.cmdType = SVGA_ESCAPE_VMWARE_VIDEO_SET_REGS;
137*4882a593Smuzhiyun cmds->header.streamId = arg->stream_id;
138*4882a593Smuzhiyun
139*4882a593Smuzhiyun /* the IDs are neatly numbered */
140*4882a593Smuzhiyun for (i = 0; i < num_items; i++)
141*4882a593Smuzhiyun items[i].registerId = i;
142*4882a593Smuzhiyun
143*4882a593Smuzhiyun vmw_bo_get_guest_ptr(&buf->base, &ptr);
144*4882a593Smuzhiyun ptr.offset += arg->offset;
145*4882a593Smuzhiyun
146*4882a593Smuzhiyun items[SVGA_VIDEO_ENABLED].value = true;
147*4882a593Smuzhiyun items[SVGA_VIDEO_FLAGS].value = arg->flags;
148*4882a593Smuzhiyun items[SVGA_VIDEO_DATA_OFFSET].value = ptr.offset;
149*4882a593Smuzhiyun items[SVGA_VIDEO_FORMAT].value = arg->format;
150*4882a593Smuzhiyun items[SVGA_VIDEO_COLORKEY].value = arg->color_key;
151*4882a593Smuzhiyun items[SVGA_VIDEO_SIZE].value = arg->size;
152*4882a593Smuzhiyun items[SVGA_VIDEO_WIDTH].value = arg->width;
153*4882a593Smuzhiyun items[SVGA_VIDEO_HEIGHT].value = arg->height;
154*4882a593Smuzhiyun items[SVGA_VIDEO_SRC_X].value = arg->src.x;
155*4882a593Smuzhiyun items[SVGA_VIDEO_SRC_Y].value = arg->src.y;
156*4882a593Smuzhiyun items[SVGA_VIDEO_SRC_WIDTH].value = arg->src.w;
157*4882a593Smuzhiyun items[SVGA_VIDEO_SRC_HEIGHT].value = arg->src.h;
158*4882a593Smuzhiyun items[SVGA_VIDEO_DST_X].value = arg->dst.x;
159*4882a593Smuzhiyun items[SVGA_VIDEO_DST_Y].value = arg->dst.y;
160*4882a593Smuzhiyun items[SVGA_VIDEO_DST_WIDTH].value = arg->dst.w;
161*4882a593Smuzhiyun items[SVGA_VIDEO_DST_HEIGHT].value = arg->dst.h;
162*4882a593Smuzhiyun items[SVGA_VIDEO_PITCH_1].value = arg->pitch[0];
163*4882a593Smuzhiyun items[SVGA_VIDEO_PITCH_2].value = arg->pitch[1];
164*4882a593Smuzhiyun items[SVGA_VIDEO_PITCH_3].value = arg->pitch[2];
165*4882a593Smuzhiyun if (have_so) {
166*4882a593Smuzhiyun items[SVGA_VIDEO_DATA_GMRID].value = ptr.gmrId;
167*4882a593Smuzhiyun items[SVGA_VIDEO_DST_SCREEN_ID].value = SVGA_ID_INVALID;
168*4882a593Smuzhiyun }
169*4882a593Smuzhiyun
170*4882a593Smuzhiyun fill_flush(flush, arg->stream_id);
171*4882a593Smuzhiyun
172*4882a593Smuzhiyun vmw_fifo_commit(dev_priv, fifo_size);
173*4882a593Smuzhiyun
174*4882a593Smuzhiyun return 0;
175*4882a593Smuzhiyun }
176*4882a593Smuzhiyun
177*4882a593Smuzhiyun /**
178*4882a593Smuzhiyun * Send stop command to hw.
179*4882a593Smuzhiyun *
180*4882a593Smuzhiyun * Returns
181*4882a593Smuzhiyun * -ERESTARTSYS if interrupted by a signal.
182*4882a593Smuzhiyun */
vmw_overlay_send_stop(struct vmw_private * dev_priv,uint32_t stream_id,bool interruptible)183*4882a593Smuzhiyun static int vmw_overlay_send_stop(struct vmw_private *dev_priv,
184*4882a593Smuzhiyun uint32_t stream_id,
185*4882a593Smuzhiyun bool interruptible)
186*4882a593Smuzhiyun {
187*4882a593Smuzhiyun struct {
188*4882a593Smuzhiyun struct vmw_escape_header escape;
189*4882a593Smuzhiyun SVGAEscapeVideoSetRegs body;
190*4882a593Smuzhiyun struct vmw_escape_video_flush flush;
191*4882a593Smuzhiyun } *cmds;
192*4882a593Smuzhiyun int ret;
193*4882a593Smuzhiyun
194*4882a593Smuzhiyun for (;;) {
195*4882a593Smuzhiyun cmds = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmds));
196*4882a593Smuzhiyun if (cmds)
197*4882a593Smuzhiyun break;
198*4882a593Smuzhiyun
199*4882a593Smuzhiyun ret = vmw_fallback_wait(dev_priv, false, true, 0,
200*4882a593Smuzhiyun interruptible, 3*HZ);
201*4882a593Smuzhiyun if (interruptible && ret == -ERESTARTSYS)
202*4882a593Smuzhiyun return ret;
203*4882a593Smuzhiyun else
204*4882a593Smuzhiyun BUG_ON(ret != 0);
205*4882a593Smuzhiyun }
206*4882a593Smuzhiyun
207*4882a593Smuzhiyun fill_escape(&cmds->escape, sizeof(cmds->body));
208*4882a593Smuzhiyun cmds->body.header.cmdType = SVGA_ESCAPE_VMWARE_VIDEO_SET_REGS;
209*4882a593Smuzhiyun cmds->body.header.streamId = stream_id;
210*4882a593Smuzhiyun cmds->body.items[0].registerId = SVGA_VIDEO_ENABLED;
211*4882a593Smuzhiyun cmds->body.items[0].value = false;
212*4882a593Smuzhiyun fill_flush(&cmds->flush, stream_id);
213*4882a593Smuzhiyun
214*4882a593Smuzhiyun vmw_fifo_commit(dev_priv, sizeof(*cmds));
215*4882a593Smuzhiyun
216*4882a593Smuzhiyun return 0;
217*4882a593Smuzhiyun }
218*4882a593Smuzhiyun
219*4882a593Smuzhiyun /**
220*4882a593Smuzhiyun * Move a buffer to vram or gmr if @pin is set, else unpin the buffer.
221*4882a593Smuzhiyun *
222*4882a593Smuzhiyun * With the introduction of screen objects buffers could now be
223*4882a593Smuzhiyun * used with GMRs instead of being locked to vram.
224*4882a593Smuzhiyun */
vmw_overlay_move_buffer(struct vmw_private * dev_priv,struct vmw_buffer_object * buf,bool pin,bool inter)225*4882a593Smuzhiyun static int vmw_overlay_move_buffer(struct vmw_private *dev_priv,
226*4882a593Smuzhiyun struct vmw_buffer_object *buf,
227*4882a593Smuzhiyun bool pin, bool inter)
228*4882a593Smuzhiyun {
229*4882a593Smuzhiyun if (!pin)
230*4882a593Smuzhiyun return vmw_bo_unpin(dev_priv, buf, inter);
231*4882a593Smuzhiyun
232*4882a593Smuzhiyun if (dev_priv->active_display_unit == vmw_du_legacy)
233*4882a593Smuzhiyun return vmw_bo_pin_in_vram(dev_priv, buf, inter);
234*4882a593Smuzhiyun
235*4882a593Smuzhiyun return vmw_bo_pin_in_vram_or_gmr(dev_priv, buf, inter);
236*4882a593Smuzhiyun }
237*4882a593Smuzhiyun
238*4882a593Smuzhiyun /**
239*4882a593Smuzhiyun * Stop or pause a stream.
240*4882a593Smuzhiyun *
241*4882a593Smuzhiyun * If the stream is paused the no evict flag is removed from the buffer
242*4882a593Smuzhiyun * but left in vram. This allows for instance mode_set to evict it
243*4882a593Smuzhiyun * should it need to.
244*4882a593Smuzhiyun *
245*4882a593Smuzhiyun * The caller must hold the overlay lock.
246*4882a593Smuzhiyun *
247*4882a593Smuzhiyun * @stream_id which stream to stop/pause.
248*4882a593Smuzhiyun * @pause true to pause, false to stop completely.
249*4882a593Smuzhiyun */
vmw_overlay_stop(struct vmw_private * dev_priv,uint32_t stream_id,bool pause,bool interruptible)250*4882a593Smuzhiyun static int vmw_overlay_stop(struct vmw_private *dev_priv,
251*4882a593Smuzhiyun uint32_t stream_id, bool pause,
252*4882a593Smuzhiyun bool interruptible)
253*4882a593Smuzhiyun {
254*4882a593Smuzhiyun struct vmw_overlay *overlay = dev_priv->overlay_priv;
255*4882a593Smuzhiyun struct vmw_stream *stream = &overlay->stream[stream_id];
256*4882a593Smuzhiyun int ret;
257*4882a593Smuzhiyun
258*4882a593Smuzhiyun /* no buffer attached the stream is completely stopped */
259*4882a593Smuzhiyun if (!stream->buf)
260*4882a593Smuzhiyun return 0;
261*4882a593Smuzhiyun
262*4882a593Smuzhiyun /* If the stream is paused this is already done */
263*4882a593Smuzhiyun if (!stream->paused) {
264*4882a593Smuzhiyun ret = vmw_overlay_send_stop(dev_priv, stream_id,
265*4882a593Smuzhiyun interruptible);
266*4882a593Smuzhiyun if (ret)
267*4882a593Smuzhiyun return ret;
268*4882a593Smuzhiyun
269*4882a593Smuzhiyun /* We just remove the NO_EVICT flag so no -ENOMEM */
270*4882a593Smuzhiyun ret = vmw_overlay_move_buffer(dev_priv, stream->buf, false,
271*4882a593Smuzhiyun interruptible);
272*4882a593Smuzhiyun if (interruptible && ret == -ERESTARTSYS)
273*4882a593Smuzhiyun return ret;
274*4882a593Smuzhiyun else
275*4882a593Smuzhiyun BUG_ON(ret != 0);
276*4882a593Smuzhiyun }
277*4882a593Smuzhiyun
278*4882a593Smuzhiyun if (!pause) {
279*4882a593Smuzhiyun vmw_bo_unreference(&stream->buf);
280*4882a593Smuzhiyun stream->paused = false;
281*4882a593Smuzhiyun } else {
282*4882a593Smuzhiyun stream->paused = true;
283*4882a593Smuzhiyun }
284*4882a593Smuzhiyun
285*4882a593Smuzhiyun return 0;
286*4882a593Smuzhiyun }
287*4882a593Smuzhiyun
288*4882a593Smuzhiyun /**
289*4882a593Smuzhiyun * Update a stream and send any put or stop fifo commands needed.
290*4882a593Smuzhiyun *
291*4882a593Smuzhiyun * The caller must hold the overlay lock.
292*4882a593Smuzhiyun *
293*4882a593Smuzhiyun * Returns
294*4882a593Smuzhiyun * -ENOMEM if buffer doesn't fit in vram.
295*4882a593Smuzhiyun * -ERESTARTSYS if interrupted.
296*4882a593Smuzhiyun */
vmw_overlay_update_stream(struct vmw_private * dev_priv,struct vmw_buffer_object * buf,struct drm_vmw_control_stream_arg * arg,bool interruptible)297*4882a593Smuzhiyun static int vmw_overlay_update_stream(struct vmw_private *dev_priv,
298*4882a593Smuzhiyun struct vmw_buffer_object *buf,
299*4882a593Smuzhiyun struct drm_vmw_control_stream_arg *arg,
300*4882a593Smuzhiyun bool interruptible)
301*4882a593Smuzhiyun {
302*4882a593Smuzhiyun struct vmw_overlay *overlay = dev_priv->overlay_priv;
303*4882a593Smuzhiyun struct vmw_stream *stream = &overlay->stream[arg->stream_id];
304*4882a593Smuzhiyun int ret = 0;
305*4882a593Smuzhiyun
306*4882a593Smuzhiyun if (!buf)
307*4882a593Smuzhiyun return -EINVAL;
308*4882a593Smuzhiyun
309*4882a593Smuzhiyun DRM_DEBUG(" %s: old %p, new %p, %spaused\n", __func__,
310*4882a593Smuzhiyun stream->buf, buf, stream->paused ? "" : "not ");
311*4882a593Smuzhiyun
312*4882a593Smuzhiyun if (stream->buf != buf) {
313*4882a593Smuzhiyun ret = vmw_overlay_stop(dev_priv, arg->stream_id,
314*4882a593Smuzhiyun false, interruptible);
315*4882a593Smuzhiyun if (ret)
316*4882a593Smuzhiyun return ret;
317*4882a593Smuzhiyun } else if (!stream->paused) {
318*4882a593Smuzhiyun /* If the buffers match and not paused then just send
319*4882a593Smuzhiyun * the put command, no need to do anything else.
320*4882a593Smuzhiyun */
321*4882a593Smuzhiyun ret = vmw_overlay_send_put(dev_priv, buf, arg, interruptible);
322*4882a593Smuzhiyun if (ret == 0)
323*4882a593Smuzhiyun stream->saved = *arg;
324*4882a593Smuzhiyun else
325*4882a593Smuzhiyun BUG_ON(!interruptible);
326*4882a593Smuzhiyun
327*4882a593Smuzhiyun return ret;
328*4882a593Smuzhiyun }
329*4882a593Smuzhiyun
330*4882a593Smuzhiyun /* We don't start the old stream if we are interrupted.
331*4882a593Smuzhiyun * Might return -ENOMEM if it can't fit the buffer in vram.
332*4882a593Smuzhiyun */
333*4882a593Smuzhiyun ret = vmw_overlay_move_buffer(dev_priv, buf, true, interruptible);
334*4882a593Smuzhiyun if (ret)
335*4882a593Smuzhiyun return ret;
336*4882a593Smuzhiyun
337*4882a593Smuzhiyun ret = vmw_overlay_send_put(dev_priv, buf, arg, interruptible);
338*4882a593Smuzhiyun if (ret) {
339*4882a593Smuzhiyun /* This one needs to happen no matter what. We only remove
340*4882a593Smuzhiyun * the NO_EVICT flag so this is safe from -ENOMEM.
341*4882a593Smuzhiyun */
342*4882a593Smuzhiyun BUG_ON(vmw_overlay_move_buffer(dev_priv, buf, false, false)
343*4882a593Smuzhiyun != 0);
344*4882a593Smuzhiyun return ret;
345*4882a593Smuzhiyun }
346*4882a593Smuzhiyun
347*4882a593Smuzhiyun if (stream->buf != buf)
348*4882a593Smuzhiyun stream->buf = vmw_bo_reference(buf);
349*4882a593Smuzhiyun stream->saved = *arg;
350*4882a593Smuzhiyun /* stream is no longer stopped/paused */
351*4882a593Smuzhiyun stream->paused = false;
352*4882a593Smuzhiyun
353*4882a593Smuzhiyun return 0;
354*4882a593Smuzhiyun }
355*4882a593Smuzhiyun
356*4882a593Smuzhiyun /**
357*4882a593Smuzhiyun * Try to resume all paused streams.
358*4882a593Smuzhiyun *
359*4882a593Smuzhiyun * Used by the kms code after moving a new scanout buffer to vram.
360*4882a593Smuzhiyun *
361*4882a593Smuzhiyun * Takes the overlay lock.
362*4882a593Smuzhiyun */
vmw_overlay_resume_all(struct vmw_private * dev_priv)363*4882a593Smuzhiyun int vmw_overlay_resume_all(struct vmw_private *dev_priv)
364*4882a593Smuzhiyun {
365*4882a593Smuzhiyun struct vmw_overlay *overlay = dev_priv->overlay_priv;
366*4882a593Smuzhiyun int i, ret;
367*4882a593Smuzhiyun
368*4882a593Smuzhiyun if (!overlay)
369*4882a593Smuzhiyun return 0;
370*4882a593Smuzhiyun
371*4882a593Smuzhiyun mutex_lock(&overlay->mutex);
372*4882a593Smuzhiyun
373*4882a593Smuzhiyun for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
374*4882a593Smuzhiyun struct vmw_stream *stream = &overlay->stream[i];
375*4882a593Smuzhiyun if (!stream->paused)
376*4882a593Smuzhiyun continue;
377*4882a593Smuzhiyun
378*4882a593Smuzhiyun ret = vmw_overlay_update_stream(dev_priv, stream->buf,
379*4882a593Smuzhiyun &stream->saved, false);
380*4882a593Smuzhiyun if (ret != 0)
381*4882a593Smuzhiyun DRM_INFO("%s: *warning* failed to resume stream %i\n",
382*4882a593Smuzhiyun __func__, i);
383*4882a593Smuzhiyun }
384*4882a593Smuzhiyun
385*4882a593Smuzhiyun mutex_unlock(&overlay->mutex);
386*4882a593Smuzhiyun
387*4882a593Smuzhiyun return 0;
388*4882a593Smuzhiyun }
389*4882a593Smuzhiyun
390*4882a593Smuzhiyun /**
391*4882a593Smuzhiyun * Pauses all active streams.
392*4882a593Smuzhiyun *
393*4882a593Smuzhiyun * Used by the kms code when moving a new scanout buffer to vram.
394*4882a593Smuzhiyun *
395*4882a593Smuzhiyun * Takes the overlay lock.
396*4882a593Smuzhiyun */
vmw_overlay_pause_all(struct vmw_private * dev_priv)397*4882a593Smuzhiyun int vmw_overlay_pause_all(struct vmw_private *dev_priv)
398*4882a593Smuzhiyun {
399*4882a593Smuzhiyun struct vmw_overlay *overlay = dev_priv->overlay_priv;
400*4882a593Smuzhiyun int i, ret;
401*4882a593Smuzhiyun
402*4882a593Smuzhiyun if (!overlay)
403*4882a593Smuzhiyun return 0;
404*4882a593Smuzhiyun
405*4882a593Smuzhiyun mutex_lock(&overlay->mutex);
406*4882a593Smuzhiyun
407*4882a593Smuzhiyun for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
408*4882a593Smuzhiyun if (overlay->stream[i].paused)
409*4882a593Smuzhiyun DRM_INFO("%s: *warning* stream %i already paused\n",
410*4882a593Smuzhiyun __func__, i);
411*4882a593Smuzhiyun ret = vmw_overlay_stop(dev_priv, i, true, false);
412*4882a593Smuzhiyun WARN_ON(ret != 0);
413*4882a593Smuzhiyun }
414*4882a593Smuzhiyun
415*4882a593Smuzhiyun mutex_unlock(&overlay->mutex);
416*4882a593Smuzhiyun
417*4882a593Smuzhiyun return 0;
418*4882a593Smuzhiyun }
419*4882a593Smuzhiyun
420*4882a593Smuzhiyun
vmw_overlay_available(const struct vmw_private * dev_priv)421*4882a593Smuzhiyun static bool vmw_overlay_available(const struct vmw_private *dev_priv)
422*4882a593Smuzhiyun {
423*4882a593Smuzhiyun return (dev_priv->overlay_priv != NULL &&
424*4882a593Smuzhiyun ((dev_priv->fifo.capabilities & VMW_OVERLAY_CAP_MASK) ==
425*4882a593Smuzhiyun VMW_OVERLAY_CAP_MASK));
426*4882a593Smuzhiyun }
427*4882a593Smuzhiyun
vmw_overlay_ioctl(struct drm_device * dev,void * data,struct drm_file * file_priv)428*4882a593Smuzhiyun int vmw_overlay_ioctl(struct drm_device *dev, void *data,
429*4882a593Smuzhiyun struct drm_file *file_priv)
430*4882a593Smuzhiyun {
431*4882a593Smuzhiyun struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
432*4882a593Smuzhiyun struct vmw_private *dev_priv = vmw_priv(dev);
433*4882a593Smuzhiyun struct vmw_overlay *overlay = dev_priv->overlay_priv;
434*4882a593Smuzhiyun struct drm_vmw_control_stream_arg *arg =
435*4882a593Smuzhiyun (struct drm_vmw_control_stream_arg *)data;
436*4882a593Smuzhiyun struct vmw_buffer_object *buf;
437*4882a593Smuzhiyun struct vmw_resource *res;
438*4882a593Smuzhiyun int ret;
439*4882a593Smuzhiyun
440*4882a593Smuzhiyun if (!vmw_overlay_available(dev_priv))
441*4882a593Smuzhiyun return -ENOSYS;
442*4882a593Smuzhiyun
443*4882a593Smuzhiyun ret = vmw_user_stream_lookup(dev_priv, tfile, &arg->stream_id, &res);
444*4882a593Smuzhiyun if (ret)
445*4882a593Smuzhiyun return ret;
446*4882a593Smuzhiyun
447*4882a593Smuzhiyun mutex_lock(&overlay->mutex);
448*4882a593Smuzhiyun
449*4882a593Smuzhiyun if (!arg->enabled) {
450*4882a593Smuzhiyun ret = vmw_overlay_stop(dev_priv, arg->stream_id, false, true);
451*4882a593Smuzhiyun goto out_unlock;
452*4882a593Smuzhiyun }
453*4882a593Smuzhiyun
454*4882a593Smuzhiyun ret = vmw_user_bo_lookup(tfile, arg->handle, &buf, NULL);
455*4882a593Smuzhiyun if (ret)
456*4882a593Smuzhiyun goto out_unlock;
457*4882a593Smuzhiyun
458*4882a593Smuzhiyun ret = vmw_overlay_update_stream(dev_priv, buf, arg, true);
459*4882a593Smuzhiyun
460*4882a593Smuzhiyun vmw_bo_unreference(&buf);
461*4882a593Smuzhiyun
462*4882a593Smuzhiyun out_unlock:
463*4882a593Smuzhiyun mutex_unlock(&overlay->mutex);
464*4882a593Smuzhiyun vmw_resource_unreference(&res);
465*4882a593Smuzhiyun
466*4882a593Smuzhiyun return ret;
467*4882a593Smuzhiyun }
468*4882a593Smuzhiyun
vmw_overlay_num_overlays(struct vmw_private * dev_priv)469*4882a593Smuzhiyun int vmw_overlay_num_overlays(struct vmw_private *dev_priv)
470*4882a593Smuzhiyun {
471*4882a593Smuzhiyun if (!vmw_overlay_available(dev_priv))
472*4882a593Smuzhiyun return 0;
473*4882a593Smuzhiyun
474*4882a593Smuzhiyun return VMW_MAX_NUM_STREAMS;
475*4882a593Smuzhiyun }
476*4882a593Smuzhiyun
vmw_overlay_num_free_overlays(struct vmw_private * dev_priv)477*4882a593Smuzhiyun int vmw_overlay_num_free_overlays(struct vmw_private *dev_priv)
478*4882a593Smuzhiyun {
479*4882a593Smuzhiyun struct vmw_overlay *overlay = dev_priv->overlay_priv;
480*4882a593Smuzhiyun int i, k;
481*4882a593Smuzhiyun
482*4882a593Smuzhiyun if (!vmw_overlay_available(dev_priv))
483*4882a593Smuzhiyun return 0;
484*4882a593Smuzhiyun
485*4882a593Smuzhiyun mutex_lock(&overlay->mutex);
486*4882a593Smuzhiyun
487*4882a593Smuzhiyun for (i = 0, k = 0; i < VMW_MAX_NUM_STREAMS; i++)
488*4882a593Smuzhiyun if (!overlay->stream[i].claimed)
489*4882a593Smuzhiyun k++;
490*4882a593Smuzhiyun
491*4882a593Smuzhiyun mutex_unlock(&overlay->mutex);
492*4882a593Smuzhiyun
493*4882a593Smuzhiyun return k;
494*4882a593Smuzhiyun }
495*4882a593Smuzhiyun
/*
 * vmw_overlay_claim - Claim a free stream for userspace.
 * @out: On success, receives the claimed stream id.
 *
 * Return: 0 on success, -ENOSYS when overlays are unavailable,
 * -ESRCH when all streams are already claimed.
 */
int vmw_overlay_claim(struct vmw_private *dev_priv, uint32_t *out)
{
	struct vmw_overlay *overlay = dev_priv->overlay_priv;
	int i;

	if (!overlay)
		return -ENOSYS;

	mutex_lock(&overlay->mutex);

	for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
		if (overlay->stream[i].claimed)
			continue;

		overlay->stream[i].claimed = true;
		*out = i;
		break;
	}

	mutex_unlock(&overlay->mutex);

	return i < VMW_MAX_NUM_STREAMS ? 0 : -ESRCH;
}

/*
 * vmw_overlay_unref - Release a claimed stream.
 *
 * Stops the stream completely before marking it unclaimed.
 */
int vmw_overlay_unref(struct vmw_private *dev_priv, uint32_t stream_id)
{
	struct vmw_overlay *overlay = dev_priv->overlay_priv;

	BUG_ON(stream_id >= VMW_MAX_NUM_STREAMS);

	if (!overlay)
		return -ENOSYS;

	mutex_lock(&overlay->mutex);

	WARN_ON(!overlay->stream[stream_id].claimed);
	vmw_overlay_stop(dev_priv, stream_id, false, false);
	overlay->stream[stream_id].claimed = false;

	mutex_unlock(&overlay->mutex);

	return 0;
}

vmw_overlay_init(struct vmw_private * dev_priv)540*4882a593Smuzhiyun int vmw_overlay_init(struct vmw_private *dev_priv)
541*4882a593Smuzhiyun {
542*4882a593Smuzhiyun struct vmw_overlay *overlay;
543*4882a593Smuzhiyun int i;
544*4882a593Smuzhiyun
545*4882a593Smuzhiyun if (dev_priv->overlay_priv)
546*4882a593Smuzhiyun return -EINVAL;
547*4882a593Smuzhiyun
548*4882a593Smuzhiyun overlay = kzalloc(sizeof(*overlay), GFP_KERNEL);
549*4882a593Smuzhiyun if (!overlay)
550*4882a593Smuzhiyun return -ENOMEM;
551*4882a593Smuzhiyun
552*4882a593Smuzhiyun mutex_init(&overlay->mutex);
553*4882a593Smuzhiyun for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
554*4882a593Smuzhiyun overlay->stream[i].buf = NULL;
555*4882a593Smuzhiyun overlay->stream[i].paused = false;
556*4882a593Smuzhiyun overlay->stream[i].claimed = false;
557*4882a593Smuzhiyun }
558*4882a593Smuzhiyun
559*4882a593Smuzhiyun dev_priv->overlay_priv = overlay;
560*4882a593Smuzhiyun
561*4882a593Smuzhiyun return 0;
562*4882a593Smuzhiyun }
563*4882a593Smuzhiyun
vmw_overlay_close(struct vmw_private * dev_priv)564*4882a593Smuzhiyun int vmw_overlay_close(struct vmw_private *dev_priv)
565*4882a593Smuzhiyun {
566*4882a593Smuzhiyun struct vmw_overlay *overlay = dev_priv->overlay_priv;
567*4882a593Smuzhiyun bool forgotten_buffer = false;
568*4882a593Smuzhiyun int i;
569*4882a593Smuzhiyun
570*4882a593Smuzhiyun if (!overlay)
571*4882a593Smuzhiyun return -ENOSYS;
572*4882a593Smuzhiyun
573*4882a593Smuzhiyun for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
574*4882a593Smuzhiyun if (overlay->stream[i].buf) {
575*4882a593Smuzhiyun forgotten_buffer = true;
576*4882a593Smuzhiyun vmw_overlay_stop(dev_priv, i, false, false);
577*4882a593Smuzhiyun }
578*4882a593Smuzhiyun }
579*4882a593Smuzhiyun
580*4882a593Smuzhiyun WARN_ON(forgotten_buffer);
581*4882a593Smuzhiyun
582*4882a593Smuzhiyun dev_priv->overlay_priv = NULL;
583*4882a593Smuzhiyun kfree(overlay);
584*4882a593Smuzhiyun
585*4882a593Smuzhiyun return 0;
586*4882a593Smuzhiyun }
587