/*
 * Copyright 2013 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alon Levy
 */

/* QXL cmd/ring handling */

#include <linux/delay.h>

#include <drm/drm_util.h>

#include "qxl_drv.h"
#include "qxl_object.h"

static int qxl_reap_surface_id(struct qxl_device *qdev, int max_to_reap);

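/*
 * The ring itself lives in memory shared with the device: a qxl_ring_header
 * followed by a power-of-two array of fixed-size elements.  struct qxl_ring
 * is the driver-side wrapper that adds locking, the notify I/O port and the
 * wait queue used when the ring is full.
 */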
struct ring {
	struct qxl_ring_header header;
	uint8_t elements[];
};

struct qxl_ring {
	struct ring *ring;
	int element_size;
	int n_elements;
	int prod_notify;
	wait_queue_head_t *push_event;
	spinlock_t lock;
};

void qxl_ring_free(struct qxl_ring *ring)
{
	kfree(ring);
}

void qxl_ring_init_hdr(struct qxl_ring *ring)
{
	ring->ring->header.notify_on_prod = ring->n_elements;
}

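/*
 * Wrap a ring header that already lives in device-shared memory.  The header
 * itself is not allocated here, only the driver-side bookkeeping.  Returns
 * NULL on allocation failure.
 */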
struct qxl_ring *
qxl_ring_create(struct qxl_ring_header *header,
		int element_size,
		int n_elements,
		int prod_notify,
		bool set_prod_notify,
		wait_queue_head_t *push_event)
{
	struct qxl_ring *ring;

	ring = kmalloc(sizeof(*ring), GFP_KERNEL);
	if (!ring)
		return NULL;

	ring->ring = (struct ring *)header;
	ring->element_size = element_size;
	ring->n_elements = n_elements;
	ring->prod_notify = prod_notify;
	ring->push_event = push_event;
	if (set_prod_notify)
		qxl_ring_init_hdr(ring);
	spin_lock_init(&ring->lock);
	return ring;
}

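/*
 * Returns non-zero when there is room for another element.  When the ring is
 * full, notify_on_cons is armed so the device notifies us once it has
 * consumed an entry; qxl_ring_push() waits on push_event for that.
 */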
static int qxl_check_header(struct qxl_ring *ring)
{
	int ret;
	struct qxl_ring_header *header = &(ring->ring->header);
	unsigned long flags;

	spin_lock_irqsave(&ring->lock, flags);
	ret = header->prod - header->cons < header->num_items;
	if (ret == 0)
		header->notify_on_cons = header->cons + 1;
	spin_unlock_irqrestore(&ring->lock, flags);
	return ret;
}

int qxl_check_idle(struct qxl_ring *ring)
{
	int ret;
	struct qxl_ring_header *header = &(ring->ring->header);
	unsigned long flags;

	spin_lock_irqsave(&ring->lock, flags);
	ret = header->prod == header->cons;
	spin_unlock_irqrestore(&ring->lock, flags);
	return ret;
}

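/*
 * Push one element onto the ring.  If the ring is full we either busy-wait
 * (when sleeping is not allowed) or sleep on push_event until the device has
 * made room.  After copying the element and bumping the producer index, the
 * device is kicked through the notify port if it asked to be notified.
 */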
int qxl_ring_push(struct qxl_ring *ring,
		  const void *new_elt, bool interruptible)
{
	struct qxl_ring_header *header = &(ring->ring->header);
	uint8_t *elt;
	int idx, ret;
	unsigned long flags;

	spin_lock_irqsave(&ring->lock, flags);
	if (header->prod - header->cons == header->num_items) {
		header->notify_on_cons = header->cons + 1;
		mb();
		spin_unlock_irqrestore(&ring->lock, flags);
		if (!drm_can_sleep()) {
			while (!qxl_check_header(ring))
				udelay(1);
		} else {
			if (interruptible) {
				ret = wait_event_interruptible(*ring->push_event,
							       qxl_check_header(ring));
				if (ret)
					return ret;
			} else {
				wait_event(*ring->push_event,
					   qxl_check_header(ring));
			}

		}
		spin_lock_irqsave(&ring->lock, flags);
	}

	idx = header->prod & (ring->n_elements - 1);
	elt = ring->ring->elements + idx * ring->element_size;

	memcpy((void *)elt, new_elt, ring->element_size);

	header->prod++;

	mb();

	if (header->prod == header->notify_on_prod)
		outb(0, ring->prod_notify);

	spin_unlock_irqrestore(&ring->lock, flags);
	return 0;
}

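/*
 * Pop one element from the ring into *element.  Returns false if the ring is
 * empty; in that case notify_on_prod is armed so the device signals us when
 * new entries arrive.
 */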
static bool qxl_ring_pop(struct qxl_ring *ring,
			 void *element)
{
	volatile struct qxl_ring_header *header = &(ring->ring->header);
	volatile uint8_t *ring_elt;
	int idx;
	unsigned long flags;

	spin_lock_irqsave(&ring->lock, flags);
	if (header->cons == header->prod) {
		header->notify_on_prod = header->cons + 1;
		spin_unlock_irqrestore(&ring->lock, flags);
		return false;
	}

	idx = header->cons & (ring->n_elements - 1);
	ring_elt = ring->ring->elements + idx * ring->element_size;

	memcpy(element, (void *)ring_elt, ring->element_size);

	header->cons++;

	spin_unlock_irqrestore(&ring->lock, flags);
	return true;
}

int
qxl_push_command_ring_release(struct qxl_device *qdev, struct qxl_release *release,
			      uint32_t type, bool interruptible)
{
	struct qxl_command cmd;

	cmd.type = type;
	cmd.data = qxl_bo_physical_address(qdev, release->release_bo, release->release_offset);

	return qxl_ring_push(qdev->command_ring, &cmd, interruptible);
}

int
qxl_push_cursor_ring_release(struct qxl_device *qdev, struct qxl_release *release,
			     uint32_t type, bool interruptible)
{
	struct qxl_command cmd;

	cmd.type = type;
	cmd.data = qxl_bo_physical_address(qdev, release->release_bo, release->release_offset);

	return qxl_ring_push(qdev->cursor_ring, &cmd, interruptible);
}

bool qxl_queue_garbage_collect(struct qxl_device *qdev, bool flush)
{
	if (!qxl_check_idle(qdev->release_ring)) {
		schedule_work(&qdev->gc_work);
		if (flush)
			flush_work(&qdev->gc_work);
		return true;
	}
	return false;
}

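/*
 * Drain the release ring.  Each popped id is the head of a chain of releases
 * (info->next points to the next one); walk the chain and free every release
 * the device has finished with.  Returns the number of releases freed.
 */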
int qxl_garbage_collect(struct qxl_device *qdev)
{
	struct qxl_release *release;
	uint64_t id, next_id;
	int i = 0;
	union qxl_release_info *info;

	while (qxl_ring_pop(qdev->release_ring, &id)) {
		DRM_DEBUG_DRIVER("popped %lld\n", id);
		while (id) {
			release = qxl_release_from_id_locked(qdev, id);
			if (release == NULL)
				break;

			info = qxl_release_map(qdev, release);
			next_id = info->next;
			qxl_release_unmap(qdev, release, info);

			DRM_DEBUG_DRIVER("popped %lld, next %lld\n", id,
					 next_id);

			switch (release->type) {
			case QXL_RELEASE_DRAWABLE:
			case QXL_RELEASE_SURFACE_CMD:
			case QXL_RELEASE_CURSOR_CMD:
				break;
			default:
				DRM_ERROR("unexpected release type\n");
				break;
			}
			id = next_id;

			qxl_release_free(qdev, release);
			++i;
		}
	}

	DRM_DEBUG_DRIVER("%d\n", i);

	return i;
}

int qxl_alloc_bo_reserved(struct qxl_device *qdev,
			  struct qxl_release *release,
			  unsigned long size,
			  struct qxl_bo **_bo)
{
	struct qxl_bo *bo;
	int ret;

	ret = qxl_bo_create(qdev, size, false /* not kernel - device */,
			    false, QXL_GEM_DOMAIN_VRAM, 0, NULL, &bo);
	if (ret) {
		DRM_ERROR("failed to allocate VRAM BO\n");
		return ret;
	}
	ret = qxl_release_list_add(release, bo);
	if (ret)
		goto out_unref;

	*_bo = bo;
	return 0;
out_unref:
	qxl_bo_unref(&bo);
	return ret;
}

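/*
 * Issue an "async" I/O command: write val to the given port and wait, with a
 * 5 second timeout, for the completion interrupt.  async_io_mutex serialises
 * callers, and last_sent_io_cmd vs. irq_received_io_cmd tracks whether a
 * previously sent command is still outstanding.  Returns 0 on success or
 * timeout, a negative errno if the wait was interrupted.
 */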
static int wait_for_io_cmd_user(struct qxl_device *qdev, uint8_t val, long port, bool intr)
{
	int irq_num;
	long addr = qdev->io_base + port;
	int ret;

	mutex_lock(&qdev->async_io_mutex);
	irq_num = atomic_read(&qdev->irq_received_io_cmd);
	if (qdev->last_sent_io_cmd > irq_num) {
		if (intr)
			ret = wait_event_interruptible_timeout(qdev->io_cmd_event,
							       atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
		else
			ret = wait_event_timeout(qdev->io_cmd_event,
						 atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
		/* 0 is timeout, just bail, the "hw" has gone away */
		if (ret <= 0)
			goto out;
		irq_num = atomic_read(&qdev->irq_received_io_cmd);
	}
	outb(val, addr);
	qdev->last_sent_io_cmd = irq_num + 1;
	if (intr)
		ret = wait_event_interruptible_timeout(qdev->io_cmd_event,
						       atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
	else
		ret = wait_event_timeout(qdev->io_cmd_event,
					 atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
out:
	if (ret > 0)
		ret = 0;
	mutex_unlock(&qdev->async_io_mutex);
	return ret;
}

static void wait_for_io_cmd(struct qxl_device *qdev, uint8_t val, long port)
{
	int ret;

restart:
	ret = wait_for_io_cmd_user(qdev, val, port, false);
	if (ret == -ERESTARTSYS)
		goto restart;
}

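/*
 * Ask the device to update (render) a rectangle of a surface.  The area is
 * validated against the surface dimensions, written into the ram header and
 * then kicked with QXL_IO_UPDATE_AREA_ASYNC.  update_area_mutex protects the
 * shared ram header fields.
 */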
int qxl_io_update_area(struct qxl_device *qdev, struct qxl_bo *surf,
		       const struct qxl_rect *area)
{
	int surface_id;
	uint32_t surface_width, surface_height;
	int ret;

	if (!surf->hw_surf_alloc)
		DRM_ERROR("got io update area with no hw surface\n");

	if (surf->is_primary)
		surface_id = 0;
	else
		surface_id = surf->surface_id;
	surface_width = surf->surf.width;
	surface_height = surf->surf.height;

	if (area->left < 0 || area->top < 0 ||
	    area->right > surface_width || area->bottom > surface_height)
		return -EINVAL;

	mutex_lock(&qdev->update_area_mutex);
	qdev->ram_header->update_area = *area;
	qdev->ram_header->update_surface = surface_id;
	ret = wait_for_io_cmd_user(qdev, 0, QXL_IO_UPDATE_AREA_ASYNC, true);
	mutex_unlock(&qdev->update_area_mutex);
	return ret;
}

void qxl_io_notify_oom(struct qxl_device *qdev)
{
	outb(0, qdev->io_base + QXL_IO_NOTIFY_OOM);
}

void qxl_io_flush_release(struct qxl_device *qdev)
{
	outb(0, qdev->io_base + QXL_IO_FLUSH_RELEASE);
}

void qxl_io_flush_surfaces(struct qxl_device *qdev)
{
	wait_for_io_cmd(qdev, 0, QXL_IO_FLUSH_SURFACES_ASYNC);
}

void qxl_io_destroy_primary(struct qxl_device *qdev)
{
	wait_for_io_cmd(qdev, 0, QXL_IO_DESTROY_PRIMARY_ASYNC);
	qdev->primary_bo->is_primary = false;
	drm_gem_object_put(&qdev->primary_bo->tbo.base);
	qdev->primary_bo = NULL;
}

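/*
 * Make the given BO the primary surface (surface id 0).  The surface
 * parameters are written into the ram header and the device is kicked with
 * QXL_IO_CREATE_PRIMARY_ASYNC; a GEM reference is held on the BO until
 * qxl_io_destroy_primary() drops it.
 */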
void qxl_io_create_primary(struct qxl_device *qdev, struct qxl_bo *bo)
{
	struct qxl_surface_create *create;

	if (WARN_ON(qdev->primary_bo))
		return;

	DRM_DEBUG_DRIVER("qdev %p, ram_header %p\n", qdev, qdev->ram_header);
	create = &qdev->ram_header->create_surface;
	create->format = bo->surf.format;
	create->width = bo->surf.width;
	create->height = bo->surf.height;
	create->stride = bo->surf.stride;
	create->mem = qxl_bo_physical_address(qdev, bo, 0);

	DRM_DEBUG_DRIVER("mem = %llx, from %p\n", create->mem, bo->kptr);

	create->flags = QXL_SURF_FLAG_KEEP_DATA;
	create->type = QXL_SURF_TYPE_PRIMARY;

	wait_for_io_cmd(qdev, 0, QXL_IO_CREATE_PRIMARY_ASYNC);
	qdev->primary_bo = bo;
	qdev->primary_bo->is_primary = true;
	drm_gem_object_get(&qdev->primary_bo->tbo.base);
}

void qxl_io_memslot_add(struct qxl_device *qdev, uint8_t id)
{
	DRM_DEBUG_DRIVER("qxl_memslot_add %d\n", id);
	wait_for_io_cmd(qdev, id, QXL_IO_MEMSLOT_ADD_ASYNC);
}

void qxl_io_reset(struct qxl_device *qdev)
{
	outb(0, qdev->io_base + QXL_IO_RESET);
}

void qxl_io_monitors_config(struct qxl_device *qdev)
{
	wait_for_io_cmd(qdev, 0, QXL_IO_MONITORS_CONFIG_ASYNC);
}

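/*
 * Allocate a surface id from the idr (ids start at 1; id 0 is the primary
 * surface).  If the allocated id exceeds the number of surface slots the
 * device exposes, reap a couple of surfaces from the hardware and retry.
 */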
int qxl_surface_id_alloc(struct qxl_device *qdev,
			 struct qxl_bo *surf)
{
	uint32_t handle;
	int idr_ret;
	int count = 0;
again:
	idr_preload(GFP_ATOMIC);
	spin_lock(&qdev->surf_id_idr_lock);
	idr_ret = idr_alloc(&qdev->surf_id_idr, NULL, 1, 0, GFP_NOWAIT);
	spin_unlock(&qdev->surf_id_idr_lock);
	idr_preload_end();
	if (idr_ret < 0)
		return idr_ret;
	handle = idr_ret;

	if (handle >= qdev->rom->n_surfaces) {
		count++;
		spin_lock(&qdev->surf_id_idr_lock);
		idr_remove(&qdev->surf_id_idr, handle);
		spin_unlock(&qdev->surf_id_idr_lock);
		qxl_reap_surface_id(qdev, 2);
		goto again;
	}
	surf->surface_id = handle;

	spin_lock(&qdev->surf_id_idr_lock);
	qdev->last_alloced_surf_id = handle;
	spin_unlock(&qdev->surf_id_idr_lock);
	return 0;
}

void qxl_surface_id_dealloc(struct qxl_device *qdev,
			    uint32_t surface_id)
{
	spin_lock(&qdev->surf_id_idr_lock);
	idr_remove(&qdev->surf_id_idr, surface_id);
	spin_unlock(&qdev->surf_id_idr_lock);
}

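/*
 * Create the surface on the device side: reserve a release, fill in a
 * QXL_SURFACE_CMD_CREATE command describing the BO and push it on the
 * command ring.  Once created, the surface is published in the idr so the
 * reaper can find it.
 */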
int qxl_hw_surface_alloc(struct qxl_device *qdev,
			 struct qxl_bo *surf)
{
	struct qxl_surface_cmd *cmd;
	struct qxl_release *release;
	int ret;

	if (surf->hw_surf_alloc)
		return 0;

	ret = qxl_alloc_surface_release_reserved(qdev, QXL_SURFACE_CMD_CREATE,
						 NULL,
						 &release);
	if (ret)
		return ret;

	ret = qxl_release_reserve_list(release, true);
	if (ret) {
		qxl_release_free(qdev, release);
		return ret;
	}
	cmd = (struct qxl_surface_cmd *)qxl_release_map(qdev, release);
	cmd->type = QXL_SURFACE_CMD_CREATE;
	cmd->flags = QXL_SURF_FLAG_KEEP_DATA;
	cmd->u.surface_create.format = surf->surf.format;
	cmd->u.surface_create.width = surf->surf.width;
	cmd->u.surface_create.height = surf->surf.height;
	cmd->u.surface_create.stride = surf->surf.stride;
	cmd->u.surface_create.data = qxl_bo_physical_address(qdev, surf, 0);
	cmd->surface_id = surf->surface_id;
	qxl_release_unmap(qdev, release, &cmd->release_info);

	surf->surf_create = release;

	/* no need to add a release to the fence for this surface bo,
	   since it is only released when we ask to destroy the surface
	   and it would never signal otherwise */
	qxl_release_fence_buffer_objects(release);
	qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false);

	surf->hw_surf_alloc = true;
	spin_lock(&qdev->surf_id_idr_lock);
	idr_replace(&qdev->surf_id_idr, surf, surf->surface_id);
	spin_unlock(&qdev->surf_id_idr_lock);
	return 0;
}

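/*
 * Destroy the surface on the device side: push a QXL_SURFACE_CMD_DESTROY
 * command that references the original create release.  The surface object
 * is pulled out of the idr first so the reaper cannot race with the
 * teardown; the id itself is recorded in release->surface_release_id and
 * only freed once the destroy release has been processed.
 */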
int qxl_hw_surface_dealloc(struct qxl_device *qdev,
			   struct qxl_bo *surf)
{
	struct qxl_surface_cmd *cmd;
	struct qxl_release *release;
	int ret;
	int id;

	if (!surf->hw_surf_alloc)
		return 0;

	ret = qxl_alloc_surface_release_reserved(qdev, QXL_SURFACE_CMD_DESTROY,
						 surf->surf_create,
						 &release);
	if (ret)
		return ret;

	surf->surf_create = NULL;
	/* remove the surface from the idr, but not the surface id yet */
	spin_lock(&qdev->surf_id_idr_lock);
	idr_replace(&qdev->surf_id_idr, NULL, surf->surface_id);
	spin_unlock(&qdev->surf_id_idr_lock);
	surf->hw_surf_alloc = false;

	id = surf->surface_id;
	surf->surface_id = 0;

	release->surface_release_id = id;
	cmd = (struct qxl_surface_cmd *)qxl_release_map(qdev, release);
	cmd->type = QXL_SURFACE_CMD_DESTROY;
	cmd->surface_id = id;
	qxl_release_unmap(qdev, release, &cmd->release_info);

	qxl_release_fence_buffer_objects(release);
	qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false);

	return 0;
}

static int qxl_update_surface(struct qxl_device *qdev, struct qxl_bo *surf)
{
	struct qxl_rect rect;
	int ret;

	/* if we are evicting, we need to make sure the surface is up
	   to date */
	rect.left = 0;
	rect.right = surf->surf.width;
	rect.top = 0;
	rect.bottom = surf->surf.height;
retry:
	ret = qxl_io_update_area(qdev, surf, &rect);
	if (ret == -ERESTARTSYS)
		goto retry;
	return ret;
}

static void qxl_surface_evict_locked(struct qxl_device *qdev, struct qxl_bo *surf, bool do_update_area)
{
	/* no need to update area if we are just freeing the surface normally */
	if (do_update_area)
		qxl_update_surface(qdev, surf);

	/* nuke the surface id at the hw */
	qxl_hw_surface_dealloc(qdev, surf);
}

void qxl_surface_evict(struct qxl_device *qdev, struct qxl_bo *surf, bool do_update_area)
{
	mutex_lock(&qdev->surf_evict_mutex);
	qxl_surface_evict_locked(qdev, surf, do_update_area);
	mutex_unlock(&qdev->surf_evict_mutex);
}

static int qxl_reap_surf(struct qxl_device *qdev, struct qxl_bo *surf, bool stall)
{
	int ret;

	ret = qxl_bo_reserve(surf);
	if (ret)
		return ret;

	if (stall)
		mutex_unlock(&qdev->surf_evict_mutex);

	ret = ttm_bo_wait(&surf->tbo, true, !stall);

	if (stall)
		mutex_lock(&qdev->surf_evict_mutex);
	if (ret) {
		qxl_bo_unreserve(surf);
		return ret;
	}

	qxl_surface_evict_locked(qdev, surf, true);
	qxl_bo_unreserve(surf);
	return 0;
}

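/*
 * Reclaim up to max_to_reap surfaces.  Starting just after the most recently
 * allocated id, walk the idr and evict surfaces from the device until enough
 * have been freed.  The first pass is non-blocking; if nothing could be
 * reaped it is repeated in stalling mode.
 */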
static int qxl_reap_surface_id(struct qxl_device *qdev, int max_to_reap)
{
	int num_reaped = 0;
	int i, ret;
	bool stall = false;
	int start = 0;

	mutex_lock(&qdev->surf_evict_mutex);
again:

	spin_lock(&qdev->surf_id_idr_lock);
	start = qdev->last_alloced_surf_id + 1;
	spin_unlock(&qdev->surf_id_idr_lock);

	for (i = start; i < start + qdev->rom->n_surfaces; i++) {
		void *objptr;
		int surfid = i % qdev->rom->n_surfaces;

		/* this avoids the case where the object is in the
		   idr but has been evicted half way - it makes
		   the idr lookup atomic with the eviction */
		spin_lock(&qdev->surf_id_idr_lock);
		objptr = idr_find(&qdev->surf_id_idr, surfid);
		spin_unlock(&qdev->surf_id_idr_lock);

		if (!objptr)
			continue;

		ret = qxl_reap_surf(qdev, objptr, stall);
		if (ret == 0)
			num_reaped++;
		if (num_reaped >= max_to_reap)
			break;
	}
	if (num_reaped == 0 && stall == false) {
		stall = true;
		goto again;
	}

	mutex_unlock(&qdev->surf_evict_mutex);
	if (num_reaped) {
		usleep_range(500, 1000);
		qxl_queue_garbage_collect(qdev, true);
	}

	return 0;
}