xref: /OK3568_Linux_fs/kernel/drivers/gpu/drm/i810/i810_dma.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun /* i810_dma.c -- DMA support for the i810 -*- linux-c -*-
2*4882a593Smuzhiyun  * Created: Mon Dec 13 01:50:01 1999 by jhartmann@precisioninsight.com
3*4882a593Smuzhiyun  *
4*4882a593Smuzhiyun  * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
5*4882a593Smuzhiyun  * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
6*4882a593Smuzhiyun  * All Rights Reserved.
7*4882a593Smuzhiyun  *
8*4882a593Smuzhiyun  * Permission is hereby granted, free of charge, to any person obtaining a
9*4882a593Smuzhiyun  * copy of this software and associated documentation files (the "Software"),
10*4882a593Smuzhiyun  * to deal in the Software without restriction, including without limitation
11*4882a593Smuzhiyun  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12*4882a593Smuzhiyun  * and/or sell copies of the Software, and to permit persons to whom the
13*4882a593Smuzhiyun  * Software is furnished to do so, subject to the following conditions:
14*4882a593Smuzhiyun  *
15*4882a593Smuzhiyun  * The above copyright notice and this permission notice (including the next
16*4882a593Smuzhiyun  * paragraph) shall be included in all copies or substantial portions of the
17*4882a593Smuzhiyun  * Software.
18*4882a593Smuzhiyun  *
19*4882a593Smuzhiyun  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20*4882a593Smuzhiyun  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21*4882a593Smuzhiyun  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
22*4882a593Smuzhiyun  * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
23*4882a593Smuzhiyun  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
24*4882a593Smuzhiyun  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
25*4882a593Smuzhiyun  * DEALINGS IN THE SOFTWARE.
26*4882a593Smuzhiyun  *
27*4882a593Smuzhiyun  * Authors: Rickard E. (Rik) Faith <faith@valinux.com>
28*4882a593Smuzhiyun  *	    Jeff Hartmann <jhartmann@valinux.com>
29*4882a593Smuzhiyun  *          Keith Whitwell <keith@tungstengraphics.com>
30*4882a593Smuzhiyun  *
31*4882a593Smuzhiyun  */
32*4882a593Smuzhiyun 
33*4882a593Smuzhiyun #include <linux/delay.h>
34*4882a593Smuzhiyun #include <linux/mman.h>
35*4882a593Smuzhiyun #include <linux/pci.h>
36*4882a593Smuzhiyun 
37*4882a593Smuzhiyun #include <drm/drm_agpsupport.h>
38*4882a593Smuzhiyun #include <drm/drm_device.h>
39*4882a593Smuzhiyun #include <drm/drm_drv.h>
40*4882a593Smuzhiyun #include <drm/drm_file.h>
41*4882a593Smuzhiyun #include <drm/drm_ioctl.h>
42*4882a593Smuzhiyun #include <drm/drm_irq.h>
43*4882a593Smuzhiyun #include <drm/drm_print.h>
44*4882a593Smuzhiyun #include <drm/i810_drm.h>
45*4882a593Smuzhiyun 
46*4882a593Smuzhiyun #include "i810_drv.h"
47*4882a593Smuzhiyun 
48*4882a593Smuzhiyun #define I810_BUF_FREE		2
49*4882a593Smuzhiyun #define I810_BUF_CLIENT		1
50*4882a593Smuzhiyun #define I810_BUF_HARDWARE	0
51*4882a593Smuzhiyun 
52*4882a593Smuzhiyun #define I810_BUF_UNMAPPED 0
53*4882a593Smuzhiyun #define I810_BUF_MAPPED   1
54*4882a593Smuzhiyun 
i810_freelist_get(struct drm_device * dev)55*4882a593Smuzhiyun static struct drm_buf *i810_freelist_get(struct drm_device * dev)
56*4882a593Smuzhiyun {
57*4882a593Smuzhiyun 	struct drm_device_dma *dma = dev->dma;
58*4882a593Smuzhiyun 	int i;
59*4882a593Smuzhiyun 	int used;
60*4882a593Smuzhiyun 
61*4882a593Smuzhiyun 	/* Linear search might not be the best solution */
62*4882a593Smuzhiyun 
63*4882a593Smuzhiyun 	for (i = 0; i < dma->buf_count; i++) {
64*4882a593Smuzhiyun 		struct drm_buf *buf = dma->buflist[i];
65*4882a593Smuzhiyun 		drm_i810_buf_priv_t *buf_priv = buf->dev_private;
66*4882a593Smuzhiyun 		/* In use is already a pointer */
67*4882a593Smuzhiyun 		used = cmpxchg(buf_priv->in_use, I810_BUF_FREE,
68*4882a593Smuzhiyun 			       I810_BUF_CLIENT);
69*4882a593Smuzhiyun 		if (used == I810_BUF_FREE)
70*4882a593Smuzhiyun 			return buf;
71*4882a593Smuzhiyun 	}
72*4882a593Smuzhiyun 	return NULL;
73*4882a593Smuzhiyun }
74*4882a593Smuzhiyun 
75*4882a593Smuzhiyun /* This should only be called if the buffer is not sent to the hardware
76*4882a593Smuzhiyun  * yet, the hardware updates in use for us once its on the ring buffer.
77*4882a593Smuzhiyun  */
78*4882a593Smuzhiyun 
i810_freelist_put(struct drm_device * dev,struct drm_buf * buf)79*4882a593Smuzhiyun static int i810_freelist_put(struct drm_device *dev, struct drm_buf *buf)
80*4882a593Smuzhiyun {
81*4882a593Smuzhiyun 	drm_i810_buf_priv_t *buf_priv = buf->dev_private;
82*4882a593Smuzhiyun 	int used;
83*4882a593Smuzhiyun 
84*4882a593Smuzhiyun 	/* In use is already a pointer */
85*4882a593Smuzhiyun 	used = cmpxchg(buf_priv->in_use, I810_BUF_CLIENT, I810_BUF_FREE);
86*4882a593Smuzhiyun 	if (used != I810_BUF_CLIENT) {
87*4882a593Smuzhiyun 		DRM_ERROR("Freeing buffer thats not in use : %d\n", buf->idx);
88*4882a593Smuzhiyun 		return -EINVAL;
89*4882a593Smuzhiyun 	}
90*4882a593Smuzhiyun 
91*4882a593Smuzhiyun 	return 0;
92*4882a593Smuzhiyun }
93*4882a593Smuzhiyun 
/* mmap handler installed only for the duration of i810_map_buffer():
 * maps the buffer stashed in dev_priv->mmap_buffer into the caller.
 * Reached via the temporary f_op swap to i810_buffer_fops.
 */
static int i810_mmap_buffers(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev;
	drm_i810_private_t *dev_priv;
	struct drm_buf *buf;
	drm_i810_buf_priv_t *buf_priv;

	dev = priv->minor->dev;
	dev_priv = dev->dev_private;
	/* The buffer to map was handed over out-of-band by
	 * i810_map_buffer() just before it called vm_mmap().
	 */
	buf = dev_priv->mmap_buffer;
	buf_priv = buf->dev_private;

	/* The mapping must not be inherited across fork(). */
	vma->vm_flags |= VM_DONTCOPY;

	buf_priv->currently_mapped = I810_BUF_MAPPED;

	if (io_remap_pfn_range(vma, vma->vm_start,
			       vma->vm_pgoff,
			       vma->vm_end - vma->vm_start, vma->vm_page_prot))
		return -EAGAIN;
	return 0;
}
117*4882a593Smuzhiyun 
/* File operations used only while i810_map_buffer() temporarily swaps a
 * client's f_op, so that its vm_mmap() call lands in i810_mmap_buffers()
 * instead of the regular DRM mmap handler.
 */
static const struct file_operations i810_buffer_fops = {
	.open = drm_open,
	.release = drm_release,
	.unlocked_ioctl = drm_ioctl,
	.mmap = i810_mmap_buffers,
	.compat_ioctl = drm_compat_ioctl,
	.llseek = noop_llseek,
};
126*4882a593Smuzhiyun 
/* Map a DMA buffer into the client's address space.
 *
 * Works by momentarily replacing the file's f_op with i810_buffer_fops
 * and parking the target buffer in dev_priv->mmap_buffer, so the
 * vm_mmap() below is routed to i810_mmap_buffers().  The swap is undone
 * before returning.  Returns 0 on success or a negative errno.
 */
static int i810_map_buffer(struct drm_buf *buf, struct drm_file *file_priv)
{
	struct drm_device *dev = file_priv->minor->dev;
	drm_i810_buf_priv_t *buf_priv = buf->dev_private;
	drm_i810_private_t *dev_priv = dev->dev_private;
	const struct file_operations *old_fops;
	int retcode = 0;

	if (buf_priv->currently_mapped == I810_BUF_MAPPED)
		return -EINVAL;

	/* This is all entirely broken */
	old_fops = file_priv->filp->f_op;
	file_priv->filp->f_op = &i810_buffer_fops;
	dev_priv->mmap_buffer = buf;
	buf_priv->virtual = (void *)vm_mmap(file_priv->filp, 0, buf->total,
					    PROT_READ | PROT_WRITE,
					    MAP_SHARED, buf->bus_address);
	/* Restore the handoff slot and the original f_op immediately. */
	dev_priv->mmap_buffer = NULL;
	file_priv->filp->f_op = old_fops;
	if (IS_ERR(buf_priv->virtual)) {
		/* Real error */
		DRM_ERROR("mmap error\n");
		retcode = PTR_ERR(buf_priv->virtual);
		buf_priv->virtual = NULL;
	}

	return retcode;
}
156*4882a593Smuzhiyun 
i810_unmap_buffer(struct drm_buf * buf)157*4882a593Smuzhiyun static int i810_unmap_buffer(struct drm_buf *buf)
158*4882a593Smuzhiyun {
159*4882a593Smuzhiyun 	drm_i810_buf_priv_t *buf_priv = buf->dev_private;
160*4882a593Smuzhiyun 	int retcode = 0;
161*4882a593Smuzhiyun 
162*4882a593Smuzhiyun 	if (buf_priv->currently_mapped != I810_BUF_MAPPED)
163*4882a593Smuzhiyun 		return -EINVAL;
164*4882a593Smuzhiyun 
165*4882a593Smuzhiyun 	retcode = vm_munmap((unsigned long)buf_priv->virtual,
166*4882a593Smuzhiyun 			    (size_t) buf->total);
167*4882a593Smuzhiyun 
168*4882a593Smuzhiyun 	buf_priv->currently_mapped = I810_BUF_UNMAPPED;
169*4882a593Smuzhiyun 	buf_priv->virtual = NULL;
170*4882a593Smuzhiyun 
171*4882a593Smuzhiyun 	return retcode;
172*4882a593Smuzhiyun }
173*4882a593Smuzhiyun 
/* Grab a free DMA buffer, map it for the client and fill in the ioctl
 * reply structure @d.  On mapping failure the buffer is returned to the
 * freelist.  Returns 0 on success or a negative errno.
 */
static int i810_dma_get_buffer(struct drm_device *dev, drm_i810_dma_t *d,
			       struct drm_file *file_priv)
{
	drm_i810_buf_priv_t *buf_priv;
	struct drm_buf *buf;
	int ret;

	buf = i810_freelist_get(dev);
	if (buf == NULL) {
		DRM_DEBUG("retcode=%d\n", -ENOMEM);
		return -ENOMEM;
	}

	ret = i810_map_buffer(buf, file_priv);
	if (ret != 0) {
		i810_freelist_put(dev, buf);
		DRM_ERROR("mapbuf failed, retcode %d\n", ret);
		return ret;
	}

	buf->file_priv = file_priv;
	buf_priv = buf->dev_private;

	d->granted = 1;
	d->request_idx = buf->idx;
	d->request_size = buf->total;
	d->virtual = buf_priv->virtual;

	return 0;
}
203*4882a593Smuzhiyun 
/* Tear down all DMA state: IRQ, ring mapping, hardware status page and
 * the per-buffer kernel mappings.  Safe to call even if initialization
 * never completed (every pointer is checked first).  Always returns 0.
 */
static int i810_dma_cleanup(struct drm_device *dev)
{
	struct drm_device_dma *dma = dev->dma;

	/* Make sure interrupts are disabled here because the uninstall ioctl
	 * may not have been called from userspace and after dev_private
	 * is freed, it's too late.
	 */
	if (drm_core_check_feature(dev, DRIVER_HAVE_IRQ) && dev->irq_enabled)
		drm_irq_uninstall(dev);

	if (dev->dev_private) {
		int i;
		drm_i810_private_t *dev_priv =
		    (drm_i810_private_t *) dev->dev_private;

		if (dev_priv->ring.virtual_start)
			drm_legacy_ioremapfree(&dev_priv->ring.map, dev);
		if (dev_priv->hw_status_page) {
			dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
					  dev_priv->hw_status_page,
					  dev_priv->dma_status_page);
		}
		kfree(dev->dev_private);
		dev->dev_private = NULL;

		/* Unmap the per-buffer kernel mappings made by
		 * i810_freelist_init().  buf_priv hangs off dma->buflist,
		 * not off dev_private, so this is still valid after the
		 * kfree above.
		 */
		for (i = 0; i < dma->buf_count; i++) {
			struct drm_buf *buf = dma->buflist[i];
			drm_i810_buf_priv_t *buf_priv = buf->dev_private;

			if (buf_priv->kernel_virtual && buf->total)
				drm_legacy_ioremapfree(&buf_priv->map, dev);
		}
	}
	return 0;
}
240*4882a593Smuzhiyun 
/* Busy-wait until the ring buffer has at least @n bytes of free space.
 *
 * The hardware head pointer is re-read every iteration.  The 3 second
 * timeout is restarted whenever the head is seen to move, so only a
 * genuinely wedged engine hits the lockup path.  Returns the number of
 * poll iterations performed (callers ignore the value).
 */
static int i810_wait_ring(struct drm_device *dev, int n)
{
	drm_i810_private_t *dev_priv = dev->dev_private;
	drm_i810_ring_buffer_t *ring = &(dev_priv->ring);
	int iters = 0;
	unsigned long end;
	unsigned int last_head = I810_READ(LP_RING + RING_HEAD) & HEAD_ADDR;

	end = jiffies + (HZ * 3);
	while (ring->space < n) {
		ring->head = I810_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
		/* Free space is head minus tail (plus 8 guard bytes),
		 * wrapped around the ring size when negative.
		 */
		ring->space = ring->head - (ring->tail + 8);
		if (ring->space < 0)
			ring->space += ring->Size;

		/* Head moved: engine is alive, restart the timeout. */
		if (ring->head != last_head) {
			end = jiffies + (HZ * 3);
			last_head = ring->head;
		}

		iters++;
		if (time_before(end, jiffies)) {
			DRM_ERROR("space: %d wanted %d\n", ring->space, n);
			DRM_ERROR("lockup\n");
			goto out_wait_ring;
		}
		udelay(1);
	}

out_wait_ring:
	return iters;
}
273*4882a593Smuzhiyun 
i810_kernel_lost_context(struct drm_device * dev)274*4882a593Smuzhiyun static void i810_kernel_lost_context(struct drm_device *dev)
275*4882a593Smuzhiyun {
276*4882a593Smuzhiyun 	drm_i810_private_t *dev_priv = dev->dev_private;
277*4882a593Smuzhiyun 	drm_i810_ring_buffer_t *ring = &(dev_priv->ring);
278*4882a593Smuzhiyun 
279*4882a593Smuzhiyun 	ring->head = I810_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
280*4882a593Smuzhiyun 	ring->tail = I810_READ(LP_RING + RING_TAIL);
281*4882a593Smuzhiyun 	ring->space = ring->head - (ring->tail + 8);
282*4882a593Smuzhiyun 	if (ring->space < 0)
283*4882a593Smuzhiyun 		ring->space += ring->Size;
284*4882a593Smuzhiyun }
285*4882a593Smuzhiyun 
/* Initialise the buffer freelist.
 *
 * Each DMA buffer gets one u32 "in use" word carved out of the hardware
 * status page, starting at byte offset 24 (the first 24 bytes are left
 * for the hardware itself).  Each buffer is also ioremapped so the
 * kernel can write vertex data into it directly.
 * Returns 0 on success, -EINVAL if the freelist does not fit.
 */
static int i810_freelist_init(struct drm_device *dev, drm_i810_private_t *dev_priv)
{
	struct drm_device_dma *dma = dev->dma;
	int my_idx = 24;
	u32 *hw_status = (u32 *) (dev_priv->hw_status_page + my_idx);
	int i;

	/* One u32 per buffer must fit inside the PAGE_SIZE status page
	 * after the 24-byte hardware area: (4096 - 24) / 4 = 1018 slots.
	 * The previous bound of 1019 let the last in_use word be written
	 * 4 bytes past the end of the page.
	 */
	if (dma->buf_count > 1018) {
		/* Not enough space in the status page for the freelist */
		return -EINVAL;
	}

	for (i = 0; i < dma->buf_count; i++) {
		struct drm_buf *buf = dma->buflist[i];
		drm_i810_buf_priv_t *buf_priv = buf->dev_private;

		buf_priv->in_use = hw_status++;
		buf_priv->my_use_idx = my_idx;
		my_idx += 4;

		*buf_priv->in_use = I810_BUF_FREE;

		/* Map the AGP buffer into kernel space. */
		buf_priv->map.offset = buf->bus_address;
		buf_priv->map.size = buf->total;
		buf_priv->map.type = _DRM_AGP;
		buf_priv->map.flags = 0;
		buf_priv->map.mtrr = 0;

		drm_legacy_ioremap(&buf_priv->map, dev);
		buf_priv->kernel_virtual = buf_priv->map.handle;
	}
	return 0;
}
320*4882a593Smuzhiyun 
/* One-time DMA setup for the v1.4 init ioctl.
 *
 * Locates the SAREA/MMIO/buffer maps created earlier by userspace,
 * ioremaps the ring buffer, allocates the hardware status page, points
 * the chip at it, and builds the buffer freelist.  Every failure path
 * publishes dev_priv first so i810_dma_cleanup() can free everything.
 * Returns 0 on success or a negative errno.
 */
static int i810_dma_initialize(struct drm_device *dev,
			       drm_i810_private_t *dev_priv,
			       drm_i810_init_t *init)
{
	struct drm_map_list *r_list;
	memset(dev_priv, 0, sizeof(drm_i810_private_t));

	/* The SAREA is the _DRM_SHM map that carries the hardware lock. */
	list_for_each_entry(r_list, &dev->maplist, head) {
		if (r_list->map &&
		    r_list->map->type == _DRM_SHM &&
		    r_list->map->flags & _DRM_CONTAINS_LOCK) {
			dev_priv->sarea_map = r_list->map;
			break;
		}
	}
	if (!dev_priv->sarea_map) {
		dev->dev_private = (void *)dev_priv;
		i810_dma_cleanup(dev);
		DRM_ERROR("can not find sarea!\n");
		return -EINVAL;
	}
	dev_priv->mmio_map = drm_legacy_findmap(dev, init->mmio_offset);
	if (!dev_priv->mmio_map) {
		dev->dev_private = (void *)dev_priv;
		i810_dma_cleanup(dev);
		DRM_ERROR("can not find mmio map!\n");
		return -EINVAL;
	}
	dev->agp_buffer_token = init->buffers_offset;
	dev->agp_buffer_map = drm_legacy_findmap(dev, init->buffers_offset);
	if (!dev->agp_buffer_map) {
		dev->dev_private = (void *)dev_priv;
		i810_dma_cleanup(dev);
		DRM_ERROR("can not find dma buffer map!\n");
		return -EINVAL;
	}

	/* Driver-private area lives at a userspace-chosen offset inside
	 * the SAREA.
	 */
	dev_priv->sarea_priv = (drm_i810_sarea_t *)
	    ((u8 *) dev_priv->sarea_map->handle + init->sarea_priv_offset);

	dev_priv->ring.Start = init->ring_start;
	dev_priv->ring.End = init->ring_end;
	dev_priv->ring.Size = init->ring_size;

	/* Map the ring buffer out of AGP space into the kernel. */
	dev_priv->ring.map.offset = dev->agp->base + init->ring_start;
	dev_priv->ring.map.size = init->ring_size;
	dev_priv->ring.map.type = _DRM_AGP;
	dev_priv->ring.map.flags = 0;
	dev_priv->ring.map.mtrr = 0;

	drm_legacy_ioremap(&dev_priv->ring.map, dev);

	if (dev_priv->ring.map.handle == NULL) {
		dev->dev_private = (void *)dev_priv;
		i810_dma_cleanup(dev);
		DRM_ERROR("can not ioremap virtual address for"
			  " ring buffer\n");
		return -ENOMEM;
	}

	dev_priv->ring.virtual_start = dev_priv->ring.map.handle;

	/* ring_size must be a power of two for this mask to work. */
	dev_priv->ring.tail_mask = dev_priv->ring.Size - 1;

	dev_priv->w = init->w;
	dev_priv->h = init->h;
	dev_priv->pitch = init->pitch;
	dev_priv->back_offset = init->back_offset;
	dev_priv->depth_offset = init->depth_offset;
	dev_priv->front_offset = init->front_offset;

	dev_priv->overlay_offset = init->overlay_offset;
	dev_priv->overlay_physical = init->overlay_physical;

	/* Precomputed DESTBUFFER_INFO words: offset | pitch bits. */
	dev_priv->front_di1 = init->front_offset | init->pitch_bits;
	dev_priv->back_di1 = init->back_offset | init->pitch_bits;
	dev_priv->zi1 = init->depth_offset | init->pitch_bits;

	/* Program Hardware Status Page */
	dev_priv->hw_status_page =
		dma_alloc_coherent(&dev->pdev->dev, PAGE_SIZE,
				   &dev_priv->dma_status_page, GFP_KERNEL);
	if (!dev_priv->hw_status_page) {
		dev->dev_private = (void *)dev_priv;
		i810_dma_cleanup(dev);
		DRM_ERROR("Can not allocate hardware status page\n");
		return -ENOMEM;
	}
	DRM_DEBUG("hw status page @ %p\n", dev_priv->hw_status_page);

	/* 0x02080 is the chip's hardware status page address register. */
	I810_WRITE(0x02080, dev_priv->dma_status_page);
	DRM_DEBUG("Enabled hardware status page\n");

	/* Now we need to init our freelist */
	if (i810_freelist_init(dev, dev_priv) != 0) {
		dev->dev_private = (void *)dev_priv;
		i810_dma_cleanup(dev);
		DRM_ERROR("Not enough space in the status page for"
			  " the freelist\n");
		return -ENOMEM;
	}
	dev->dev_private = (void *)dev_priv;

	return 0;
}
426*4882a593Smuzhiyun 
i810_dma_init(struct drm_device * dev,void * data,struct drm_file * file_priv)427*4882a593Smuzhiyun static int i810_dma_init(struct drm_device *dev, void *data,
428*4882a593Smuzhiyun 			 struct drm_file *file_priv)
429*4882a593Smuzhiyun {
430*4882a593Smuzhiyun 	drm_i810_private_t *dev_priv;
431*4882a593Smuzhiyun 	drm_i810_init_t *init = data;
432*4882a593Smuzhiyun 	int retcode = 0;
433*4882a593Smuzhiyun 
434*4882a593Smuzhiyun 	switch (init->func) {
435*4882a593Smuzhiyun 	case I810_INIT_DMA_1_4:
436*4882a593Smuzhiyun 		DRM_INFO("Using v1.4 init.\n");
437*4882a593Smuzhiyun 		dev_priv = kmalloc(sizeof(drm_i810_private_t), GFP_KERNEL);
438*4882a593Smuzhiyun 		if (dev_priv == NULL)
439*4882a593Smuzhiyun 			return -ENOMEM;
440*4882a593Smuzhiyun 		retcode = i810_dma_initialize(dev, dev_priv, init);
441*4882a593Smuzhiyun 		break;
442*4882a593Smuzhiyun 
443*4882a593Smuzhiyun 	case I810_CLEANUP_DMA:
444*4882a593Smuzhiyun 		DRM_INFO("DMA Cleanup\n");
445*4882a593Smuzhiyun 		retcode = i810_dma_cleanup(dev);
446*4882a593Smuzhiyun 		break;
447*4882a593Smuzhiyun 	default:
448*4882a593Smuzhiyun 		return -EINVAL;
449*4882a593Smuzhiyun 	}
450*4882a593Smuzhiyun 
451*4882a593Smuzhiyun 	return retcode;
452*4882a593Smuzhiyun }
453*4882a593Smuzhiyun 
454*4882a593Smuzhiyun /* Most efficient way to verify state for the i810 is as it is
455*4882a593Smuzhiyun  * emitted.  Non-conformant state is silently dropped.
456*4882a593Smuzhiyun  *
457*4882a593Smuzhiyun  * Use 'volatile' & local var tmp to force the emitted values to be
458*4882a593Smuzhiyun  * identical to the verified ones.
459*4882a593Smuzhiyun  */
i810EmitContextVerified(struct drm_device * dev,volatile unsigned int * code)460*4882a593Smuzhiyun static void i810EmitContextVerified(struct drm_device *dev,
461*4882a593Smuzhiyun 				    volatile unsigned int *code)
462*4882a593Smuzhiyun {
463*4882a593Smuzhiyun 	drm_i810_private_t *dev_priv = dev->dev_private;
464*4882a593Smuzhiyun 	int i, j = 0;
465*4882a593Smuzhiyun 	unsigned int tmp;
466*4882a593Smuzhiyun 	RING_LOCALS;
467*4882a593Smuzhiyun 
468*4882a593Smuzhiyun 	BEGIN_LP_RING(I810_CTX_SETUP_SIZE);
469*4882a593Smuzhiyun 
470*4882a593Smuzhiyun 	OUT_RING(GFX_OP_COLOR_FACTOR);
471*4882a593Smuzhiyun 	OUT_RING(code[I810_CTXREG_CF1]);
472*4882a593Smuzhiyun 
473*4882a593Smuzhiyun 	OUT_RING(GFX_OP_STIPPLE);
474*4882a593Smuzhiyun 	OUT_RING(code[I810_CTXREG_ST1]);
475*4882a593Smuzhiyun 
476*4882a593Smuzhiyun 	for (i = 4; i < I810_CTX_SETUP_SIZE; i++) {
477*4882a593Smuzhiyun 		tmp = code[i];
478*4882a593Smuzhiyun 
479*4882a593Smuzhiyun 		if ((tmp & (7 << 29)) == (3 << 29) &&
480*4882a593Smuzhiyun 		    (tmp & (0x1f << 24)) < (0x1d << 24)) {
481*4882a593Smuzhiyun 			OUT_RING(tmp);
482*4882a593Smuzhiyun 			j++;
483*4882a593Smuzhiyun 		} else
484*4882a593Smuzhiyun 			printk("constext state dropped!!!\n");
485*4882a593Smuzhiyun 	}
486*4882a593Smuzhiyun 
487*4882a593Smuzhiyun 	if (j & 1)
488*4882a593Smuzhiyun 		OUT_RING(0);
489*4882a593Smuzhiyun 
490*4882a593Smuzhiyun 	ADVANCE_LP_RING();
491*4882a593Smuzhiyun }
492*4882a593Smuzhiyun 
/* Emit client-supplied texture state into the ring, verifying each
 * command: only 3D state packets (bits 31:29 == 3) with an opcode below
 * 0x1d are allowed; anything else is dropped with a log message.  A
 * trailing nop keeps the emitted dword count even.
 */
static void i810EmitTexVerified(struct drm_device *dev, volatile unsigned int *code)
{
	drm_i810_private_t *dev_priv = dev->dev_private;
	int i, j = 0;
	unsigned int tmp;
	RING_LOCALS;

	BEGIN_LP_RING(I810_TEX_SETUP_SIZE);

	/* The map-info header is emitted verbatim. */
	OUT_RING(GFX_OP_MAP_INFO);
	OUT_RING(code[I810_TEXREG_MI1]);
	OUT_RING(code[I810_TEXREG_MI2]);
	OUT_RING(code[I810_TEXREG_MI3]);

	for (i = 4; i < I810_TEX_SETUP_SIZE; i++) {
		tmp = code[i];

		if ((tmp & (7 << 29)) == (3 << 29) &&
		    (tmp & (0x1f << 24)) < (0x1d << 24)) {
			OUT_RING(tmp);
			j++;
		} else
			printk("texture state dropped!!!\n");
	}

	if (j & 1)	/* pad to an even number of dwords */
		OUT_RING(0);

	ADVANCE_LP_RING();
}
523*4882a593Smuzhiyun 
524*4882a593Smuzhiyun /* Need to do some additional checking when setting the dest buffer.
525*4882a593Smuzhiyun  */
/* Emit destination-buffer state, restricting the client to the known
 * front or back buffer address words; the Z-buffer word is always the
 * kernel's own precomputed value.
 */
static void i810EmitDestVerified(struct drm_device *dev,
				 volatile unsigned int *code)
{
	drm_i810_private_t *dev_priv = dev->dev_private;
	unsigned int tmp;
	RING_LOCALS;

	BEGIN_LP_RING(I810_DEST_SETUP_SIZE + 2);

	/* Only the precomputed front/back DESTBUFFER_INFO words are
	 * accepted; anything else is dropped with a debug message.
	 */
	tmp = code[I810_DESTREG_DI1];
	if (tmp == dev_priv->front_di1 || tmp == dev_priv->back_di1) {
		OUT_RING(CMD_OP_DESTBUFFER_INFO);
		OUT_RING(tmp);
	} else
		DRM_DEBUG("bad di1 %x (allow %x or %x)\n",
			  tmp, dev_priv->front_di1, dev_priv->back_di1);

	/* invarient:
	 */
	OUT_RING(CMD_OP_Z_BUFFER_INFO);
	OUT_RING(dev_priv->zi1);

	OUT_RING(GFX_OP_DESTBUFFER_VARS);
	OUT_RING(code[I810_DESTREG_DV1]);

	OUT_RING(GFX_OP_DRAWRECT_INFO);
	OUT_RING(code[I810_DESTREG_DR1]);
	OUT_RING(code[I810_DESTREG_DR2]);
	OUT_RING(code[I810_DESTREG_DR3]);
	OUT_RING(code[I810_DESTREG_DR4]);
	OUT_RING(0);	/* pad to an even number of dwords */

	ADVANCE_LP_RING();
}
560*4882a593Smuzhiyun 
i810EmitState(struct drm_device * dev)561*4882a593Smuzhiyun static void i810EmitState(struct drm_device *dev)
562*4882a593Smuzhiyun {
563*4882a593Smuzhiyun 	drm_i810_private_t *dev_priv = dev->dev_private;
564*4882a593Smuzhiyun 	drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv;
565*4882a593Smuzhiyun 	unsigned int dirty = sarea_priv->dirty;
566*4882a593Smuzhiyun 
567*4882a593Smuzhiyun 	DRM_DEBUG("%x\n", dirty);
568*4882a593Smuzhiyun 
569*4882a593Smuzhiyun 	if (dirty & I810_UPLOAD_BUFFERS) {
570*4882a593Smuzhiyun 		i810EmitDestVerified(dev, sarea_priv->BufferState);
571*4882a593Smuzhiyun 		sarea_priv->dirty &= ~I810_UPLOAD_BUFFERS;
572*4882a593Smuzhiyun 	}
573*4882a593Smuzhiyun 
574*4882a593Smuzhiyun 	if (dirty & I810_UPLOAD_CTX) {
575*4882a593Smuzhiyun 		i810EmitContextVerified(dev, sarea_priv->ContextState);
576*4882a593Smuzhiyun 		sarea_priv->dirty &= ~I810_UPLOAD_CTX;
577*4882a593Smuzhiyun 	}
578*4882a593Smuzhiyun 
579*4882a593Smuzhiyun 	if (dirty & I810_UPLOAD_TEX0) {
580*4882a593Smuzhiyun 		i810EmitTexVerified(dev, sarea_priv->TexState[0]);
581*4882a593Smuzhiyun 		sarea_priv->dirty &= ~I810_UPLOAD_TEX0;
582*4882a593Smuzhiyun 	}
583*4882a593Smuzhiyun 
584*4882a593Smuzhiyun 	if (dirty & I810_UPLOAD_TEX1) {
585*4882a593Smuzhiyun 		i810EmitTexVerified(dev, sarea_priv->TexState[1]);
586*4882a593Smuzhiyun 		sarea_priv->dirty &= ~I810_UPLOAD_TEX1;
587*4882a593Smuzhiyun 	}
588*4882a593Smuzhiyun }
589*4882a593Smuzhiyun 
590*4882a593Smuzhiyun /* need to verify
591*4882a593Smuzhiyun  */
/* need to verify
 *
 * Clear the requested buffers (front/back/depth) within each SAREA clip
 * rectangle using solid-pattern color BLTs.
 */
static void i810_dma_dispatch_clear(struct drm_device *dev, int flags,
				    unsigned int clear_color,
				    unsigned int clear_zval)
{
	drm_i810_private_t *dev_priv = dev->dev_private;
	drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv;
	int nbox = sarea_priv->nbox;
	struct drm_clip_rect *pbox = sarea_priv->boxes;
	int pitch = dev_priv->pitch;
	int cpp = 2;	/* bytes per pixel; hard-coded for 16bpp */
	int i;
	RING_LOCALS;

	/* After a page flip, "front" and "back" are physically swapped,
	 * so swap the request flags to match.
	 */
	if (dev_priv->current_page == 1) {
		unsigned int tmp = flags;

		flags &= ~(I810_FRONT | I810_BACK);
		if (tmp & I810_FRONT)
			flags |= I810_BACK;
		if (tmp & I810_BACK)
			flags |= I810_FRONT;
	}

	/* Resync ring bookkeeping before emitting commands. */
	i810_kernel_lost_context(dev);

	if (nbox > I810_NR_SAREA_CLIPRECTS)
		nbox = I810_NR_SAREA_CLIPRECTS;

	for (i = 0; i < nbox; i++, pbox++) {
		unsigned int x = pbox->x1;
		unsigned int y = pbox->y1;
		unsigned int width = (pbox->x2 - x) * cpp;
		unsigned int height = pbox->y2 - y;
		unsigned int start = y * pitch + x * cpp;

		/* Skip malformed or out-of-screen rectangles. */
		if (pbox->x1 > pbox->x2 ||
		    pbox->y1 > pbox->y2 ||
		    pbox->x2 > dev_priv->w || pbox->y2 > dev_priv->h)
			continue;

		if (flags & I810_FRONT) {
			BEGIN_LP_RING(6);
			OUT_RING(BR00_BITBLT_CLIENT | BR00_OP_COLOR_BLT | 0x3);
			/* 0xF0 = PATCOPY raster op, solid fill. */
			OUT_RING(BR13_SOLID_PATTERN | (0xF0 << 16) | pitch);
			OUT_RING((height << 16) | width);
			OUT_RING(start);
			OUT_RING(clear_color);
			OUT_RING(0);
			ADVANCE_LP_RING();
		}

		if (flags & I810_BACK) {
			BEGIN_LP_RING(6);
			OUT_RING(BR00_BITBLT_CLIENT | BR00_OP_COLOR_BLT | 0x3);
			OUT_RING(BR13_SOLID_PATTERN | (0xF0 << 16) | pitch);
			OUT_RING((height << 16) | width);
			OUT_RING(dev_priv->back_offset + start);
			OUT_RING(clear_color);
			OUT_RING(0);
			ADVANCE_LP_RING();
		}

		if (flags & I810_DEPTH) {
			BEGIN_LP_RING(6);
			OUT_RING(BR00_BITBLT_CLIENT | BR00_OP_COLOR_BLT | 0x3);
			OUT_RING(BR13_SOLID_PATTERN | (0xF0 << 16) | pitch);
			OUT_RING((height << 16) | width);
			OUT_RING(dev_priv->depth_offset + start);
			OUT_RING(clear_zval);
			OUT_RING(0);
			ADVANCE_LP_RING();
		}
	}
}
666*4882a593Smuzhiyun 
/* Copy the damaged cliprects between the front and back buffers with
 * one SRC_COPY blit per rectangle.  Which buffer is source and which is
 * destination depends on the current page-flip state (current_page).
 */
static void i810_dma_dispatch_swap(struct drm_device *dev)
{
	drm_i810_private_t *dev_priv = dev->dev_private;
	drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv;
	int nbox = sarea_priv->nbox;
	struct drm_clip_rect *pbox = sarea_priv->boxes;
	int pitch = dev_priv->pitch;
	int cpp = 2;	/* bytes per pixel; this driver blits 16bpp only */
	int i;
	RING_LOCALS;

	DRM_DEBUG("swapbuffers\n");

	i810_kernel_lost_context(dev);

	/* Clamp the count to the number of rects the SAREA can hold. */
	if (nbox > I810_NR_SAREA_CLIPRECTS)
		nbox = I810_NR_SAREA_CLIPRECTS;

	for (i = 0; i < nbox; i++, pbox++) {
		unsigned int w = pbox->x2 - pbox->x1;
		unsigned int h = pbox->y2 - pbox->y1;
		unsigned int dst = pbox->x1 * cpp + pbox->y1 * pitch;
		unsigned int start = dst;

		/* Skip malformed or out-of-bounds rectangles; they come
		 * from userspace via the shared SAREA and are untrusted.
		 */
		if (pbox->x1 > pbox->x2 ||
		    pbox->y1 > pbox->y2 ||
		    pbox->x2 > dev_priv->w || pbox->y2 > dev_priv->h)
			continue;

		BEGIN_LP_RING(6);
		OUT_RING(BR00_BITBLT_CLIENT | BR00_OP_SRC_COPY_BLT | 0x4);
		OUT_RING(pitch | (0xCC << 16));	/* 0xCC = copy ROP */
		OUT_RING((h << 16) | (w * cpp));
		/* Destination: the buffer selected by current_page. */
		if (dev_priv->current_page == 0)
			OUT_RING(dev_priv->front_offset + start);
		else
			OUT_RING(dev_priv->back_offset + start);
		OUT_RING(pitch);
		/* Source: the other buffer. */
		if (dev_priv->current_page == 0)
			OUT_RING(dev_priv->back_offset + start);
		else
			OUT_RING(dev_priv->front_offset + start);
		ADVANCE_LP_RING();
	}
}
712*4882a593Smuzhiyun 
/* Execute a client vertex buffer as a protected batch buffer, replaying
 * it once per cliprect with the scissor programmed to that rect.  When
 * @discard is set, ownership passes to the hardware and the GPU is told
 * to mark the buffer free (and bump the dispatch counter in the status
 * page) after it has consumed the batch.
 */
static void i810_dma_dispatch_vertex(struct drm_device *dev,
				     struct drm_buf *buf, int discard, int used)
{
	drm_i810_private_t *dev_priv = dev->dev_private;
	drm_i810_buf_priv_t *buf_priv = buf->dev_private;
	drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv;
	struct drm_clip_rect *box = sarea_priv->boxes;
	int nbox = sarea_priv->nbox;
	unsigned long address = (unsigned long)buf->bus_address;
	unsigned long start = address - dev->agp->base;	/* AGP aperture offset */
	int i = 0;
	RING_LOCALS;

	i810_kernel_lost_context(dev);

	if (nbox > I810_NR_SAREA_CLIPRECTS)
		nbox = I810_NR_SAREA_CLIPRECTS;

	/* Reject bogus sizes from userspace; anything outside [0, 4KiB]
	 * degenerates to a no-op dispatch below.
	 */
	if (used < 0 || used > 4 * 1024)
		used = 0;

	if (sarea_priv->dirty)
		i810EmitState(dev);

	if (buf_priv->currently_mapped == I810_BUF_MAPPED) {
		unsigned int prim = (sarea_priv->vertex_prim & PR_MASK);

		/* Patch the primitive command into the buffer's first
		 * dword; the length field is in dwords minus two.
		 */
		*(u32 *) buf_priv->kernel_virtual =
		    ((GFX_OP_PRIMITIVE | prim | ((used / 4) - 2)));

		/* Append a zero dword so the batch length becomes a
		 * multiple of 8 bytes (qword aligned).
		 */
		if (used & 4) {
			*(u32 *) ((char *) buf_priv->kernel_virtual + used) = 0;
			used += 4;
		}

		i810_unmap_buffer(buf);
	}

	if (used) {
		do {
			/* Restrict rendering to the current cliprect. */
			if (i < nbox) {
				BEGIN_LP_RING(4);
				OUT_RING(GFX_OP_SCISSOR | SC_UPDATE_SCISSOR |
					 SC_ENABLE);
				OUT_RING(GFX_OP_SCISSOR_INFO);
				OUT_RING(box[i].x1 | (box[i].y1 << 16));
				OUT_RING((box[i].x2 -
					  1) | ((box[i].y2 - 1) << 16));
				ADVANCE_LP_RING();
			}

			BEGIN_LP_RING(4);
			OUT_RING(CMD_OP_BATCH_BUFFER);
			OUT_RING(start | BB1_PROTECTED);
			OUT_RING(start + used - 4);
			OUT_RING(0);
			ADVANCE_LP_RING();

		} while (++i < nbox);
	}

	if (discard) {
		dev_priv->counter++;

		/* Hand ownership of the buffer from client to hardware. */
		(void)cmpxchg(buf_priv->in_use, I810_BUF_CLIENT,
			      I810_BUF_HARDWARE);

		/* Once the GPU reaches these commands it records the new
		 * counter at status-page index 20 and flips the buffer's
		 * in_use word to FREE.
		 */
		BEGIN_LP_RING(8);
		OUT_RING(CMD_STORE_DWORD_IDX);
		OUT_RING(20);
		OUT_RING(dev_priv->counter);
		OUT_RING(CMD_STORE_DWORD_IDX);
		OUT_RING(buf_priv->my_use_idx);
		OUT_RING(I810_BUF_FREE);
		OUT_RING(CMD_REPORT_HEAD);
		OUT_RING(0);
		ADVANCE_LP_RING();
	}
}
792*4882a593Smuzhiyun 
/* Emit the command sequence that flips scanout between the front and
 * back buffers, toggle current_page, and publish the result to the
 * SAREA for the client-side 3D driver.
 */
static void i810_dma_dispatch_flip(struct drm_device *dev)
{
	drm_i810_private_t *dev_priv = dev->dev_private;
	int pitch = dev_priv->pitch;
	RING_LOCALS;

	DRM_DEBUG("page=%d pfCurrentPage=%d\n",
		  dev_priv->current_page,
		  dev_priv->sarea_priv->pf_current_page);

	i810_kernel_lost_context(dev);

	/* Flush caches before changing the displayed buffer. */
	BEGIN_LP_RING(2);
	OUT_RING(INST_PARSER_CLIENT | INST_OP_FLUSH | INST_FLUSH_MAP_CACHE);
	OUT_RING(0);
	ADVANCE_LP_RING();

	BEGIN_LP_RING(I810_DEST_SETUP_SIZE + 2);
	/* On i815 at least ASYNC is buggy */
	/* pitch<<5 is from 11.2.8 p158,
	   its the pitch / 8 then left shifted 8,
	   so (pitch >> 3) << 8 */
	OUT_RING(CMD_OP_FRONTBUFFER_INFO | (pitch << 5) /*| ASYNC_FLIP */ );
	/* Point the scanout at the other buffer and toggle our record
	 * of which page is displayed.
	 */
	if (dev_priv->current_page == 0) {
		OUT_RING(dev_priv->back_offset);
		dev_priv->current_page = 1;
	} else {
		OUT_RING(dev_priv->front_offset);
		dev_priv->current_page = 0;
	}
	OUT_RING(0);
	ADVANCE_LP_RING();

	/* Stall until the flip has actually occurred on plane A. */
	BEGIN_LP_RING(2);
	OUT_RING(CMD_OP_WAIT_FOR_EVENT | WAIT_FOR_PLANE_A_FLIP);
	OUT_RING(0);
	ADVANCE_LP_RING();

	/* Publish the new current page to the SAREA.  The client-side
	 * 3D driver throttles its framerate by watching this value
	 * before issuing the swapbuffer ioctl.
	 */
	dev_priv->sarea_priv->pf_current_page = dev_priv->current_page;

}
838*4882a593Smuzhiyun 
/* Flush rendering and wait until the ring buffer has drained, i.e. the
 * hardware has gone idle.
 */
static void i810_dma_quiescent(struct drm_device *dev)
{
	drm_i810_private_t *dev_priv = dev->dev_private;
	RING_LOCALS;

	i810_kernel_lost_context(dev);

	BEGIN_LP_RING(4);
	OUT_RING(INST_PARSER_CLIENT | INST_OP_FLUSH | INST_FLUSH_MAP_CACHE);
	OUT_RING(CMD_REPORT_HEAD);
	OUT_RING(0);
	OUT_RING(0);
	ADVANCE_LP_RING();

	/* Size - 8 free bytes is as empty as the ring can report. */
	i810_wait_ring(dev, dev_priv->ring.Size - 8);
}
855*4882a593Smuzhiyun 
i810_flush_queue(struct drm_device * dev)856*4882a593Smuzhiyun static void i810_flush_queue(struct drm_device *dev)
857*4882a593Smuzhiyun {
858*4882a593Smuzhiyun 	drm_i810_private_t *dev_priv = dev->dev_private;
859*4882a593Smuzhiyun 	struct drm_device_dma *dma = dev->dma;
860*4882a593Smuzhiyun 	int i;
861*4882a593Smuzhiyun 	RING_LOCALS;
862*4882a593Smuzhiyun 
863*4882a593Smuzhiyun 	i810_kernel_lost_context(dev);
864*4882a593Smuzhiyun 
865*4882a593Smuzhiyun 	BEGIN_LP_RING(2);
866*4882a593Smuzhiyun 	OUT_RING(CMD_REPORT_HEAD);
867*4882a593Smuzhiyun 	OUT_RING(0);
868*4882a593Smuzhiyun 	ADVANCE_LP_RING();
869*4882a593Smuzhiyun 
870*4882a593Smuzhiyun 	i810_wait_ring(dev, dev_priv->ring.Size - 8);
871*4882a593Smuzhiyun 
872*4882a593Smuzhiyun 	for (i = 0; i < dma->buf_count; i++) {
873*4882a593Smuzhiyun 		struct drm_buf *buf = dma->buflist[i];
874*4882a593Smuzhiyun 		drm_i810_buf_priv_t *buf_priv = buf->dev_private;
875*4882a593Smuzhiyun 
876*4882a593Smuzhiyun 		int used = cmpxchg(buf_priv->in_use, I810_BUF_HARDWARE,
877*4882a593Smuzhiyun 				   I810_BUF_FREE);
878*4882a593Smuzhiyun 
879*4882a593Smuzhiyun 		if (used == I810_BUF_HARDWARE)
880*4882a593Smuzhiyun 			DRM_DEBUG("reclaimed from HARDWARE\n");
881*4882a593Smuzhiyun 		if (used == I810_BUF_CLIENT)
882*4882a593Smuzhiyun 			DRM_DEBUG("still on client\n");
883*4882a593Smuzhiyun 	}
884*4882a593Smuzhiyun 
885*4882a593Smuzhiyun 	return;
886*4882a593Smuzhiyun }
887*4882a593Smuzhiyun 
/* Must be called with the lock held.
 *
 * Return every DMA buffer still owned by the departing client to the
 * free pool and clear any stale mapped state.  The ring is drained
 * first so the hardware cannot still be reading a reclaimed buffer.
 */
void i810_driver_reclaim_buffers(struct drm_device *dev,
				 struct drm_file *file_priv)
{
	struct drm_device_dma *dma = dev->dma;
	int i;

	/* Nothing to do if DMA was never (or is no longer) set up. */
	if (!dma)
		return;
	if (!dev->dev_private)
		return;
	if (!dma->buflist)
		return;

	i810_flush_queue(dev);

	for (i = 0; i < dma->buf_count; i++) {
		struct drm_buf *buf = dma->buflist[i];
		drm_i810_buf_priv_t *buf_priv = buf->dev_private;

		if (buf->file_priv == file_priv && buf_priv) {
			/* Atomically release only client-owned buffers. */
			int used = cmpxchg(buf_priv->in_use, I810_BUF_CLIENT,
					   I810_BUF_FREE);

			if (used == I810_BUF_CLIENT)
				DRM_DEBUG("reclaimed from client\n");
			if (buf_priv->currently_mapped == I810_BUF_MAPPED)
				buf_priv->currently_mapped = I810_BUF_UNMAPPED;
		}
	}
}
919*4882a593Smuzhiyun 
/* DRM_I810_FLUSH ioctl: drain the ring and reclaim idle buffers. */
static int i810_flush_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	LOCK_TEST_WITH_RETURN(dev, file_priv);

	i810_flush_queue(dev);
	return 0;
}
928*4882a593Smuzhiyun 
i810_dma_vertex(struct drm_device * dev,void * data,struct drm_file * file_priv)929*4882a593Smuzhiyun static int i810_dma_vertex(struct drm_device *dev, void *data,
930*4882a593Smuzhiyun 			   struct drm_file *file_priv)
931*4882a593Smuzhiyun {
932*4882a593Smuzhiyun 	struct drm_device_dma *dma = dev->dma;
933*4882a593Smuzhiyun 	drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private;
934*4882a593Smuzhiyun 	u32 *hw_status = dev_priv->hw_status_page;
935*4882a593Smuzhiyun 	drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *)
936*4882a593Smuzhiyun 	    dev_priv->sarea_priv;
937*4882a593Smuzhiyun 	drm_i810_vertex_t *vertex = data;
938*4882a593Smuzhiyun 
939*4882a593Smuzhiyun 	LOCK_TEST_WITH_RETURN(dev, file_priv);
940*4882a593Smuzhiyun 
941*4882a593Smuzhiyun 	DRM_DEBUG("idx %d used %d discard %d\n",
942*4882a593Smuzhiyun 		  vertex->idx, vertex->used, vertex->discard);
943*4882a593Smuzhiyun 
944*4882a593Smuzhiyun 	if (vertex->idx < 0 || vertex->idx >= dma->buf_count)
945*4882a593Smuzhiyun 		return -EINVAL;
946*4882a593Smuzhiyun 
947*4882a593Smuzhiyun 	i810_dma_dispatch_vertex(dev,
948*4882a593Smuzhiyun 				 dma->buflist[vertex->idx],
949*4882a593Smuzhiyun 				 vertex->discard, vertex->used);
950*4882a593Smuzhiyun 
951*4882a593Smuzhiyun 	sarea_priv->last_enqueue = dev_priv->counter - 1;
952*4882a593Smuzhiyun 	sarea_priv->last_dispatch = (int)hw_status[5];
953*4882a593Smuzhiyun 
954*4882a593Smuzhiyun 	return 0;
955*4882a593Smuzhiyun }
956*4882a593Smuzhiyun 
i810_clear_bufs(struct drm_device * dev,void * data,struct drm_file * file_priv)957*4882a593Smuzhiyun static int i810_clear_bufs(struct drm_device *dev, void *data,
958*4882a593Smuzhiyun 			   struct drm_file *file_priv)
959*4882a593Smuzhiyun {
960*4882a593Smuzhiyun 	drm_i810_clear_t *clear = data;
961*4882a593Smuzhiyun 
962*4882a593Smuzhiyun 	LOCK_TEST_WITH_RETURN(dev, file_priv);
963*4882a593Smuzhiyun 
964*4882a593Smuzhiyun 	/* GH: Someone's doing nasty things... */
965*4882a593Smuzhiyun 	if (!dev->dev_private)
966*4882a593Smuzhiyun 		return -EINVAL;
967*4882a593Smuzhiyun 
968*4882a593Smuzhiyun 	i810_dma_dispatch_clear(dev, clear->flags,
969*4882a593Smuzhiyun 				clear->clear_color, clear->clear_depth);
970*4882a593Smuzhiyun 	return 0;
971*4882a593Smuzhiyun }
972*4882a593Smuzhiyun 
/* DRM_I810_SWAP ioctl: blit the rendered buffer into the displayed
 * one for each cliprect.
 */
static int i810_swap_bufs(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	DRM_DEBUG("\n");

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	i810_dma_dispatch_swap(dev);
	return 0;
}
983*4882a593Smuzhiyun 
i810_getage(struct drm_device * dev,void * data,struct drm_file * file_priv)984*4882a593Smuzhiyun static int i810_getage(struct drm_device *dev, void *data,
985*4882a593Smuzhiyun 		       struct drm_file *file_priv)
986*4882a593Smuzhiyun {
987*4882a593Smuzhiyun 	drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private;
988*4882a593Smuzhiyun 	u32 *hw_status = dev_priv->hw_status_page;
989*4882a593Smuzhiyun 	drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *)
990*4882a593Smuzhiyun 	    dev_priv->sarea_priv;
991*4882a593Smuzhiyun 
992*4882a593Smuzhiyun 	sarea_priv->last_dispatch = (int)hw_status[5];
993*4882a593Smuzhiyun 	return 0;
994*4882a593Smuzhiyun }
995*4882a593Smuzhiyun 
i810_getbuf(struct drm_device * dev,void * data,struct drm_file * file_priv)996*4882a593Smuzhiyun static int i810_getbuf(struct drm_device *dev, void *data,
997*4882a593Smuzhiyun 		       struct drm_file *file_priv)
998*4882a593Smuzhiyun {
999*4882a593Smuzhiyun 	int retcode = 0;
1000*4882a593Smuzhiyun 	drm_i810_dma_t *d = data;
1001*4882a593Smuzhiyun 	drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private;
1002*4882a593Smuzhiyun 	u32 *hw_status = dev_priv->hw_status_page;
1003*4882a593Smuzhiyun 	drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *)
1004*4882a593Smuzhiyun 	    dev_priv->sarea_priv;
1005*4882a593Smuzhiyun 
1006*4882a593Smuzhiyun 	LOCK_TEST_WITH_RETURN(dev, file_priv);
1007*4882a593Smuzhiyun 
1008*4882a593Smuzhiyun 	d->granted = 0;
1009*4882a593Smuzhiyun 
1010*4882a593Smuzhiyun 	retcode = i810_dma_get_buffer(dev, d, file_priv);
1011*4882a593Smuzhiyun 
1012*4882a593Smuzhiyun 	DRM_DEBUG("i810_dma: %d returning %d, granted = %d\n",
1013*4882a593Smuzhiyun 		  task_pid_nr(current), retcode, d->granted);
1014*4882a593Smuzhiyun 
1015*4882a593Smuzhiyun 	sarea_priv->last_dispatch = (int)hw_status[5];
1016*4882a593Smuzhiyun 
1017*4882a593Smuzhiyun 	return retcode;
1018*4882a593Smuzhiyun }
1019*4882a593Smuzhiyun 
/* DRM_I810_COPY ioctl: intentional no-op, presumably kept so the ioctl
 * slot remains valid for old userspace.
 */
static int i810_copybuf(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	/* Never copy - 2.4.x doesn't need it */
	return 0;
}
1026*4882a593Smuzhiyun 
/* DRM_I810_DOCOPY ioctl: intentional no-op, see i810_copybuf(). */
static int i810_docopy(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	/* Never copy - 2.4.x doesn't need it */
	return 0;
}
1033*4882a593Smuzhiyun 
/* Execute a client MC (motion compensation, judging by the name)
 * buffer as a protected batch, then have the GPU mark the buffer free
 * and store @last_render at status-page index 16 on completion.
 */
static void i810_dma_dispatch_mc(struct drm_device *dev, struct drm_buf *buf, int used,
				 unsigned int last_render)
{
	drm_i810_private_t *dev_priv = dev->dev_private;
	drm_i810_buf_priv_t *buf_priv = buf->dev_private;
	drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv;
	unsigned long address = (unsigned long)buf->bus_address;
	unsigned long start = address - dev->agp->base;	/* AGP aperture offset */
	int u;
	RING_LOCALS;

	i810_kernel_lost_context(dev);

	/* Take ownership of the buffer from the client. */
	u = cmpxchg(buf_priv->in_use, I810_BUF_CLIENT, I810_BUF_HARDWARE);
	if (u != I810_BUF_CLIENT)
		DRM_DEBUG("MC found buffer that isn't mine!\n");

	/* Reject bogus sizes from userspace.
	 * NOTE(review): when used is clamped to 0 here, the batch below
	 * is still emitted with end address start - 4; unlike
	 * i810_dma_dispatch_vertex() there is no "if (used)" guard —
	 * looks suspicious, verify against hardware behavior.
	 */
	if (used < 0 || used > 4 * 1024)
		used = 0;

	/* Mark all state dirty so i810EmitState() re-emits it on the
	 * next vertex dispatch.
	 */
	sarea_priv->dirty = 0x7f;

	DRM_DEBUG("addr 0x%lx, used 0x%x\n", address, used);

	dev_priv->counter++;
	DRM_DEBUG("dispatch counter : %ld\n", dev_priv->counter);
	DRM_DEBUG("start : %lx\n", start);
	DRM_DEBUG("used : %d\n", used);
	DRM_DEBUG("start + used - 4 : %ld\n", start + used - 4);

	if (buf_priv->currently_mapped == I810_BUF_MAPPED) {
		/* Append a zero dword so the batch length becomes a
		 * multiple of 8 bytes (qword aligned).
		 */
		if (used & 4) {
			*(u32 *) ((char *) buf_priv->virtual + used) = 0;
			used += 4;
		}

		i810_unmap_buffer(buf);
	}
	BEGIN_LP_RING(4);
	OUT_RING(CMD_OP_BATCH_BUFFER);
	OUT_RING(start | BB1_PROTECTED);
	OUT_RING(start + used - 4);
	OUT_RING(0);
	ADVANCE_LP_RING();

	/* Completion writes: free the buffer, then record last_render
	 * at status-page index 16 for userspace to poll.
	 */
	BEGIN_LP_RING(8);
	OUT_RING(CMD_STORE_DWORD_IDX);
	OUT_RING(buf_priv->my_use_idx);
	OUT_RING(I810_BUF_FREE);
	OUT_RING(0);

	OUT_RING(CMD_STORE_DWORD_IDX);
	OUT_RING(16);
	OUT_RING(last_render);
	OUT_RING(0);
	ADVANCE_LP_RING();
}
1091*4882a593Smuzhiyun 
i810_dma_mc(struct drm_device * dev,void * data,struct drm_file * file_priv)1092*4882a593Smuzhiyun static int i810_dma_mc(struct drm_device *dev, void *data,
1093*4882a593Smuzhiyun 		       struct drm_file *file_priv)
1094*4882a593Smuzhiyun {
1095*4882a593Smuzhiyun 	struct drm_device_dma *dma = dev->dma;
1096*4882a593Smuzhiyun 	drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private;
1097*4882a593Smuzhiyun 	u32 *hw_status = dev_priv->hw_status_page;
1098*4882a593Smuzhiyun 	drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *)
1099*4882a593Smuzhiyun 	    dev_priv->sarea_priv;
1100*4882a593Smuzhiyun 	drm_i810_mc_t *mc = data;
1101*4882a593Smuzhiyun 
1102*4882a593Smuzhiyun 	LOCK_TEST_WITH_RETURN(dev, file_priv);
1103*4882a593Smuzhiyun 
1104*4882a593Smuzhiyun 	if (mc->idx >= dma->buf_count || mc->idx < 0)
1105*4882a593Smuzhiyun 		return -EINVAL;
1106*4882a593Smuzhiyun 
1107*4882a593Smuzhiyun 	i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
1108*4882a593Smuzhiyun 			     mc->last_render);
1109*4882a593Smuzhiyun 
1110*4882a593Smuzhiyun 	sarea_priv->last_enqueue = dev_priv->counter - 1;
1111*4882a593Smuzhiyun 	sarea_priv->last_dispatch = (int)hw_status[5];
1112*4882a593Smuzhiyun 
1113*4882a593Smuzhiyun 	return 0;
1114*4882a593Smuzhiyun }
1115*4882a593Smuzhiyun 
i810_rstatus(struct drm_device * dev,void * data,struct drm_file * file_priv)1116*4882a593Smuzhiyun static int i810_rstatus(struct drm_device *dev, void *data,
1117*4882a593Smuzhiyun 			struct drm_file *file_priv)
1118*4882a593Smuzhiyun {
1119*4882a593Smuzhiyun 	drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private;
1120*4882a593Smuzhiyun 
1121*4882a593Smuzhiyun 	return (int)(((u32 *) (dev_priv->hw_status_page))[4]);
1122*4882a593Smuzhiyun }
1123*4882a593Smuzhiyun 
i810_ov0_info(struct drm_device * dev,void * data,struct drm_file * file_priv)1124*4882a593Smuzhiyun static int i810_ov0_info(struct drm_device *dev, void *data,
1125*4882a593Smuzhiyun 			 struct drm_file *file_priv)
1126*4882a593Smuzhiyun {
1127*4882a593Smuzhiyun 	drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private;
1128*4882a593Smuzhiyun 	drm_i810_overlay_t *ov = data;
1129*4882a593Smuzhiyun 
1130*4882a593Smuzhiyun 	ov->offset = dev_priv->overlay_offset;
1131*4882a593Smuzhiyun 	ov->physical = dev_priv->overlay_physical;
1132*4882a593Smuzhiyun 
1133*4882a593Smuzhiyun 	return 0;
1134*4882a593Smuzhiyun }
1135*4882a593Smuzhiyun 
i810_fstatus(struct drm_device * dev,void * data,struct drm_file * file_priv)1136*4882a593Smuzhiyun static int i810_fstatus(struct drm_device *dev, void *data,
1137*4882a593Smuzhiyun 			struct drm_file *file_priv)
1138*4882a593Smuzhiyun {
1139*4882a593Smuzhiyun 	drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private;
1140*4882a593Smuzhiyun 
1141*4882a593Smuzhiyun 	LOCK_TEST_WITH_RETURN(dev, file_priv);
1142*4882a593Smuzhiyun 	return I810_READ(0x30008);
1143*4882a593Smuzhiyun }
1144*4882a593Smuzhiyun 
i810_ov0_flip(struct drm_device * dev,void * data,struct drm_file * file_priv)1145*4882a593Smuzhiyun static int i810_ov0_flip(struct drm_device *dev, void *data,
1146*4882a593Smuzhiyun 			 struct drm_file *file_priv)
1147*4882a593Smuzhiyun {
1148*4882a593Smuzhiyun 	drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private;
1149*4882a593Smuzhiyun 
1150*4882a593Smuzhiyun 	LOCK_TEST_WITH_RETURN(dev, file_priv);
1151*4882a593Smuzhiyun 
1152*4882a593Smuzhiyun 	/* Tell the overlay to update */
1153*4882a593Smuzhiyun 	I810_WRITE(0x30000, dev_priv->overlay_physical | 0x80000000);
1154*4882a593Smuzhiyun 
1155*4882a593Smuzhiyun 	return 0;
1156*4882a593Smuzhiyun }
1157*4882a593Smuzhiyun 
1158*4882a593Smuzhiyun /* Not sure why this isn't set all the time:
1159*4882a593Smuzhiyun  */
i810_do_init_pageflip(struct drm_device * dev)1160*4882a593Smuzhiyun static void i810_do_init_pageflip(struct drm_device *dev)
1161*4882a593Smuzhiyun {
1162*4882a593Smuzhiyun 	drm_i810_private_t *dev_priv = dev->dev_private;
1163*4882a593Smuzhiyun 
1164*4882a593Smuzhiyun 	DRM_DEBUG("\n");
1165*4882a593Smuzhiyun 	dev_priv->page_flipping = 1;
1166*4882a593Smuzhiyun 	dev_priv->current_page = 0;
1167*4882a593Smuzhiyun 	dev_priv->sarea_priv->pf_current_page = dev_priv->current_page;
1168*4882a593Smuzhiyun }
1169*4882a593Smuzhiyun 
i810_do_cleanup_pageflip(struct drm_device * dev)1170*4882a593Smuzhiyun static int i810_do_cleanup_pageflip(struct drm_device *dev)
1171*4882a593Smuzhiyun {
1172*4882a593Smuzhiyun 	drm_i810_private_t *dev_priv = dev->dev_private;
1173*4882a593Smuzhiyun 
1174*4882a593Smuzhiyun 	DRM_DEBUG("\n");
1175*4882a593Smuzhiyun 	if (dev_priv->current_page != 0)
1176*4882a593Smuzhiyun 		i810_dma_dispatch_flip(dev);
1177*4882a593Smuzhiyun 
1178*4882a593Smuzhiyun 	dev_priv->page_flipping = 0;
1179*4882a593Smuzhiyun 	return 0;
1180*4882a593Smuzhiyun }
1181*4882a593Smuzhiyun 
i810_flip_bufs(struct drm_device * dev,void * data,struct drm_file * file_priv)1182*4882a593Smuzhiyun static int i810_flip_bufs(struct drm_device *dev, void *data,
1183*4882a593Smuzhiyun 			  struct drm_file *file_priv)
1184*4882a593Smuzhiyun {
1185*4882a593Smuzhiyun 	drm_i810_private_t *dev_priv = dev->dev_private;
1186*4882a593Smuzhiyun 
1187*4882a593Smuzhiyun 	DRM_DEBUG("\n");
1188*4882a593Smuzhiyun 
1189*4882a593Smuzhiyun 	LOCK_TEST_WITH_RETURN(dev, file_priv);
1190*4882a593Smuzhiyun 
1191*4882a593Smuzhiyun 	if (!dev_priv->page_flipping)
1192*4882a593Smuzhiyun 		i810_do_init_pageflip(dev);
1193*4882a593Smuzhiyun 
1194*4882a593Smuzhiyun 	i810_dma_dispatch_flip(dev);
1195*4882a593Smuzhiyun 	return 0;
1196*4882a593Smuzhiyun }
1197*4882a593Smuzhiyun 
i810_driver_load(struct drm_device * dev,unsigned long flags)1198*4882a593Smuzhiyun int i810_driver_load(struct drm_device *dev, unsigned long flags)
1199*4882a593Smuzhiyun {
1200*4882a593Smuzhiyun 	dev->agp = drm_agp_init(dev);
1201*4882a593Smuzhiyun 	if (dev->agp) {
1202*4882a593Smuzhiyun 		dev->agp->agp_mtrr = arch_phys_wc_add(
1203*4882a593Smuzhiyun 			dev->agp->agp_info.aper_base,
1204*4882a593Smuzhiyun 			dev->agp->agp_info.aper_size *
1205*4882a593Smuzhiyun 			1024 * 1024);
1206*4882a593Smuzhiyun 	}
1207*4882a593Smuzhiyun 
1208*4882a593Smuzhiyun 	/* Our userspace depends upon the agp mapping support. */
1209*4882a593Smuzhiyun 	if (!dev->agp)
1210*4882a593Smuzhiyun 		return -EINVAL;
1211*4882a593Smuzhiyun 
1212*4882a593Smuzhiyun 	pci_set_master(dev->pdev);
1213*4882a593Smuzhiyun 
1214*4882a593Smuzhiyun 	return 0;
1215*4882a593Smuzhiyun }
1216*4882a593Smuzhiyun 
/* Last-close hook: tear down all DMA state for the device. */
void i810_driver_lastclose(struct drm_device *dev)
{
	i810_dma_cleanup(dev);
}
1221*4882a593Smuzhiyun 
i810_driver_preclose(struct drm_device * dev,struct drm_file * file_priv)1222*4882a593Smuzhiyun void i810_driver_preclose(struct drm_device *dev, struct drm_file *file_priv)
1223*4882a593Smuzhiyun {
1224*4882a593Smuzhiyun 	if (dev->dev_private) {
1225*4882a593Smuzhiyun 		drm_i810_private_t *dev_priv = dev->dev_private;
1226*4882a593Smuzhiyun 		if (dev_priv->page_flipping)
1227*4882a593Smuzhiyun 			i810_do_cleanup_pageflip(dev);
1228*4882a593Smuzhiyun 	}
1229*4882a593Smuzhiyun 
1230*4882a593Smuzhiyun 	if (file_priv->master && file_priv->master->lock.hw_lock) {
1231*4882a593Smuzhiyun 		drm_legacy_idlelock_take(&file_priv->master->lock);
1232*4882a593Smuzhiyun 		i810_driver_reclaim_buffers(dev, file_priv);
1233*4882a593Smuzhiyun 		drm_legacy_idlelock_release(&file_priv->master->lock);
1234*4882a593Smuzhiyun 	} else {
1235*4882a593Smuzhiyun 		/* master disappeared, clean up stuff anyway and hope nothing
1236*4882a593Smuzhiyun 		 * goes wrong */
1237*4882a593Smuzhiyun 		i810_driver_reclaim_buffers(dev, file_priv);
1238*4882a593Smuzhiyun 	}
1239*4882a593Smuzhiyun 
1240*4882a593Smuzhiyun }
1241*4882a593Smuzhiyun 
/* DRM core quiescent hook: wait for the hardware to go idle. */
int i810_driver_dma_quiescent(struct drm_device *dev)
{
	i810_dma_quiescent(dev);
	return 0;
}
1247*4882a593Smuzhiyun 
/* Ioctl dispatch table for the i810 driver.  Entry order corresponds
 * to the DRM_I810_* command numbers.
 */
const struct drm_ioctl_desc i810_ioctls[] = {
	DRM_IOCTL_DEF_DRV(I810_INIT, i810_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I810_VERTEX, i810_dma_vertex, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I810_CLEAR, i810_clear_bufs, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I810_FLUSH, i810_flush_ioctl, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I810_GETAGE, i810_getage, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I810_GETBUF, i810_getbuf, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I810_SWAP, i810_swap_bufs, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I810_COPY, i810_copybuf, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I810_DOCOPY, i810_docopy, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I810_OV0INFO, i810_ov0_info, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I810_FSTATUS, i810_fstatus, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I810_OV0FLIP, i810_ov0_flip, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I810_MC, i810_dma_mc, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I810_RSTATUS, i810_rstatus, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I810_FLIP, i810_flip_bufs, DRM_AUTH|DRM_UNLOCKED),
};
1265*4882a593Smuzhiyun 
/* Number of entries in i810_ioctls, consumed by the DRM core. */
int i810_max_ioctl = ARRAY_SIZE(i810_ioctls);
1267