/* via_dmablit.c -- PCI DMA BitBlt support for the VIA Unichrome/Pro
 *
 * Copyright (C) 2005 Thomas Hellstrom, All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Thomas Hellstrom.
 *    Partially based on code obtained from Digeo Inc.
 */


/*
 * Unmaps the DMA mappings.
 * FIXME: Is this a NoOp on x86? Also
 * FIXME: What happens if this one is called and a pending blit has previously done
 * the same DMA mappings?
 */

#include <linux/pagemap.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <drm/drm_device.h>
#include <drm/via_drm.h>

#include "via_dmablit.h"
#include "via_drv.h"

#define VIA_PGDN(x)	     (((unsigned long)(x)) & PAGE_MASK)
#define VIA_PGOFF(x)	    (((unsigned long)(x)) & ~PAGE_MASK)
#define VIA_PFN(x)	      ((unsigned long)(x) >> PAGE_SHIFT)
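
/*
 * Worked example for the macros above, assuming the common 4 KiB page size
 * (PAGE_SHIFT == 12, PAGE_MASK == ~0xfffUL); other page sizes move the
 * split point accordingly:
 *
 *	x = 0x12345678
 *	VIA_PGDN(x)  == 0x12345000	(page-aligned base of the address)
 *	VIA_PGOFF(x) == 0x678		(byte offset within the page)
 *	VIA_PFN(x)   == 0x12345		(page frame number)
 */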

typedef struct _drm_via_descriptor {
	uint32_t mem_addr;
	uint32_t dev_addr;
	uint32_t size;
	uint32_t next;
} drm_via_descriptor_t;


/*
 * Unmap a DMA mapping.
 */



static void
via_unmap_blit_from_device(struct pci_dev *pdev, drm_via_sg_info_t *vsg)
{
	int num_desc = vsg->num_desc;
	unsigned cur_descriptor_page = num_desc / vsg->descriptors_per_page;
	unsigned descriptor_this_page = num_desc % vsg->descriptors_per_page;
	drm_via_descriptor_t *desc_ptr = vsg->desc_pages[cur_descriptor_page] +
		descriptor_this_page;
	dma_addr_t next = vsg->chain_start;

	while (num_desc--) {
		if (descriptor_this_page-- == 0) {
			cur_descriptor_page--;
			descriptor_this_page = vsg->descriptors_per_page - 1;
			desc_ptr = vsg->desc_pages[cur_descriptor_page] +
				descriptor_this_page;
		}
		dma_unmap_single(&pdev->dev, next, sizeof(*desc_ptr), DMA_TO_DEVICE);
		dma_unmap_page(&pdev->dev, desc_ptr->mem_addr, desc_ptr->size, vsg->direction);
		next = (dma_addr_t) desc_ptr->next;
		desc_ptr--;
	}
}

/*
 * If mode == 0, count how many descriptors are needed.
 * If mode == 1, map the DMA pages for the device, and also build and map
 * the descriptors. The descriptor chain is built in reverse order (each
 * descriptor points at the previously mapped one), because we are not
 * allowed to update the 'next' field of an already mapped descriptor
 * without syncing calls.
 */

static void
via_map_blit_for_device(struct pci_dev *pdev,
		   const drm_via_dmablit_t *xfer,
		   drm_via_sg_info_t *vsg,
		   int mode)
{
	unsigned cur_descriptor_page = 0;
	unsigned num_descriptors_this_page = 0;
	unsigned char *mem_addr = xfer->mem_addr;
	unsigned char *cur_mem;
	unsigned char *first_addr = (unsigned char *)VIA_PGDN(mem_addr);
	uint32_t fb_addr = xfer->fb_addr;
	uint32_t cur_fb;
	unsigned long line_len;
	unsigned remaining_len;
	int num_desc = 0;
	int cur_line;
	dma_addr_t next = 0 | VIA_DMA_DPR_EC;
	drm_via_descriptor_t *desc_ptr = NULL;

	if (mode == 1)
		desc_ptr = vsg->desc_pages[cur_descriptor_page];

	for (cur_line = 0; cur_line < xfer->num_lines; ++cur_line) {

		line_len = xfer->line_length;
		cur_fb = fb_addr;
		cur_mem = mem_addr;

		while (line_len > 0) {

			remaining_len = min(PAGE_SIZE-VIA_PGOFF(cur_mem), line_len);
			line_len -= remaining_len;

			if (mode == 1) {
				desc_ptr->mem_addr =
					dma_map_page(&pdev->dev,
						     vsg->pages[VIA_PFN(cur_mem) -
								VIA_PFN(first_addr)],
						     VIA_PGOFF(cur_mem), remaining_len,
						     vsg->direction);
				desc_ptr->dev_addr = cur_fb;

				desc_ptr->size = remaining_len;
				desc_ptr->next = (uint32_t) next;
				next = dma_map_single(&pdev->dev, desc_ptr, sizeof(*desc_ptr),
						      DMA_TO_DEVICE);
				desc_ptr++;
				if (++num_descriptors_this_page >= vsg->descriptors_per_page) {
					num_descriptors_this_page = 0;
					desc_ptr = vsg->desc_pages[++cur_descriptor_page];
				}
			}

			num_desc++;
			cur_mem += remaining_len;
			cur_fb += remaining_len;
		}

		mem_addr += xfer->mem_stride;
		fb_addr += xfer->fb_stride;
	}

	if (mode == 1) {
		vsg->chain_start = next;
		vsg->state = dr_via_device_mapped;
	}
	vsg->num_desc = num_desc;
}
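
/*
 * Worked example of the two-pass scheme above (illustrative numbers,
 * assuming 4 KiB pages): a blit with num_lines == 2 and line_length == 6000
 * needs two or three descriptors per line, depending on where the line
 * starts within its first page (6000 bytes span at least two 4096-byte
 * pages, and three whenever VIA_PGOFF(cur_mem) > 4096 - (6000 - 4096)).
 * via_build_sg_info() therefore calls this function twice:
 *
 *	via_map_blit_for_device(pdev, xfer, vsg, 0);	// count vsg->num_desc
 *	// ... allocate vsg->desc_pages based on vsg->num_desc ...
 *	via_map_blit_for_device(pdev, xfer, vsg, 1);	// map pages + descriptors
 */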

/*
 * Function that frees up all resources for a blit. It is usable even if the
 * blit info has only been partially built as long as the status enum is consistent
 * with the actual status of the used resources.
 */


static void
via_free_sg_info(struct pci_dev *pdev, drm_via_sg_info_t *vsg)
{
	int i;

	switch (vsg->state) {
	case dr_via_device_mapped:
		via_unmap_blit_from_device(pdev, vsg);
		fallthrough;
	case dr_via_desc_pages_alloc:
		for (i = 0; i < vsg->num_desc_pages; ++i) {
			if (vsg->desc_pages[i] != NULL)
				free_page((unsigned long)vsg->desc_pages[i]);
		}
		kfree(vsg->desc_pages);
		fallthrough;
	case dr_via_pages_locked:
		unpin_user_pages_dirty_lock(vsg->pages, vsg->num_pages,
					   (vsg->direction == DMA_FROM_DEVICE));
		fallthrough;
	case dr_via_pages_alloc:
		vfree(vsg->pages);
		fallthrough;
	default:
		vsg->state = dr_via_sg_init;
	}
	vfree(vsg->bounce_buffer);
	vsg->bounce_buffer = NULL;
	vsg->free_on_sequence = 0;
}

/*
 * Fire a blit engine.
 */

static void
via_fire_dmablit(struct drm_device *dev, drm_via_sg_info_t *vsg, int engine)
{
	drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;

	via_write(dev_priv, VIA_PCI_DMA_MAR0 + engine*0x10, 0);
	via_write(dev_priv, VIA_PCI_DMA_DAR0 + engine*0x10, 0);
	via_write(dev_priv, VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_DD | VIA_DMA_CSR_TD |
		  VIA_DMA_CSR_DE);
	via_write(dev_priv, VIA_PCI_DMA_MR0  + engine*0x04, VIA_DMA_MR_CM | VIA_DMA_MR_TDIE);
	via_write(dev_priv, VIA_PCI_DMA_BCR0 + engine*0x10, 0);
	via_write(dev_priv, VIA_PCI_DMA_DPR0 + engine*0x10, vsg->chain_start);
	wmb();
	via_write(dev_priv, VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_DE | VIA_DMA_CSR_TS);
	via_read(dev_priv, VIA_PCI_DMA_CSR0 + engine*0x04);
}
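
/*
 * Note on the sequence above (an explanatory sketch, not derived from VIA
 * documentation): the wmb() orders the descriptor-pointer (DPR) write
 * against the transfer-start (CSR TS) write that follows it, and the final
 * via_read() of the CSR flushes posted MMIO writes, so the start command
 * should have reached the device before we return.
 */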

/*
 * Obtain a page pointer array and pin all pages into system memory.
 * pin_user_pages_fast() will fail if the calling user does not have
 * access to the submitted address range.
 */

static int
via_lock_all_dma_pages(drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer)
{
	int ret;
	unsigned long first_pfn = VIA_PFN(xfer->mem_addr);
	vsg->num_pages = VIA_PFN(xfer->mem_addr + (xfer->num_lines * xfer->mem_stride - 1)) -
		first_pfn + 1;

	vsg->pages = vzalloc(array_size(sizeof(struct page *), vsg->num_pages));
	if (NULL == vsg->pages)
		return -ENOMEM;
	ret = pin_user_pages_fast((unsigned long)xfer->mem_addr,
			vsg->num_pages,
			vsg->direction == DMA_FROM_DEVICE ? FOLL_WRITE : 0,
			vsg->pages);
	if (ret != vsg->num_pages) {
		if (ret < 0)
			return ret;
		vsg->state = dr_via_pages_locked;
		return -EINVAL;
	}
	vsg->state = dr_via_pages_locked;
	DRM_DEBUG("DMA pages locked\n");
	return 0;
}

/*
 * Allocate DMA capable memory for the blit descriptor chain, and an array that keeps track of the
 * pages we allocate. We don't want to use kmalloc for the descriptor chain because it may be
 * quite large for some blits, and pages don't need to be contiguous.
 */

static int
via_alloc_desc_pages(drm_via_sg_info_t *vsg)
{
	int i;

	vsg->descriptors_per_page = PAGE_SIZE / sizeof(drm_via_descriptor_t);
	vsg->num_desc_pages = (vsg->num_desc + vsg->descriptors_per_page - 1) /
		vsg->descriptors_per_page;

	if (NULL == (vsg->desc_pages = kcalloc(vsg->num_desc_pages, sizeof(void *), GFP_KERNEL)))
		return -ENOMEM;

	vsg->state = dr_via_desc_pages_alloc;
	for (i = 0; i < vsg->num_desc_pages; ++i) {
		if (NULL == (vsg->desc_pages[i] =
			     (drm_via_descriptor_t *) __get_free_page(GFP_KERNEL)))
			return -ENOMEM;
	}
	DRM_DEBUG("Allocated %d pages for %d descriptors.\n", vsg->num_desc_pages,
		  vsg->num_desc);
	return 0;
}
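
/*
 * Sizing example (illustrative, assuming 4 KiB pages): each descriptor is
 * four uint32_t fields, i.e. 16 bytes, so descriptors_per_page is
 * 4096 / 16 == 256. A blit needing 1000 descriptors thus allocates
 * DIV_ROUND_UP(1000, 256) == 4 descriptor pages.
 */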

static void
via_abort_dmablit(struct drm_device *dev, int engine)
{
	drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;

	via_write(dev_priv, VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_TA);
}

static void
via_dmablit_engine_off(struct drm_device *dev, int engine)
{
	drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;

	via_write(dev_priv, VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_TD | VIA_DMA_CSR_DD);
}



/*
 * The dmablit part of the IRQ handler. Trying to do only reasonably fast things here.
 * The rest, like unmapping and freeing memory for done blits, is done in a separate workqueue
 * task. Basically the task of the interrupt handler is to submit a new blit to the engine, while
 * the workqueue task takes care of processing associated with the old blit.
 */

void
via_dmablit_handler(struct drm_device *dev, int engine, int from_irq)
{
	drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
	drm_via_blitq_t *blitq = dev_priv->blit_queues + engine;
	int cur;
	int done_transfer;
	unsigned long irqsave = 0;
	uint32_t status = 0;

	DRM_DEBUG("DMA blit handler called. engine = %d, from_irq = %d, blitq = 0x%lx\n",
		  engine, from_irq, (unsigned long) blitq);

	if (from_irq)
		spin_lock(&blitq->blit_lock);
	else
		spin_lock_irqsave(&blitq->blit_lock, irqsave);

	done_transfer = blitq->is_active &&
	  ((status = via_read(dev_priv, VIA_PCI_DMA_CSR0 + engine*0x04)) & VIA_DMA_CSR_TD);
	done_transfer = done_transfer || (blitq->aborting && !(status & VIA_DMA_CSR_DE));

	cur = blitq->cur;
	if (done_transfer) {

		blitq->blits[cur]->aborted = blitq->aborting;
		blitq->done_blit_handle++;
		wake_up(blitq->blit_queue + cur);

		cur++;
		if (cur >= VIA_NUM_BLIT_SLOTS)
			cur = 0;
		blitq->cur = cur;

		/*
		 * Clear transfer done flag.
		 */

		via_write(dev_priv, VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_TD);

		blitq->is_active = 0;
		blitq->aborting = 0;
		schedule_work(&blitq->wq);

	} else if (blitq->is_active && time_after_eq(jiffies, blitq->end)) {

		/*
		 * Abort transfer after one second.
		 */

		via_abort_dmablit(dev, engine);
		blitq->aborting = 1;
		blitq->end = jiffies + HZ;
	}

	if (!blitq->is_active) {
		if (blitq->num_outstanding) {
			via_fire_dmablit(dev, blitq->blits[cur], engine);
			blitq->is_active = 1;
			blitq->cur = cur;
			blitq->num_outstanding--;
			blitq->end = jiffies + HZ;
			if (!timer_pending(&blitq->poll_timer))
				mod_timer(&blitq->poll_timer, jiffies + 1);
		} else {
			if (timer_pending(&blitq->poll_timer))
				del_timer(&blitq->poll_timer);
			via_dmablit_engine_off(dev, engine);
		}
	}

	if (from_irq)
		spin_unlock(&blitq->blit_lock);
	else
		spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
}



/*
 * Check whether this blit is still active, performing necessary locking.
 */

static int
via_dmablit_active(drm_via_blitq_t *blitq, int engine, uint32_t handle, wait_queue_head_t **queue)
{
	unsigned long irqsave;
	uint32_t slot;
	int active;

	spin_lock_irqsave(&blitq->blit_lock, irqsave);

	/*
	 * Allow for handle wraparounds.
	 */

	active = ((blitq->done_blit_handle - handle) > (1 << 23)) &&
		((blitq->cur_blit_handle - handle) <= (1 << 23));

	if (queue && active) {
		slot = handle - blitq->done_blit_handle + blitq->cur - 1;
		if (slot >= VIA_NUM_BLIT_SLOTS)
			slot -= VIA_NUM_BLIT_SLOTS;
		*queue = blitq->blit_queue + slot;
	}

	spin_unlock_irqrestore(&blitq->blit_lock, irqsave);

	return active;
}
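
/*
 * Worked example of the wraparound test above (illustrative values): with
 * unsigned 32-bit arithmetic, a handle counts as active when it lies in the
 * half-open window (done_blit_handle, cur_blit_handle]. For
 * done_blit_handle == 3, cur_blit_handle == 6 and handle == 5:
 * 3 - 5 wraps to 0xfffffffe, which is > 1 << 23 (the handle is not yet
 * done), and 6 - 5 == 1, which is <= 1 << 23 (the handle has been issued),
 * so the blit is still active. The 1 << 23 threshold merely splits "behind"
 * from "ahead" with a large margin against wraparound.
 */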

/*
 * Sync. Wait up to three seconds for the blit to be performed.
 */

static int
via_dmablit_sync(struct drm_device *dev, uint32_t handle, int engine)
{

	drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
	drm_via_blitq_t *blitq = dev_priv->blit_queues + engine;
	wait_queue_head_t *queue;
	int ret = 0;

	if (via_dmablit_active(blitq, engine, handle, &queue)) {
		VIA_WAIT_ON(ret, *queue, 3 * HZ,
			    !via_dmablit_active(blitq, engine, handle, NULL));
	}
	DRM_DEBUG("DMA blit sync handle 0x%x engine %d returned %d\n",
		  handle, engine, ret);

	return ret;
}


/*
 * A timer that regularly polls the blit engine in cases where we don't have interrupts:
 * a) Broken hardware (typically those that don't have any video capture facility).
 * b) Blit abort. The hardware doesn't send an interrupt when a blit is aborted.
 * The timer and hardware IRQs can and do work in parallel. If the hardware has
 * IRQs, they will shorten the latency somewhat.
 */



static void
via_dmablit_timer(struct timer_list *t)
{
	drm_via_blitq_t *blitq = from_timer(blitq, t, poll_timer);
	struct drm_device *dev = blitq->dev;
	int engine = (int)
		(blitq - ((drm_via_private_t *)dev->dev_private)->blit_queues);

	DRM_DEBUG("Polling timer called for engine %d, jiffies %lu\n", engine,
		  (unsigned long) jiffies);

	via_dmablit_handler(dev, engine, 0);

	if (!timer_pending(&blitq->poll_timer)) {
		mod_timer(&blitq->poll_timer, jiffies + 1);

	       /*
		* Rerun handler to delete timer if engines are off, and
		* to shorten abort latency. This is a little nasty.
		*/

	       via_dmablit_handler(dev, engine, 0);

	}
}




/*
 * Workqueue task that frees data and mappings associated with a blit.
 * Also wakes up waiting processes. Each of these tasks handles one
 * blit engine only and may not be called on each interrupt.
 */


static void
via_dmablit_workqueue(struct work_struct *work)
{
	drm_via_blitq_t *blitq = container_of(work, drm_via_blitq_t, wq);
	struct drm_device *dev = blitq->dev;
	unsigned long irqsave;
	drm_via_sg_info_t *cur_sg;
	int cur_released;


	DRM_DEBUG("Workqueue task called for blit engine %ld\n", (unsigned long)
		  (blitq - ((drm_via_private_t *)dev->dev_private)->blit_queues));

	spin_lock_irqsave(&blitq->blit_lock, irqsave);

	while (blitq->serviced != blitq->cur) {

		cur_released = blitq->serviced++;

		DRM_DEBUG("Releasing blit slot %d\n", cur_released);

		if (blitq->serviced >= VIA_NUM_BLIT_SLOTS)
			blitq->serviced = 0;

		cur_sg = blitq->blits[cur_released];
		blitq->num_free++;

		spin_unlock_irqrestore(&blitq->blit_lock, irqsave);

		wake_up(&blitq->busy_queue);

		via_free_sg_info(dev->pdev, cur_sg);
		kfree(cur_sg);

		spin_lock_irqsave(&blitq->blit_lock, irqsave);
	}

	spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
}


/*
 * Init all blit engines. Currently we use two, but some hardware has four.
 */


void
via_init_dmablit(struct drm_device *dev)
{
	int i, j;
	drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
	drm_via_blitq_t *blitq;

	pci_set_master(dev->pdev);

	for (i = 0; i < VIA_NUM_BLIT_ENGINES; ++i) {
		blitq = dev_priv->blit_queues + i;
		blitq->dev = dev;
		blitq->cur_blit_handle = 0;
		blitq->done_blit_handle = 0;
		blitq->head = 0;
		blitq->cur = 0;
		blitq->serviced = 0;
		blitq->num_free = VIA_NUM_BLIT_SLOTS - 1;
		blitq->num_outstanding = 0;
		blitq->is_active = 0;
		blitq->aborting = 0;
		spin_lock_init(&blitq->blit_lock);
		for (j = 0; j < VIA_NUM_BLIT_SLOTS; ++j)
			init_waitqueue_head(blitq->blit_queue + j);
		init_waitqueue_head(&blitq->busy_queue);
		INIT_WORK(&blitq->wq, via_dmablit_workqueue);
		timer_setup(&blitq->poll_timer, via_dmablit_timer, 0);
	}
}

/*
 * Build all info and do all mappings required for a blit.
 */

static int
via_build_sg_info(struct drm_device *dev, drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer)
{
	int draw = xfer->to_fb;
	int ret = 0;

	vsg->direction = (draw) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
	vsg->bounce_buffer = NULL;

	vsg->state = dr_via_sg_init;

	if (xfer->num_lines <= 0 || xfer->line_length <= 0) {
		DRM_ERROR("Zero size bitblt.\n");
		return -EINVAL;
	}

	/*
	 * The check below is a driver limitation, not a hardware one. We
	 * don't want to lock unused pages, and don't want to incorporate the
	 * extra logic of avoiding them. Make sure there are none.
	 * (Not a big limitation anyway.)
	 */

	if ((xfer->mem_stride - xfer->line_length) > 2*PAGE_SIZE) {
		DRM_ERROR("Too large system memory stride. Stride: %d, "
			  "Length: %d\n", xfer->mem_stride, xfer->line_length);
		return -EINVAL;
	}

	if ((xfer->mem_stride == xfer->line_length) &&
	   (xfer->fb_stride == xfer->line_length)) {
		xfer->mem_stride *= xfer->num_lines;
		xfer->line_length = xfer->mem_stride;
		xfer->fb_stride = xfer->mem_stride;
		xfer->num_lines = 1;
	}

	/*
	 * Don't lock an arbitrarily large number of pages, since that creates
	 * a DoS security hole.
	 */

	if (xfer->num_lines > 2048 || (xfer->num_lines*xfer->mem_stride > (2048*2048*4))) {
		DRM_ERROR("Too large PCI DMA bitblt.\n");
		return -EINVAL;
	}

	/*
	 * We allow a negative fb stride to allow flipping of images in
	 * transfer.
	 */

	if (xfer->mem_stride < xfer->line_length ||
		abs(xfer->fb_stride) < xfer->line_length) {
		DRM_ERROR("Invalid frame-buffer / memory stride.\n");
		return -EINVAL;
	}

	/*
	 * A hardware bug seems to be worked around if system memory addresses start on
	 * 16 byte boundaries. This seems a bit restrictive however. VIA has been
	 * contacted about this. Meanwhile, impose the following restrictions:
	 */

#ifdef VIA_BUGFREE
	if ((((unsigned long)xfer->mem_addr & 3) != ((unsigned long)xfer->fb_addr & 3)) ||
	    ((xfer->num_lines > 1) && ((xfer->mem_stride & 3) != (xfer->fb_stride & 3)))) {
		DRM_ERROR("Invalid DRM bitblt alignment.\n");
		return -EINVAL;
	}
#else
	if ((((unsigned long)xfer->mem_addr & 15) ||
	      ((unsigned long)xfer->fb_addr & 3)) ||
	   ((xfer->num_lines > 1) &&
	   ((xfer->mem_stride & 15) || (xfer->fb_stride & 3)))) {
		DRM_ERROR("Invalid DRM bitblt alignment.\n");
		return -EINVAL;
	}
#endif

	if (0 != (ret = via_lock_all_dma_pages(vsg, xfer))) {
		DRM_ERROR("Could not lock DMA pages.\n");
		via_free_sg_info(dev->pdev, vsg);
		return ret;
	}

	via_map_blit_for_device(dev->pdev, xfer, vsg, 0);
	if (0 != (ret = via_alloc_desc_pages(vsg))) {
		DRM_ERROR("Could not allocate DMA descriptor pages.\n");
		via_free_sg_info(dev->pdev, vsg);
		return ret;
	}
	via_map_blit_for_device(dev->pdev, xfer, vsg, 1);

	return 0;
}


/*
 * Reserve one free slot in the blit queue. Will wait for one second for one
 * to become available. Otherwise -EBUSY is returned.
 */

static int
via_dmablit_grab_slot(drm_via_blitq_t *blitq, int engine)
{
	int ret = 0;
	unsigned long irqsave;

	DRM_DEBUG("Num free is %d\n", blitq->num_free);
	spin_lock_irqsave(&blitq->blit_lock, irqsave);
	while (blitq->num_free == 0) {
		spin_unlock_irqrestore(&blitq->blit_lock, irqsave);

		VIA_WAIT_ON(ret, blitq->busy_queue, HZ, blitq->num_free > 0);
		if (ret)
			return (-EINTR == ret) ? -EAGAIN : ret;

		spin_lock_irqsave(&blitq->blit_lock, irqsave);
	}

	blitq->num_free--;
	spin_unlock_irqrestore(&blitq->blit_lock, irqsave);

	return 0;
}

/*
 * Hand back a free slot if we changed our mind.
 */

static void
via_dmablit_release_slot(drm_via_blitq_t *blitq)
{
	unsigned long irqsave;

	spin_lock_irqsave(&blitq->blit_lock, irqsave);
	blitq->num_free++;
	spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
	wake_up(&blitq->busy_queue);
}

/*
 * Grab a free slot. Build blit info and queue a blit.
 */


static int
via_dmablit(struct drm_device *dev, drm_via_dmablit_t *xfer)
{
	drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
	drm_via_sg_info_t *vsg;
	drm_via_blitq_t *blitq;
	int ret;
	int engine;
	unsigned long irqsave;

	if (dev_priv == NULL) {
		DRM_ERROR("Called without initialization.\n");
		return -EINVAL;
	}

	engine = (xfer->to_fb) ? 0 : 1;
	blitq = dev_priv->blit_queues + engine;
	if (0 != (ret = via_dmablit_grab_slot(blitq, engine)))
		return ret;
	if (NULL == (vsg = kmalloc(sizeof(*vsg), GFP_KERNEL))) {
		via_dmablit_release_slot(blitq);
		return -ENOMEM;
	}
	if (0 != (ret = via_build_sg_info(dev, vsg, xfer))) {
		via_dmablit_release_slot(blitq);
		kfree(vsg);
		return ret;
	}
	spin_lock_irqsave(&blitq->blit_lock, irqsave);

	blitq->blits[blitq->head++] = vsg;
	if (blitq->head >= VIA_NUM_BLIT_SLOTS)
		blitq->head = 0;
	blitq->num_outstanding++;
	xfer->sync.sync_handle = ++blitq->cur_blit_handle;

	spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
	xfer->sync.engine = engine;

	via_dmablit_handler(dev, engine, 0);

	return 0;
}

/*
 * Sync on a previously submitted blit. Note that the X server uses signals extensively, and
 * that there is a very big probability that this IOCTL will be interrupted by a signal. In that
 * case it returns with -EAGAIN for the signal to be delivered.
 * The caller should then reissue the IOCTL. This is similar to what is being done for drmGetLock().
 */

int
via_dma_blit_sync(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_via_blitsync_t *sync = data;
	int err;

	if (sync->engine >= VIA_NUM_BLIT_ENGINES)
		return -EINVAL;

	err = via_dmablit_sync(dev, sync->sync_handle, sync->engine);

	if (-EINTR == err)
		err = -EAGAIN;

	return err;
}


/*
 * Queue a blit and hand back a handle to be used for sync. This IOCTL may be interrupted by a signal
 * while waiting for a free slot in the blit queue. In that case it returns with -EAGAIN and should
 * be reissued. See the above IOCTL code.
 */

int
via_dma_blit(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_via_dmablit_t *xfer = data;
	int err;

	err = via_dmablit(dev, xfer);

	return err;
}
805