/* savage_state.c -- State and drawing support for Savage
 *
 * Copyright 2004  Felix Kuehling
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NON-INFRINGEMENT. IN NO EVENT SHALL FELIX KUEHLING BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
 * CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/slab.h>
#include <linux/uaccess.h>

#include <drm/drm_device.h>
#include <drm/drm_file.h>
#include <drm/drm_print.h>
#include <drm/savage_drm.h>

#include "savage_drv.h"

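/*
 * Emit the hardware scissor registers for the current clip rectangle.
 * The box is merged into the cached scissor values and only written out
 * (preceded by a 3D-idle wait) when the result actually changes.
 */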
void savage_emit_clip_rect_s3d(drm_savage_private_t * dev_priv,
			       const struct drm_clip_rect * pbox)
{
	uint32_t scstart = dev_priv->state.s3d.new_scstart;
	uint32_t scend = dev_priv->state.s3d.new_scend;
	scstart = (scstart & ~SAVAGE_SCISSOR_MASK_S3D) |
	    ((uint32_t) pbox->x1 & 0x000007ff) |
	    (((uint32_t) pbox->y1 << 16) & 0x07ff0000);
	scend = (scend & ~SAVAGE_SCISSOR_MASK_S3D) |
	    (((uint32_t) pbox->x2 - 1) & 0x000007ff) |
	    ((((uint32_t) pbox->y2 - 1) << 16) & 0x07ff0000);
	if (scstart != dev_priv->state.s3d.scstart ||
	    scend != dev_priv->state.s3d.scend) {
		DMA_LOCALS;
		BEGIN_DMA(4);
		DMA_WRITE(BCI_CMD_WAIT | BCI_CMD_WAIT_3D);
		DMA_SET_REGISTERS(SAVAGE_SCSTART_S3D, 2);
		DMA_WRITE(scstart);
		DMA_WRITE(scend);
		dev_priv->state.s3d.scstart = scstart;
		dev_priv->state.s3d.scend = scend;
		dev_priv->waiting = 1;
		DMA_COMMIT();
	}
}

void savage_emit_clip_rect_s4(drm_savage_private_t * dev_priv,
			      const struct drm_clip_rect * pbox)
{
	uint32_t drawctrl0 = dev_priv->state.s4.new_drawctrl0;
	uint32_t drawctrl1 = dev_priv->state.s4.new_drawctrl1;
	drawctrl0 = (drawctrl0 & ~SAVAGE_SCISSOR_MASK_S4) |
	    ((uint32_t) pbox->x1 & 0x000007ff) |
	    (((uint32_t) pbox->y1 << 12) & 0x00fff000);
	drawctrl1 = (drawctrl1 & ~SAVAGE_SCISSOR_MASK_S4) |
	    (((uint32_t) pbox->x2 - 1) & 0x000007ff) |
	    ((((uint32_t) pbox->y2 - 1) << 12) & 0x00fff000);
	if (drawctrl0 != dev_priv->state.s4.drawctrl0 ||
	    drawctrl1 != dev_priv->state.s4.drawctrl1) {
		DMA_LOCALS;
		BEGIN_DMA(4);
		DMA_WRITE(BCI_CMD_WAIT | BCI_CMD_WAIT_3D);
		DMA_SET_REGISTERS(SAVAGE_DRAWCTRL0_S4, 2);
		DMA_WRITE(drawctrl0);
		DMA_WRITE(drawctrl1);
		dev_priv->state.s4.drawctrl0 = drawctrl0;
		dev_priv->state.s4.drawctrl1 = drawctrl1;
		dev_priv->waiting = 1;
		DMA_COMMIT();
	}
}

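/*
 * Validate a texture base address supplied by user space. Bit 0 selects
 * AGP (set) versus local video memory (clear) and the other low bits are
 * reserved; the address must lie inside the corresponding texture heap.
 */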
static int savage_verify_texaddr(drm_savage_private_t * dev_priv, int unit,
				 uint32_t addr)
{
	if ((addr & 6) != 2) {	/* reserved bits */
		DRM_ERROR("bad texAddr%d %08x (reserved bits)\n", unit, addr);
		return -EINVAL;
	}
	if (!(addr & 1)) {	/* local */
		addr &= ~7;
		if (addr < dev_priv->texture_offset ||
		    addr >= dev_priv->texture_offset + dev_priv->texture_size) {
			DRM_ERROR
			    ("bad texAddr%d %08x (local addr out of range)\n",
			     unit, addr);
			return -EINVAL;
		}
	} else {		/* AGP */
		if (!dev_priv->agp_textures) {
			DRM_ERROR("bad texAddr%d %08x (AGP not available)\n",
				  unit, addr);
			return -EINVAL;
		}
		addr &= ~7;
		if (addr < dev_priv->agp_textures->offset ||
		    addr >= (dev_priv->agp_textures->offset +
			     dev_priv->agp_textures->size)) {
			DRM_ERROR
			    ("bad texAddr%d %08x (AGP addr out of range)\n",
			     unit, addr);
			return -EINVAL;
		}
	}
	return 0;
}

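/*
 * SAVE_STATE shadows a register value from the user-supplied range into
 * the driver's software copy of the hardware state. SAVE_STATE_MASK does
 * the same but only updates the bits covered by 'mask', preserving the
 * rest of the cached value.
 */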
#define SAVE_STATE(reg,where)			\
	if(start <= reg && start+count > reg)	\
		dev_priv->state.where = regs[reg - start]
#define SAVE_STATE_MASK(reg,where,mask) do {			\
	if(start <= reg && start+count > reg) {			\
		uint32_t tmp;					\
		tmp = regs[reg - start];			\
		dev_priv->state.where = (tmp & (mask)) |	\
			(dev_priv->state.where & ~(mask));	\
	}							\
} while (0)

static int savage_verify_state_s3d(drm_savage_private_t * dev_priv,
				   unsigned int start, unsigned int count,
				   const uint32_t *regs)
{
	if (start < SAVAGE_TEXPALADDR_S3D ||
	    start + count - 1 > SAVAGE_DESTTEXRWWATERMARK_S3D) {
		DRM_ERROR("invalid register range (0x%04x-0x%04x)\n",
			  start, start + count - 1);
		return -EINVAL;
	}

	SAVE_STATE_MASK(SAVAGE_SCSTART_S3D, s3d.new_scstart,
			~SAVAGE_SCISSOR_MASK_S3D);
	SAVE_STATE_MASK(SAVAGE_SCEND_S3D, s3d.new_scend,
			~SAVAGE_SCISSOR_MASK_S3D);

	/* if any texture regs were changed ... */
	if (start <= SAVAGE_TEXCTRL_S3D &&
	    start + count > SAVAGE_TEXPALADDR_S3D) {
		/* ... check texture state */
		SAVE_STATE(SAVAGE_TEXCTRL_S3D, s3d.texctrl);
		SAVE_STATE(SAVAGE_TEXADDR_S3D, s3d.texaddr);
		if (dev_priv->state.s3d.texctrl & SAVAGE_TEXCTRL_TEXEN_MASK)
			return savage_verify_texaddr(dev_priv, 0,
						dev_priv->state.s3d.texaddr);
	}

	return 0;
}

static int savage_verify_state_s4(drm_savage_private_t * dev_priv,
				  unsigned int start, unsigned int count,
				  const uint32_t *regs)
{
	int ret = 0;

	if (start < SAVAGE_DRAWLOCALCTRL_S4 ||
	    start + count - 1 > SAVAGE_TEXBLENDCOLOR_S4) {
		DRM_ERROR("invalid register range (0x%04x-0x%04x)\n",
			  start, start + count - 1);
		return -EINVAL;
	}

	SAVE_STATE_MASK(SAVAGE_DRAWCTRL0_S4, s4.new_drawctrl0,
			~SAVAGE_SCISSOR_MASK_S4);
	SAVE_STATE_MASK(SAVAGE_DRAWCTRL1_S4, s4.new_drawctrl1,
			~SAVAGE_SCISSOR_MASK_S4);

	/* if any texture regs were changed ... */
	if (start <= SAVAGE_TEXDESCR_S4 &&
	    start + count > SAVAGE_TEXPALADDR_S4) {
		/* ... check texture state */
		SAVE_STATE(SAVAGE_TEXDESCR_S4, s4.texdescr);
		SAVE_STATE(SAVAGE_TEXADDR0_S4, s4.texaddr0);
		SAVE_STATE(SAVAGE_TEXADDR1_S4, s4.texaddr1);
		if (dev_priv->state.s4.texdescr & SAVAGE_TEXDESCR_TEX0EN_MASK)
			ret |= savage_verify_texaddr(dev_priv, 0,
						dev_priv->state.s4.texaddr0);
		if (dev_priv->state.s4.texdescr & SAVAGE_TEXDESCR_TEX1EN_MASK)
			ret |= savage_verify_texaddr(dev_priv, 1,
						dev_priv->state.s4.texaddr1);
	}

	return ret;
}

#undef SAVE_STATE
#undef SAVE_STATE_MASK

static int savage_dispatch_state(drm_savage_private_t * dev_priv,
				 const drm_savage_cmd_header_t * cmd_header,
				 const uint32_t *regs)
{
	unsigned int count = cmd_header->state.count;
	unsigned int start = cmd_header->state.start;
	unsigned int count2 = 0;
	unsigned int bci_size;
	int ret;
	DMA_LOCALS;

	if (!count)
		return 0;

	if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
		ret = savage_verify_state_s3d(dev_priv, start, count, regs);
		if (ret != 0)
			return ret;
		/* scissor regs are emitted in savage_dispatch_draw */
		if (start < SAVAGE_SCSTART_S3D) {
			if (start + count > SAVAGE_SCEND_S3D + 1)
				count2 = count - (SAVAGE_SCEND_S3D + 1 - start);
			if (start + count > SAVAGE_SCSTART_S3D)
				count = SAVAGE_SCSTART_S3D - start;
		} else if (start <= SAVAGE_SCEND_S3D) {
			if (start + count > SAVAGE_SCEND_S3D + 1) {
				count -= SAVAGE_SCEND_S3D + 1 - start;
				start = SAVAGE_SCEND_S3D + 1;
			} else
				return 0;
		}
	} else {
		ret = savage_verify_state_s4(dev_priv, start, count, regs);
		if (ret != 0)
			return ret;
		/* scissor regs are emitted in savage_dispatch_draw */
		if (start < SAVAGE_DRAWCTRL0_S4) {
			if (start + count > SAVAGE_DRAWCTRL1_S4 + 1)
				count2 = count -
					 (SAVAGE_DRAWCTRL1_S4 + 1 - start);
			if (start + count > SAVAGE_DRAWCTRL0_S4)
				count = SAVAGE_DRAWCTRL0_S4 - start;
		} else if (start <= SAVAGE_DRAWCTRL1_S4) {
			if (start + count > SAVAGE_DRAWCTRL1_S4 + 1) {
				count -= SAVAGE_DRAWCTRL1_S4 + 1 - start;
				start = SAVAGE_DRAWCTRL1_S4 + 1;
			} else
				return 0;
		}
	}

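	/* One BCI_SET_REGISTERS header is needed for every run of up to
	 * 255 registers, for each of the two ranges (before and after the
	 * scissor registers carved out above). */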
	bci_size = count + (count + 254) / 255 + count2 + (count2 + 254) / 255;

	if (cmd_header->state.global) {
		BEGIN_DMA(bci_size + 1);
		DMA_WRITE(BCI_CMD_WAIT | BCI_CMD_WAIT_3D);
		dev_priv->waiting = 1;
	} else {
		BEGIN_DMA(bci_size);
	}

	do {
		while (count > 0) {
			unsigned int n = count < 255 ? count : 255;
			DMA_SET_REGISTERS(start, n);
			DMA_COPY(regs, n);
			count -= n;
			start += n;
			regs += n;
		}
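		/* Step over the two scissor registers separating the two
		 * ranges; they are emitted per clip rect in
		 * savage_dispatch_draw(). */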
		start += 2;
		regs += 2;
		count = count2;
		count2 = 0;
	} while (count);

	DMA_COMMIT();

	return 0;
}

static int savage_dispatch_dma_prim(drm_savage_private_t * dev_priv,
				    const drm_savage_cmd_header_t * cmd_header,
				    const struct drm_buf * dmabuf)
{
	unsigned char reorder = 0;
	unsigned int prim = cmd_header->prim.prim;
	unsigned int skip = cmd_header->prim.skip;
	unsigned int n = cmd_header->prim.count;
	unsigned int start = cmd_header->prim.start;
	unsigned int i;
	BCI_LOCALS;

	if (!dmabuf) {
		DRM_ERROR("called without dma buffers!\n");
		return -EINVAL;
	}

	if (!n)
		return 0;

	switch (prim) {
	case SAVAGE_PRIM_TRILIST_201:
		reorder = 1;
		prim = SAVAGE_PRIM_TRILIST;
		fallthrough;
	case SAVAGE_PRIM_TRILIST:
		if (n % 3 != 0) {
			DRM_ERROR("wrong number of vertices %u in TRILIST\n",
				  n);
			return -EINVAL;
		}
		break;
	case SAVAGE_PRIM_TRISTRIP:
	case SAVAGE_PRIM_TRIFAN:
		if (n < 3) {
			DRM_ERROR
			    ("wrong number of vertices %u in TRIFAN/STRIP\n",
			     n);
			return -EINVAL;
		}
		break;
	default:
		DRM_ERROR("invalid primitive type %u\n", prim);
		return -EINVAL;
	}

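	/* Vertex DMA uses fixed 8-dword (32-byte) vertices: Savage3D
	 * accepts no skip flags at all, and Savage4 only accepts skip
	 * flags that still leave an 8-dword vertex. */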
	if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
		if (skip != 0) {
			DRM_ERROR("invalid skip flags 0x%04x for DMA\n", skip);
			return -EINVAL;
		}
	} else {
		unsigned int size = 10 - (skip & 1) - (skip >> 1 & 1) -
		    (skip >> 2 & 1) - (skip >> 3 & 1) - (skip >> 4 & 1) -
		    (skip >> 5 & 1) - (skip >> 6 & 1) - (skip >> 7 & 1);
		if (skip > SAVAGE_SKIP_ALL_S4 || size != 8) {
			DRM_ERROR("invalid skip flags 0x%04x for DMA\n", skip);
			return -EINVAL;
		}
		if (reorder) {
			DRM_ERROR("TRILIST_201 used on Savage4 hardware\n");
			return -EINVAL;
		}
	}

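	/* Each DMA vertex occupies 32 bytes, so dmabuf->total / 32 is the
	 * number of vertices the buffer can hold. */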
	if (start + n > dmabuf->total / 32) {
		DRM_ERROR("vertex indices (%u-%u) out of range (0-%u)\n",
			  start, start + n - 1, dmabuf->total / 32);
		return -EINVAL;
	}

	/* Vertex DMA doesn't work with command DMA at the same time,
	 * so we use BCI_... to submit commands here. Flush buffered
	 * faked DMA first. */
	DMA_FLUSH();

	if (dmabuf->bus_address != dev_priv->state.common.vbaddr) {
		BEGIN_BCI(2);
		BCI_SET_REGISTERS(SAVAGE_VERTBUFADDR, 1);
		BCI_WRITE(dmabuf->bus_address | dev_priv->dma_type);
		dev_priv->state.common.vbaddr = dmabuf->bus_address;
	}
	if (S3_SAVAGE3D_SERIES(dev_priv->chipset) && dev_priv->waiting) {
		/* Workaround for what looks like a hardware bug. If a
		 * WAIT_3D_IDLE was emitted some time before the
		 * indexed drawing command then the engine will lock
		 * up. There are two known workarounds:
		 * WAIT_IDLE_EMPTY or emit at least 63 NOPs. */
		BEGIN_BCI(63);
		for (i = 0; i < 63; ++i)
			BCI_WRITE(BCI_CMD_WAIT);
		dev_priv->waiting = 0;
	}

	prim <<= 25;
	while (n != 0) {
		/* Can emit up to 255 indices (85 triangles) at once. */
		unsigned int count = n > 255 ? 255 : n;
		if (reorder) {
			/* Need to reorder indices for correct flat
			 * shading while preserving the clock sense
			 * for correct culling. Only on Savage3D. */
			int reorder[3] = { -1, -1, -1 };
			reorder[start % 3] = 2;
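			/* With reorder[start % 3] = 2 and -1 elsewhere,
			 * each triple (v, v+1, v+2) is emitted as
			 * (v+2, v, v+1) - the 2,0,1 rotation that gives
			 * TRILIST_201 its name. */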

			BEGIN_BCI((count + 1 + 1) / 2);
			BCI_DRAW_INDICES_S3D(count, prim, start + 2);

			for (i = start + 1; i + 1 < start + count; i += 2)
				BCI_WRITE((i + reorder[i % 3]) |
					  ((i + 1 +
					    reorder[(i + 1) % 3]) << 16));
			if (i < start + count)
				BCI_WRITE(i + reorder[i % 3]);
		} else if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
			BEGIN_BCI((count + 1 + 1) / 2);
			BCI_DRAW_INDICES_S3D(count, prim, start);

			for (i = start + 1; i + 1 < start + count; i += 2)
				BCI_WRITE(i | ((i + 1) << 16));
			if (i < start + count)
				BCI_WRITE(i);
		} else {
			BEGIN_BCI((count + 2 + 1) / 2);
			BCI_DRAW_INDICES_S4(count, prim, skip);

			for (i = start; i + 1 < start + count; i += 2)
				BCI_WRITE(i | ((i + 1) << 16));
			if (i < start + count)
				BCI_WRITE(i);
		}

		start += count;
		n -= count;

		prim |= BCI_CMD_DRAW_CONT;
	}

	return 0;
}

static int savage_dispatch_vb_prim(drm_savage_private_t * dev_priv,
				   const drm_savage_cmd_header_t * cmd_header,
				   const uint32_t *vtxbuf, unsigned int vb_size,
				   unsigned int vb_stride)
{
	unsigned char reorder = 0;
	unsigned int prim = cmd_header->prim.prim;
	unsigned int skip = cmd_header->prim.skip;
	unsigned int n = cmd_header->prim.count;
	unsigned int start = cmd_header->prim.start;
	unsigned int vtx_size;
	unsigned int i;
	DMA_LOCALS;

	if (!n)
		return 0;

	switch (prim) {
	case SAVAGE_PRIM_TRILIST_201:
		reorder = 1;
		prim = SAVAGE_PRIM_TRILIST;
		fallthrough;
	case SAVAGE_PRIM_TRILIST:
		if (n % 3 != 0) {
			DRM_ERROR("wrong number of vertices %u in TRILIST\n",
				  n);
			return -EINVAL;
		}
		break;
	case SAVAGE_PRIM_TRISTRIP:
	case SAVAGE_PRIM_TRIFAN:
		if (n < 3) {
			DRM_ERROR
			    ("wrong number of vertices %u in TRIFAN/STRIP\n",
			     n);
			return -EINVAL;
		}
		break;
	default:
		DRM_ERROR("invalid primitive type %u\n", prim);
		return -EINVAL;
	}

	if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
		if (skip > SAVAGE_SKIP_ALL_S3D) {
			DRM_ERROR("invalid skip flags 0x%04x\n", skip);
			return -EINVAL;
		}
		vtx_size = 8;	/* full vertex */
	} else {
		if (skip > SAVAGE_SKIP_ALL_S4) {
			DRM_ERROR("invalid skip flags 0x%04x\n", skip);
			return -EINVAL;
		}
		vtx_size = 10;	/* full vertex */
	}

	vtx_size -= (skip & 1) + (skip >> 1 & 1) +
	    (skip >> 2 & 1) + (skip >> 3 & 1) + (skip >> 4 & 1) +
	    (skip >> 5 & 1) + (skip >> 6 & 1) + (skip >> 7 & 1);

	if (vtx_size > vb_stride) {
		DRM_ERROR("vertex size greater than vb stride (%u > %u)\n",
			  vtx_size, vb_stride);
		return -EINVAL;
	}

	if (start + n > vb_size / (vb_stride * 4)) {
		DRM_ERROR("vertex indices (%u-%u) out of range (0-%u)\n",
			  start, start + n - 1, vb_size / (vb_stride * 4));
		return -EINVAL;
	}

	prim <<= 25;
	while (n != 0) {
		/* Can emit up to 255 vertices (85 triangles) at once. */
		unsigned int count = n > 255 ? 255 : n;
		if (reorder) {
			/* Need to reorder vertices for correct flat
			 * shading while preserving the clock sense
			 * for correct culling. Only on Savage3D. */
			int reorder[3] = { -1, -1, -1 };
			reorder[start % 3] = 2;

			BEGIN_DMA(count * vtx_size + 1);
			DMA_DRAW_PRIMITIVE(count, prim, skip);

			for (i = start; i < start + count; ++i) {
				unsigned int j = i + reorder[i % 3];
				DMA_COPY(&vtxbuf[vb_stride * j], vtx_size);
			}

			DMA_COMMIT();
		} else {
			BEGIN_DMA(count * vtx_size + 1);
			DMA_DRAW_PRIMITIVE(count, prim, skip);

			if (vb_stride == vtx_size) {
				DMA_COPY(&vtxbuf[vb_stride * start],
					 vtx_size * count);
			} else {
				for (i = start; i < start + count; ++i) {
					DMA_COPY(&vtxbuf[vb_stride * i],
						 vtx_size);
				}
			}

			DMA_COMMIT();
		}

		start += count;
		n -= count;

		prim |= BCI_CMD_DRAW_CONT;
	}

	return 0;
}

static int savage_dispatch_dma_idx(drm_savage_private_t * dev_priv,
				   const drm_savage_cmd_header_t * cmd_header,
				   const uint16_t *idx,
				   const struct drm_buf * dmabuf)
{
	unsigned char reorder = 0;
	unsigned int prim = cmd_header->idx.prim;
	unsigned int skip = cmd_header->idx.skip;
	unsigned int n = cmd_header->idx.count;
	unsigned int i;
	BCI_LOCALS;

	if (!dmabuf) {
		DRM_ERROR("called without dma buffers!\n");
		return -EINVAL;
	}

	if (!n)
		return 0;

	switch (prim) {
	case SAVAGE_PRIM_TRILIST_201:
		reorder = 1;
		prim = SAVAGE_PRIM_TRILIST;
		fallthrough;
	case SAVAGE_PRIM_TRILIST:
		if (n % 3 != 0) {
			DRM_ERROR("wrong number of indices %u in TRILIST\n", n);
			return -EINVAL;
		}
		break;
	case SAVAGE_PRIM_TRISTRIP:
	case SAVAGE_PRIM_TRIFAN:
		if (n < 3) {
			DRM_ERROR
			    ("wrong number of indices %u in TRIFAN/STRIP\n", n);
			return -EINVAL;
		}
		break;
	default:
		DRM_ERROR("invalid primitive type %u\n", prim);
		return -EINVAL;
	}

	if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
		if (skip != 0) {
			DRM_ERROR("invalid skip flags 0x%04x for DMA\n", skip);
			return -EINVAL;
		}
	} else {
		unsigned int size = 10 - (skip & 1) - (skip >> 1 & 1) -
		    (skip >> 2 & 1) - (skip >> 3 & 1) - (skip >> 4 & 1) -
		    (skip >> 5 & 1) - (skip >> 6 & 1) - (skip >> 7 & 1);
		if (skip > SAVAGE_SKIP_ALL_S4 || size != 8) {
			DRM_ERROR("invalid skip flags 0x%04x for DMA\n", skip);
			return -EINVAL;
		}
		if (reorder) {
			DRM_ERROR("TRILIST_201 used on Savage4 hardware\n");
			return -EINVAL;
		}
	}

	/* Vertex DMA doesn't work with command DMA at the same time,
	 * so we use BCI_... to submit commands here. Flush buffered
	 * faked DMA first. */
	DMA_FLUSH();

	if (dmabuf->bus_address != dev_priv->state.common.vbaddr) {
		BEGIN_BCI(2);
		BCI_SET_REGISTERS(SAVAGE_VERTBUFADDR, 1);
		BCI_WRITE(dmabuf->bus_address | dev_priv->dma_type);
		dev_priv->state.common.vbaddr = dmabuf->bus_address;
	}
	if (S3_SAVAGE3D_SERIES(dev_priv->chipset) && dev_priv->waiting) {
		/* Workaround for what looks like a hardware bug. If a
		 * WAIT_3D_IDLE was emitted some time before the
		 * indexed drawing command then the engine will lock
		 * up. There are two known workarounds:
		 * WAIT_IDLE_EMPTY or emit at least 63 NOPs. */
		BEGIN_BCI(63);
		for (i = 0; i < 63; ++i)
			BCI_WRITE(BCI_CMD_WAIT);
		dev_priv->waiting = 0;
	}

	prim <<= 25;
	while (n != 0) {
		/* Can emit up to 255 indices (85 triangles) at once. */
		unsigned int count = n > 255 ? 255 : n;

		/* check indices */
		for (i = 0; i < count; ++i) {
			if (idx[i] > dmabuf->total / 32) {
				DRM_ERROR("idx[%u]=%u out of range (0-%u)\n",
					  i, idx[i], dmabuf->total / 32);
				return -EINVAL;
			}
		}

		if (reorder) {
			/* Need to reorder indices for correct flat
			 * shading while preserving the clock sense
			 * for correct culling. Only on Savage3D. */
			int reorder[3] = { 2, -1, -1 };

			BEGIN_BCI((count + 1 + 1) / 2);
			BCI_DRAW_INDICES_S3D(count, prim, idx[2]);

			for (i = 1; i + 1 < count; i += 2)
				BCI_WRITE(idx[i + reorder[i % 3]] |
					  (idx[i + 1 +
					   reorder[(i + 1) % 3]] << 16));
			if (i < count)
				BCI_WRITE(idx[i + reorder[i % 3]]);
		} else if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
			BEGIN_BCI((count + 1 + 1) / 2);
			BCI_DRAW_INDICES_S3D(count, prim, idx[0]);

			for (i = 1; i + 1 < count; i += 2)
				BCI_WRITE(idx[i] | (idx[i + 1] << 16));
			if (i < count)
				BCI_WRITE(idx[i]);
		} else {
			BEGIN_BCI((count + 2 + 1) / 2);
			BCI_DRAW_INDICES_S4(count, prim, skip);

			for (i = 0; i + 1 < count; i += 2)
				BCI_WRITE(idx[i] | (idx[i + 1] << 16));
			if (i < count)
				BCI_WRITE(idx[i]);
		}

		idx += count;
		n -= count;

		prim |= BCI_CMD_DRAW_CONT;
	}

	return 0;
}

static int savage_dispatch_vb_idx(drm_savage_private_t * dev_priv,
				  const drm_savage_cmd_header_t * cmd_header,
				  const uint16_t *idx,
				  const uint32_t *vtxbuf,
				  unsigned int vb_size, unsigned int vb_stride)
{
	unsigned char reorder = 0;
	unsigned int prim = cmd_header->idx.prim;
	unsigned int skip = cmd_header->idx.skip;
	unsigned int n = cmd_header->idx.count;
	unsigned int vtx_size;
	unsigned int i;
	DMA_LOCALS;

	if (!n)
		return 0;

	switch (prim) {
	case SAVAGE_PRIM_TRILIST_201:
		reorder = 1;
		prim = SAVAGE_PRIM_TRILIST;
		fallthrough;
	case SAVAGE_PRIM_TRILIST:
		if (n % 3 != 0) {
			DRM_ERROR("wrong number of indices %u in TRILIST\n", n);
			return -EINVAL;
		}
		break;
	case SAVAGE_PRIM_TRISTRIP:
	case SAVAGE_PRIM_TRIFAN:
		if (n < 3) {
			DRM_ERROR
			    ("wrong number of indices %u in TRIFAN/STRIP\n", n);
			return -EINVAL;
		}
		break;
	default:
		DRM_ERROR("invalid primitive type %u\n", prim);
		return -EINVAL;
	}

	if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
		if (skip > SAVAGE_SKIP_ALL_S3D) {
			DRM_ERROR("invalid skip flags 0x%04x\n", skip);
			return -EINVAL;
		}
		vtx_size = 8;	/* full vertex */
	} else {
		if (skip > SAVAGE_SKIP_ALL_S4) {
			DRM_ERROR("invalid skip flags 0x%04x\n", skip);
			return -EINVAL;
		}
		vtx_size = 10;	/* full vertex */
	}

	vtx_size -= (skip & 1) + (skip >> 1 & 1) +
	    (skip >> 2 & 1) + (skip >> 3 & 1) + (skip >> 4 & 1) +
	    (skip >> 5 & 1) + (skip >> 6 & 1) + (skip >> 7 & 1);

	if (vtx_size > vb_stride) {
		DRM_ERROR("vertex size greater than vb stride (%u > %u)\n",
			  vtx_size, vb_stride);
		return -EINVAL;
	}

	prim <<= 25;
	while (n != 0) {
		/* Can emit up to 255 vertices (85 triangles) at once. */
		unsigned int count = n > 255 ? 255 : n;

		/* Check indices */
		for (i = 0; i < count; ++i) {
			if (idx[i] > vb_size / (vb_stride * 4)) {
				DRM_ERROR("idx[%u]=%u out of range (0-%u)\n",
					  i, idx[i], vb_size / (vb_stride * 4));
				return -EINVAL;
			}
		}

		if (reorder) {
			/* Need to reorder vertices for correct flat
			 * shading while preserving the clock sense
			 * for correct culling. Only on Savage3D. */
			int reorder[3] = { 2, -1, -1 };

			BEGIN_DMA(count * vtx_size + 1);
			DMA_DRAW_PRIMITIVE(count, prim, skip);

			for (i = 0; i < count; ++i) {
				unsigned int j = idx[i + reorder[i % 3]];
				DMA_COPY(&vtxbuf[vb_stride * j], vtx_size);
			}

			DMA_COMMIT();
		} else {
			BEGIN_DMA(count * vtx_size + 1);
			DMA_DRAW_PRIMITIVE(count, prim, skip);

			for (i = 0; i < count; ++i) {
				unsigned int j = idx[i];
				DMA_COPY(&vtxbuf[vb_stride * j], vtx_size);
			}

			DMA_COMMIT();
		}

		idx += count;
		n -= count;

		prim |= BCI_CMD_DRAW_CONT;
	}

	return 0;
}

static int savage_dispatch_clear(drm_savage_private_t * dev_priv,
				 const drm_savage_cmd_header_t * cmd_header,
				 const drm_savage_cmd_header_t *data,
				 unsigned int nbox,
				 const struct drm_clip_rect *boxes)
{
	unsigned int flags = cmd_header->clear0.flags;
	unsigned int clear_cmd;
	unsigned int i, nbufs;
	DMA_LOCALS;

	if (nbox == 0)
		return 0;

	clear_cmd = BCI_CMD_RECT | BCI_CMD_RECT_XP | BCI_CMD_RECT_YP |
	    BCI_CMD_SEND_COLOR | BCI_CMD_DEST_PBD_NEW;
	BCI_CMD_SET_ROP(clear_cmd, 0xCC);

	nbufs = ((flags & SAVAGE_FRONT) ? 1 : 0) +
	    ((flags & SAVAGE_BACK) ? 1 : 0) + ((flags & SAVAGE_DEPTH) ? 1 : 0);
	if (nbufs == 0)
		return 0;

	if (data->clear1.mask != 0xffffffff) {
		/* set mask */
		BEGIN_DMA(2);
		DMA_SET_REGISTERS(SAVAGE_BITPLANEWTMASK, 1);
		DMA_WRITE(data->clear1.mask);
		DMA_COMMIT();
	}
	for (i = 0; i < nbox; ++i) {
		unsigned int x, y, w, h;
		unsigned int buf;
		x = boxes[i].x1, y = boxes[i].y1;
		w = boxes[i].x2 - boxes[i].x1;
		h = boxes[i].y2 - boxes[i].y1;
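		/* Each buffer cleared in this box takes six dwords:
		 * command, destination offset, destination BD, fill
		 * value, x/y and w/h. */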
		BEGIN_DMA(nbufs * 6);
		for (buf = SAVAGE_FRONT; buf <= SAVAGE_DEPTH; buf <<= 1) {
			if (!(flags & buf))
				continue;
			DMA_WRITE(clear_cmd);
			switch (buf) {
			case SAVAGE_FRONT:
				DMA_WRITE(dev_priv->front_offset);
				DMA_WRITE(dev_priv->front_bd);
				break;
			case SAVAGE_BACK:
				DMA_WRITE(dev_priv->back_offset);
				DMA_WRITE(dev_priv->back_bd);
				break;
			case SAVAGE_DEPTH:
				DMA_WRITE(dev_priv->depth_offset);
				DMA_WRITE(dev_priv->depth_bd);
				break;
			}
			DMA_WRITE(data->clear1.value);
			DMA_WRITE(BCI_X_Y(x, y));
			DMA_WRITE(BCI_W_H(w, h));
		}
		DMA_COMMIT();
	}
	if (data->clear1.mask != 0xffffffff) {
		/* reset mask */
		BEGIN_DMA(2);
		DMA_SET_REGISTERS(SAVAGE_BITPLANEWTMASK, 1);
		DMA_WRITE(0xffffffff);
		DMA_COMMIT();
	}

	return 0;
}

static int savage_dispatch_swap(drm_savage_private_t * dev_priv,
				unsigned int nbox, const struct drm_clip_rect *boxes)
{
	unsigned int swap_cmd;
	unsigned int i;
	DMA_LOCALS;

	if (nbox == 0)
		return 0;

	swap_cmd = BCI_CMD_RECT | BCI_CMD_RECT_XP | BCI_CMD_RECT_YP |
	    BCI_CMD_SRC_PBD_COLOR_NEW | BCI_CMD_DEST_GBD;
	BCI_CMD_SET_ROP(swap_cmd, 0xCC);

	for (i = 0; i < nbox; ++i) {
		BEGIN_DMA(6);
		DMA_WRITE(swap_cmd);
		DMA_WRITE(dev_priv->back_offset);
		DMA_WRITE(dev_priv->back_bd);
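		/* The box origin is deliberately written twice: the blit
		 * takes separate destination and source coordinates, which
		 * coincide for a straight back-to-front copy. */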
		DMA_WRITE(BCI_X_Y(boxes[i].x1, boxes[i].y1));
		DMA_WRITE(BCI_X_Y(boxes[i].x1, boxes[i].y1));
		DMA_WRITE(BCI_W_H(boxes[i].x2 - boxes[i].x1,
				  boxes[i].y2 - boxes[i].y1));
		DMA_COMMIT();
	}

	return 0;
}

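/*
 * Dispatch a run of drawing commands once per clip rectangle. The
 * hardware has a single scissor rectangle, so the same commands are
 * replayed for every box with the scissor updated in between.
 */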
static int savage_dispatch_draw(drm_savage_private_t * dev_priv,
				const drm_savage_cmd_header_t *start,
				const drm_savage_cmd_header_t *end,
				const struct drm_buf * dmabuf,
				const unsigned int *vtxbuf,
				unsigned int vb_size, unsigned int vb_stride,
				unsigned int nbox,
				const struct drm_clip_rect *boxes)
{
	unsigned int i, j;
	int ret;

	for (i = 0; i < nbox; ++i) {
		const drm_savage_cmd_header_t *cmdbuf;
		dev_priv->emit_clip_rect(dev_priv, &boxes[i]);

		cmdbuf = start;
		while (cmdbuf < end) {
			drm_savage_cmd_header_t cmd_header;
			cmd_header = *cmdbuf;
			cmdbuf++;
			switch (cmd_header.cmd.cmd) {
			case SAVAGE_CMD_DMA_PRIM:
				ret = savage_dispatch_dma_prim(
					dev_priv, &cmd_header, dmabuf);
				break;
			case SAVAGE_CMD_VB_PRIM:
				ret = savage_dispatch_vb_prim(
					dev_priv, &cmd_header,
					vtxbuf, vb_size, vb_stride);
				break;
			case SAVAGE_CMD_DMA_IDX:
				j = (cmd_header.idx.count + 3) / 4;
				/* j was checked in savage_bci_cmdbuf */
				ret = savage_dispatch_dma_idx(dev_priv,
					&cmd_header, (const uint16_t *)cmdbuf,
					dmabuf);
				cmdbuf += j;
				break;
			case SAVAGE_CMD_VB_IDX:
				j = (cmd_header.idx.count + 3) / 4;
				/* j was checked in savage_bci_cmdbuf */
				ret = savage_dispatch_vb_idx(dev_priv,
					&cmd_header, (const uint16_t *)cmdbuf,
					(const uint32_t *)vtxbuf, vb_size,
					vb_stride);
				cmdbuf += j;
				break;
			default:
				/* What's the best return code? EFAULT? */
				DRM_ERROR("IMPLEMENTATION ERROR: "
					  "non-drawing-command %d\n",
					  cmd_header.cmd.cmd);
				return -EINVAL;
			}

			if (ret != 0)
				return ret;
		}
	}

	return 0;
}

int savage_bci_cmdbuf(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_savage_private_t *dev_priv = dev->dev_private;
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf *dmabuf;
	drm_savage_cmdbuf_t *cmdbuf = data;
	drm_savage_cmd_header_t *kcmd_addr = NULL;
	drm_savage_cmd_header_t *first_draw_cmd;
	unsigned int *kvb_addr = NULL;
	struct drm_clip_rect *kbox_addr = NULL;
	unsigned int i, j;
	int ret = 0;

	DRM_DEBUG("\n");

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	if (dma && dma->buflist) {
		if (cmdbuf->dma_idx >= dma->buf_count) {
			DRM_ERROR
			    ("vertex buffer index %u out of range (0-%u)\n",
			     cmdbuf->dma_idx, dma->buf_count - 1);
			return -EINVAL;
		}
		dmabuf = dma->buflist[cmdbuf->dma_idx];
	} else {
		dmabuf = NULL;
	}

	/* Copy the user buffers into kernel temporary areas.  This hasn't been
	 * a performance loss compared to VERIFYAREA_READ/
	 * COPY_FROM_USER_UNCHECKED when done in other drivers, and is correct
	 * for locking on FreeBSD.
	 */
	if (cmdbuf->size) {
		kcmd_addr = kmalloc_array(cmdbuf->size, 8, GFP_KERNEL);
		if (kcmd_addr == NULL)
			return -ENOMEM;

		if (copy_from_user(kcmd_addr, cmdbuf->cmd_addr,
				       cmdbuf->size * 8))
		{
			kfree(kcmd_addr);
			return -EFAULT;
		}
		cmdbuf->cmd_addr = kcmd_addr;
	}
	if (cmdbuf->vb_size) {
		kvb_addr = memdup_user(cmdbuf->vb_addr, cmdbuf->vb_size);
		if (IS_ERR(kvb_addr)) {
			ret = PTR_ERR(kvb_addr);
			kvb_addr = NULL;
			goto done;
		}
		cmdbuf->vb_addr = kvb_addr;
	}
	if (cmdbuf->nbox) {
		kbox_addr = kmalloc_array(cmdbuf->nbox, sizeof(struct drm_clip_rect),
					  GFP_KERNEL);
		if (kbox_addr == NULL) {
			ret = -ENOMEM;
			goto done;
		}

		if (copy_from_user(kbox_addr, cmdbuf->box_addr,
				       cmdbuf->nbox * sizeof(struct drm_clip_rect))) {
			ret = -EFAULT;
			goto done;
		}
		cmdbuf->box_addr = kbox_addr;
	}

	/* Make sure writes to DMA buffers are finished before sending
	 * DMA commands to the graphics hardware. */
	mb();

	/* Coming from user space. Don't know if the Xserver has
	 * emitted wait commands. Assuming the worst. */
	dev_priv->waiting = 1;

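	/* cmdbuf->size counts 64-bit command slots: an indexed drawing
	 * command is followed by (count + 3) / 4 slots of 16-bit indices,
	 * and a state command by (count + 1) / 2 slots of register
	 * values. */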
	i = 0;
	first_draw_cmd = NULL;
	while (i < cmdbuf->size) {
		drm_savage_cmd_header_t cmd_header;
		cmd_header = *(drm_savage_cmd_header_t *)cmdbuf->cmd_addr;
		cmdbuf->cmd_addr++;
		i++;

		/* Group drawing commands with same state to minimize
		 * iterations over clip rects. */
		j = 0;
		switch (cmd_header.cmd.cmd) {
		case SAVAGE_CMD_DMA_IDX:
		case SAVAGE_CMD_VB_IDX:
			j = (cmd_header.idx.count + 3) / 4;
			if (i + j > cmdbuf->size) {
				DRM_ERROR("indexed drawing command extends "
					  "beyond end of command buffer\n");
				DMA_FLUSH();
				ret = -EINVAL;
				goto done;
			}
			fallthrough;
		case SAVAGE_CMD_DMA_PRIM:
		case SAVAGE_CMD_VB_PRIM:
			if (!first_draw_cmd)
				first_draw_cmd = cmdbuf->cmd_addr - 1;
			cmdbuf->cmd_addr += j;
			i += j;
			break;
		default:
			if (first_draw_cmd) {
				ret = savage_dispatch_draw(
				      dev_priv, first_draw_cmd,
				      cmdbuf->cmd_addr - 1,
				      dmabuf, cmdbuf->vb_addr, cmdbuf->vb_size,
				      cmdbuf->vb_stride,
				      cmdbuf->nbox, cmdbuf->box_addr);
				if (ret != 0)
					goto done;
				first_draw_cmd = NULL;
			}
		}
		if (first_draw_cmd)
			continue;

		switch (cmd_header.cmd.cmd) {
		case SAVAGE_CMD_STATE:
			j = (cmd_header.state.count + 1) / 2;
			if (i + j > cmdbuf->size) {
				DRM_ERROR("command SAVAGE_CMD_STATE extends "
					  "beyond end of command buffer\n");
				DMA_FLUSH();
				ret = -EINVAL;
				goto done;
			}
			ret = savage_dispatch_state(dev_priv, &cmd_header,
				(const uint32_t *)cmdbuf->cmd_addr);
			cmdbuf->cmd_addr += j;
			i += j;
			break;
		case SAVAGE_CMD_CLEAR:
			if (i + 1 > cmdbuf->size) {
				DRM_ERROR("command SAVAGE_CMD_CLEAR extends "
					  "beyond end of command buffer\n");
				DMA_FLUSH();
				ret = -EINVAL;
				goto done;
			}
			ret = savage_dispatch_clear(dev_priv, &cmd_header,
						    cmdbuf->cmd_addr,
						    cmdbuf->nbox,
						    cmdbuf->box_addr);
			cmdbuf->cmd_addr++;
			i++;
			break;
		case SAVAGE_CMD_SWAP:
			ret = savage_dispatch_swap(dev_priv, cmdbuf->nbox,
						   cmdbuf->box_addr);
			break;
		default:
			DRM_ERROR("invalid command 0x%x\n",
				  cmd_header.cmd.cmd);
			DMA_FLUSH();
			ret = -EINVAL;
			goto done;
		}

		if (ret != 0) {
			DMA_FLUSH();
			goto done;
		}
	}

	if (first_draw_cmd) {
		ret = savage_dispatch_draw(
			dev_priv, first_draw_cmd, cmdbuf->cmd_addr, dmabuf,
			cmdbuf->vb_addr, cmdbuf->vb_size, cmdbuf->vb_stride,
			cmdbuf->nbox, cmdbuf->box_addr);
		if (ret != 0) {
			DMA_FLUSH();
			goto done;
		}
	}

	DMA_FLUSH();

	if (dmabuf && cmdbuf->discard) {
		drm_savage_buf_priv_t *buf_priv = dmabuf->dev_private;
		uint16_t event;
		event = savage_bci_emit_event(dev_priv, SAVAGE_WAIT_3D);
		SET_AGE(&buf_priv->age, event, dev_priv->event_wrap);
		savage_freelist_put(dev, dmabuf);
	}

done:
	/* If we didn't need to allocate them, these'll be NULL */
	kfree(kcmd_addr);
	kfree(kvb_addr);
	kfree(kbox_addr);

	return ret;
}