/* savage_bci.c -- BCI support for Savage
 *
 * Copyright 2004 Felix Kuehling
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NON-INFRINGEMENT. IN NO EVENT SHALL FELIX KUEHLING BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
 * CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/uaccess.h>

#include <drm/drm_device.h>
#include <drm/drm_file.h>
#include <drm/drm_print.h>
#include <drm/savage_drm.h>

#include "savage_drv.h"

/* Need a long timeout because shadow status updates can take a while,
 * and so can waiting for events when the queue is full. */
#define SAVAGE_DEFAULT_USEC_TIMEOUT	1000000	/* 1s */
#define SAVAGE_EVENT_USEC_TIMEOUT	5000000	/* 5s */
#define SAVAGE_FREELIST_DEBUG		0

static int savage_do_cleanup_bci(struct drm_device *dev);

static int
savage_bci_wait_fifo_shadow(drm_savage_private_t * dev_priv, unsigned int n)
{
	uint32_t mask = dev_priv->status_used_mask;
	uint32_t threshold = dev_priv->bci_threshold_hi;
	uint32_t status;
	int i;

#if SAVAGE_BCI_DEBUG
	if (n > dev_priv->cob_size + SAVAGE_BCI_FIFO_SIZE - threshold)
		DRM_ERROR("Trying to emit %d words "
			  "(more than guaranteed space in COB)\n", n);
#endif

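	/* Poll the shadow copy of status word 0 until the number of used
	 * FIFO/COB entries drops below the high-water threshold. */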
	for (i = 0; i < SAVAGE_DEFAULT_USEC_TIMEOUT; i++) {
		mb();
		status = dev_priv->status_ptr[0];
		if ((status & mask) < threshold)
			return 0;
		udelay(1);
	}

#if SAVAGE_BCI_DEBUG
	DRM_ERROR("failed!\n");
	DRM_INFO(" status=0x%08x, threshold=0x%08x\n", status, threshold);
#endif
	return -EBUSY;
}

static int
savage_bci_wait_fifo_s3d(drm_savage_private_t * dev_priv, unsigned int n)
{
	uint32_t maxUsed = dev_priv->cob_size + SAVAGE_BCI_FIFO_SIZE - n;
	uint32_t status;
	int i;

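	/* maxUsed is the largest used-entry count that still leaves room
	 * for n more entries in the FIFO plus the COB. */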
	for (i = 0; i < SAVAGE_DEFAULT_USEC_TIMEOUT; i++) {
		status = SAVAGE_READ(SAVAGE_STATUS_WORD0);
		if ((status & SAVAGE_FIFO_USED_MASK_S3D) <= maxUsed)
			return 0;
		udelay(1);
	}

#if SAVAGE_BCI_DEBUG
	DRM_ERROR("failed!\n");
	DRM_INFO(" status=0x%08x\n", status);
#endif
	return -EBUSY;
}

static int
savage_bci_wait_fifo_s4(drm_savage_private_t * dev_priv, unsigned int n)
{
	uint32_t maxUsed = dev_priv->cob_size + SAVAGE_BCI_FIFO_SIZE - n;
	uint32_t status;
	int i;

	for (i = 0; i < SAVAGE_DEFAULT_USEC_TIMEOUT; i++) {
		status = SAVAGE_READ(SAVAGE_ALT_STATUS_WORD0);
		if ((status & SAVAGE_FIFO_USED_MASK_S4) <= maxUsed)
			return 0;
		udelay(1);
	}

#if SAVAGE_BCI_DEBUG
	DRM_ERROR("failed!\n");
	DRM_INFO(" status=0x%08x\n", status);
#endif
	return -EBUSY;
}

/*
 * Waiting for events.
 *
 * The BIOS resets the event tag to 0 on mode changes. Therefore we
 * never emit 0 to the event tag. If we find a 0 event tag we know the
 * BIOS stomped on it and return success assuming that the BIOS waited
 * for engine idle.
 *
 * Note: if the Xserver uses the event tag it has to follow the same
 * rule. Otherwise there may be glitches every 2^16 events.
 */
static int
savage_bci_wait_event_shadow(drm_savage_private_t * dev_priv, uint16_t e)
{
	uint32_t status;
	int i;

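	/* The tag comparison below is modulo 2^16: (status - e) mod 2^16
	 * falling in the lower half of the range means the hardware tag has
	 * reached or passed e. A zero tag means the BIOS reset it (see the
	 * comment above). */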
	for (i = 0; i < SAVAGE_EVENT_USEC_TIMEOUT; i++) {
		mb();
		status = dev_priv->status_ptr[1];
		if ((((status & 0xffff) - e) & 0xffff) <= 0x7fff ||
		    (status & 0xffff) == 0)
			return 0;
		udelay(1);
	}

#if SAVAGE_BCI_DEBUG
	DRM_ERROR("failed!\n");
	DRM_INFO(" status=0x%08x, e=0x%04x\n", status, e);
#endif

	return -EBUSY;
}

static int
savage_bci_wait_event_reg(drm_savage_private_t * dev_priv, uint16_t e)
{
	uint32_t status;
	int i;

	for (i = 0; i < SAVAGE_EVENT_USEC_TIMEOUT; i++) {
		status = SAVAGE_READ(SAVAGE_STATUS_WORD1);
		if ((((status & 0xffff) - e) & 0xffff) <= 0x7fff ||
		    (status & 0xffff) == 0)
			return 0;
		udelay(1);
	}

#if SAVAGE_BCI_DEBUG
	DRM_ERROR("failed!\n");
	DRM_INFO(" status=0x%08x, e=0x%04x\n", status, e);
#endif

	return -EBUSY;
}

uint16_t savage_bci_emit_event(drm_savage_private_t * dev_priv,
			       unsigned int flags)
{
	uint16_t count;
	BCI_LOCALS;

	if (dev_priv->status_ptr) {
		/* coordinate with Xserver */
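		/* Slot 1023 of the shadow status page holds the last event
		 * tag emitted by any BCI user. If it is smaller than our
		 * cached counter, the 16-bit tag wrapped since we last
		 * emitted an event (presumably advanced by the X server),
		 * so account for the wrap here. */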
		count = dev_priv->status_ptr[1023];
		if (count < dev_priv->event_counter)
			dev_priv->event_wrap++;
	} else {
		count = dev_priv->event_counter;
	}
	count = (count + 1) & 0xffff;
	if (count == 0) {
		count++;	/* See the comment above savage_wait_event_*. */
		dev_priv->event_wrap++;
	}
	dev_priv->event_counter = count;
	if (dev_priv->status_ptr)
		dev_priv->status_ptr[1023] = (uint32_t) count;

	if ((flags & (SAVAGE_WAIT_2D | SAVAGE_WAIT_3D))) {
		unsigned int wait_cmd = BCI_CMD_WAIT;
		if ((flags & SAVAGE_WAIT_2D))
			wait_cmd |= BCI_CMD_WAIT_2D;
		if ((flags & SAVAGE_WAIT_3D))
			wait_cmd |= BCI_CMD_WAIT_3D;
		BEGIN_BCI(2);
		BCI_WRITE(wait_cmd);
	} else {
		BEGIN_BCI(1);
	}
	BCI_WRITE(BCI_CMD_UPDATE_EVENT_TAG | (uint32_t) count);

	return count;
}

/*
 * Freelist management
 */
static int savage_freelist_init(struct drm_device * dev)
{
	drm_savage_private_t *dev_priv = dev->dev_private;
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf *buf;
	drm_savage_buf_priv_t *entry;
	int i;
	DRM_DEBUG("count=%d\n", dma->buf_count);

	dev_priv->head.next = &dev_priv->tail;
	dev_priv->head.prev = NULL;
	dev_priv->head.buf = NULL;

	dev_priv->tail.next = NULL;
	dev_priv->tail.prev = &dev_priv->head;
	dev_priv->tail.buf = NULL;

	for (i = 0; i < dma->buf_count; i++) {
		buf = dma->buflist[i];
		entry = buf->dev_private;

		SET_AGE(&entry->age, 0, 0);
		entry->buf = buf;

		entry->next = dev_priv->head.next;
		entry->prev = &dev_priv->head;
		dev_priv->head.next->prev = entry;
		dev_priv->head.next = entry;
	}

	return 0;
}

static struct drm_buf *savage_freelist_get(struct drm_device * dev)
{
	drm_savage_private_t *dev_priv = dev->dev_private;
	drm_savage_buf_priv_t *tail = dev_priv->tail.prev;
	uint16_t event;
	unsigned int wrap;
	DRM_DEBUG("\n");

	UPDATE_EVENT_COUNTER();
	if (dev_priv->status_ptr)
		event = dev_priv->status_ptr[1] & 0xffff;
	else
		event = SAVAGE_READ(SAVAGE_STATUS_WORD1) & 0xffff;
	wrap = dev_priv->event_wrap;
	if (event > dev_priv->event_counter)
		wrap--;	/* hardware hasn't passed the last wrap yet */

	DRM_DEBUG(" tail=0x%04x %d\n", tail->age.event, tail->age.wrap);
	DRM_DEBUG(" head=0x%04x %d\n", event, wrap);

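	/* A zero event tag means the BIOS reset it and waited for engine
	 * idle (see the comment above savage_bci_wait_event_shadow), so the
	 * buffer at the tail can be treated as retired. */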
	if (tail->buf && (TEST_AGE(&tail->age, event, wrap) || event == 0)) {
		drm_savage_buf_priv_t *next = tail->next;
		drm_savage_buf_priv_t *prev = tail->prev;
		prev->next = next;
		next->prev = prev;
		tail->next = tail->prev = NULL;
		return tail->buf;
	}

	DRM_DEBUG("returning NULL, tail->buf=%p!\n", tail->buf);
	return NULL;
}

void savage_freelist_put(struct drm_device * dev, struct drm_buf * buf)
{
	drm_savage_private_t *dev_priv = dev->dev_private;
	drm_savage_buf_priv_t *entry = buf->dev_private, *prev, *next;

	DRM_DEBUG("age=0x%04x wrap=%d\n", entry->age.event, entry->age.wrap);

	if (entry->next != NULL || entry->prev != NULL) {
		DRM_ERROR("entry already on freelist.\n");
		return;
	}

	prev = &dev_priv->head;
	next = prev->next;
	prev->next = entry;
	next->prev = entry;
	entry->prev = prev;
	entry->next = next;
}

/*
 * Command DMA
 */
static int savage_dma_init(drm_savage_private_t * dev_priv)
{
	unsigned int i;

	dev_priv->nr_dma_pages = dev_priv->cmd_dma->size /
	    (SAVAGE_DMA_PAGE_SIZE * 4);
	dev_priv->dma_pages = kmalloc_array(dev_priv->nr_dma_pages,
					    sizeof(drm_savage_dma_page_t),
					    GFP_KERNEL);
	if (dev_priv->dma_pages == NULL)
		return -ENOMEM;

	for (i = 0; i < dev_priv->nr_dma_pages; ++i) {
		SET_AGE(&dev_priv->dma_pages[i].age, 0, 0);
		dev_priv->dma_pages[i].used = 0;
		dev_priv->dma_pages[i].flushed = 0;
	}
	SET_AGE(&dev_priv->last_dma_age, 0, 0);

	dev_priv->first_dma_page = 0;
	dev_priv->current_dma_page = 0;

	return 0;
}

void savage_dma_reset(drm_savage_private_t * dev_priv)
{
	uint16_t event;
	unsigned int wrap, i;
	event = savage_bci_emit_event(dev_priv, 0);
	wrap = dev_priv->event_wrap;
	for (i = 0; i < dev_priv->nr_dma_pages; ++i) {
		SET_AGE(&dev_priv->dma_pages[i].age, event, wrap);
		dev_priv->dma_pages[i].used = 0;
		dev_priv->dma_pages[i].flushed = 0;
	}
	SET_AGE(&dev_priv->last_dma_age, event, wrap);
	dev_priv->first_dma_page = dev_priv->current_dma_page = 0;
}

void savage_dma_wait(drm_savage_private_t * dev_priv, unsigned int page)
{
	uint16_t event;
	unsigned int wrap;

	/* Faked DMA buffer pages don't age. */
	if (dev_priv->cmd_dma == &dev_priv->fake_dma)
		return;

	UPDATE_EVENT_COUNTER();
	if (dev_priv->status_ptr)
		event = dev_priv->status_ptr[1] & 0xffff;
	else
		event = SAVAGE_READ(SAVAGE_STATUS_WORD1) & 0xffff;
	wrap = dev_priv->event_wrap;
	if (event > dev_priv->event_counter)
		wrap--;	/* hardware hasn't passed the last wrap yet */

	if (dev_priv->dma_pages[page].age.wrap > wrap ||
	    (dev_priv->dma_pages[page].age.wrap == wrap &&
	     dev_priv->dma_pages[page].age.event > event)) {
		if (dev_priv->wait_evnt(dev_priv,
					dev_priv->dma_pages[page].age.event)
		    < 0)
			DRM_ERROR("wait_evnt failed!\n");
	}
}

uint32_t *savage_dma_alloc(drm_savage_private_t * dev_priv, unsigned int n)
{
	unsigned int cur = dev_priv->current_dma_page;
	unsigned int rest = SAVAGE_DMA_PAGE_SIZE -
	    dev_priv->dma_pages[cur].used;
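	/* nr_pages: how many additional whole pages are needed for the
	 * words that do not fit into the rest of the current page
	 * (ceiling division). */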
	unsigned int nr_pages = (n - rest + SAVAGE_DMA_PAGE_SIZE - 1) /
	    SAVAGE_DMA_PAGE_SIZE;
	uint32_t *dma_ptr;
	unsigned int i;

	DRM_DEBUG("cur=%u, cur->used=%u, n=%u, rest=%u, nr_pages=%u\n",
		  cur, dev_priv->dma_pages[cur].used, n, rest, nr_pages);

	if (cur + nr_pages < dev_priv->nr_dma_pages) {
		dma_ptr = (uint32_t *) dev_priv->cmd_dma->handle +
		    cur * SAVAGE_DMA_PAGE_SIZE + dev_priv->dma_pages[cur].used;
		if (n < rest)
			rest = n;
		dev_priv->dma_pages[cur].used += rest;
		n -= rest;
		cur++;
	} else {
		dev_priv->dma_flush(dev_priv);
		nr_pages =
		    (n + SAVAGE_DMA_PAGE_SIZE - 1) / SAVAGE_DMA_PAGE_SIZE;
		for (i = cur; i < dev_priv->nr_dma_pages; ++i) {
			dev_priv->dma_pages[i].age = dev_priv->last_dma_age;
			dev_priv->dma_pages[i].used = 0;
			dev_priv->dma_pages[i].flushed = 0;
		}
		dma_ptr = (uint32_t *) dev_priv->cmd_dma->handle;
		dev_priv->first_dma_page = cur = 0;
	}
	for (i = cur; nr_pages > 0; ++i, --nr_pages) {
#if SAVAGE_DMA_DEBUG
		if (dev_priv->dma_pages[i].used) {
			DRM_ERROR("unflushed page %u: used=%u\n",
				  i, dev_priv->dma_pages[i].used);
		}
#endif
		if (n > SAVAGE_DMA_PAGE_SIZE)
			dev_priv->dma_pages[i].used = SAVAGE_DMA_PAGE_SIZE;
		else
			dev_priv->dma_pages[i].used = n;
		n -= SAVAGE_DMA_PAGE_SIZE;
	}
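	/* After the loop i is one past the last page that received data,
	 * so step back to make that the current page. */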
	dev_priv->current_dma_page = --i;

	DRM_DEBUG("cur=%u, cur->used=%u, n=%u\n",
		  i, dev_priv->dma_pages[i].used, n);

	savage_dma_wait(dev_priv, dev_priv->current_dma_page);

	return dma_ptr;
}

static void savage_dma_flush(drm_savage_private_t * dev_priv)
{
	unsigned int first = dev_priv->first_dma_page;
	unsigned int cur = dev_priv->current_dma_page;
	uint16_t event;
	unsigned int wrap, pad, align, len, i;
	unsigned long phys_addr;
	BCI_LOCALS;

	if (first == cur &&
	    dev_priv->dma_pages[cur].used == dev_priv->dma_pages[cur].flushed)
		return;

	/* pad length to multiples of 2 entries
	 * align start of next DMA block to multiples of 8 entries */
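	/* For a power-of-two m, (-x) & (m - 1) is the number of entries
	 * needed to round x up to the next multiple of m. */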
	pad = -dev_priv->dma_pages[cur].used & 1;
	align = -(dev_priv->dma_pages[cur].used + pad) & 7;

	DRM_DEBUG("first=%u, cur=%u, first->flushed=%u, cur->used=%u, "
		  "pad=%u, align=%u\n",
		  first, cur, dev_priv->dma_pages[first].flushed,
		  dev_priv->dma_pages[cur].used, pad, align);

	/* pad with noops */
	if (pad) {
		uint32_t *dma_ptr = (uint32_t *) dev_priv->cmd_dma->handle +
		    cur * SAVAGE_DMA_PAGE_SIZE + dev_priv->dma_pages[cur].used;
		dev_priv->dma_pages[cur].used += pad;
		while (pad != 0) {
			*dma_ptr++ = BCI_CMD_WAIT;
			pad--;
		}
	}

	mb();

	/* do flush ... */
	phys_addr = dev_priv->cmd_dma->offset +
	    (first * SAVAGE_DMA_PAGE_SIZE +
	     dev_priv->dma_pages[first].flushed) * 4;
	len = (cur - first) * SAVAGE_DMA_PAGE_SIZE +
	    dev_priv->dma_pages[cur].used - dev_priv->dma_pages[first].flushed;

	DRM_DEBUG("phys_addr=%lx, len=%u\n",
		  phys_addr | dev_priv->dma_type, len);

	BEGIN_BCI(3);
	BCI_SET_REGISTERS(SAVAGE_DMABUFADDR, 1);
	BCI_WRITE(phys_addr | dev_priv->dma_type);
	BCI_DMA(len);

	/* fix alignment of the start of the next block */
	dev_priv->dma_pages[cur].used += align;

	/* age DMA pages */
	event = savage_bci_emit_event(dev_priv, 0);
	wrap = dev_priv->event_wrap;
	for (i = first; i < cur; ++i) {
		SET_AGE(&dev_priv->dma_pages[i].age, event, wrap);
		dev_priv->dma_pages[i].used = 0;
		dev_priv->dma_pages[i].flushed = 0;
	}
	/* age the current page only when it's full */
	if (dev_priv->dma_pages[cur].used == SAVAGE_DMA_PAGE_SIZE) {
		SET_AGE(&dev_priv->dma_pages[cur].age, event, wrap);
		dev_priv->dma_pages[cur].used = 0;
		dev_priv->dma_pages[cur].flushed = 0;
		/* advance to next page */
		cur++;
		if (cur == dev_priv->nr_dma_pages)
			cur = 0;
		dev_priv->first_dma_page = dev_priv->current_dma_page = cur;
	} else {
		dev_priv->first_dma_page = cur;
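		/* The loop above ran up to cur, so i == cur here: record how
		 * much of the current page has already been flushed. */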
		dev_priv->dma_pages[cur].flushed = dev_priv->dma_pages[i].used;
	}
	SET_AGE(&dev_priv->last_dma_age, event, wrap);

	DRM_DEBUG("first=cur=%u, cur->used=%u, cur->flushed=%u\n", cur,
		  dev_priv->dma_pages[cur].used,
		  dev_priv->dma_pages[cur].flushed);
}

static void savage_fake_dma_flush(drm_savage_private_t * dev_priv)
{
	unsigned int i, j;
	BCI_LOCALS;

	if (dev_priv->first_dma_page == dev_priv->current_dma_page &&
	    dev_priv->dma_pages[dev_priv->current_dma_page].used == 0)
		return;

	DRM_DEBUG("first=%u, cur=%u, cur->used=%u\n",
		  dev_priv->first_dma_page, dev_priv->current_dma_page,
		  dev_priv->dma_pages[dev_priv->current_dma_page].used);

	for (i = dev_priv->first_dma_page;
	     i <= dev_priv->current_dma_page && dev_priv->dma_pages[i].used;
	     ++i) {
		uint32_t *dma_ptr = (uint32_t *) dev_priv->cmd_dma->handle +
		    i * SAVAGE_DMA_PAGE_SIZE;
#if SAVAGE_DMA_DEBUG
		/* Sanity check: all pages except the last one must be full. */
		if (i < dev_priv->current_dma_page &&
		    dev_priv->dma_pages[i].used != SAVAGE_DMA_PAGE_SIZE) {
			DRM_ERROR("partial DMA page %u: used=%u\n",
				  i, dev_priv->dma_pages[i].used);
		}
#endif
		BEGIN_BCI(dev_priv->dma_pages[i].used);
		for (j = 0; j < dev_priv->dma_pages[i].used; ++j) {
			BCI_WRITE(dma_ptr[j]);
		}
		dev_priv->dma_pages[i].used = 0;
	}

	/* reset to first page */
	dev_priv->first_dma_page = dev_priv->current_dma_page = 0;
}

int savage_driver_load(struct drm_device *dev, unsigned long chipset)
{
	drm_savage_private_t *dev_priv;

	dev_priv = kzalloc(sizeof(drm_savage_private_t), GFP_KERNEL);
	if (dev_priv == NULL)
		return -ENOMEM;

	dev->dev_private = (void *)dev_priv;

	dev_priv->chipset = (enum savage_family)chipset;

	pci_set_master(dev->pdev);

	return 0;
}



/*
 * Initialize mappings. On Savage4 and SavageIX the alignment
 * and size of the aperture are not suitable for automatic MTRR setup
 * in drm_legacy_addmap. Therefore we add them manually before the maps are
 * initialized, and tear them down on last close.
 */
int savage_driver_firstopen(struct drm_device *dev)
{
	drm_savage_private_t *dev_priv = dev->dev_private;
	unsigned long mmio_base, fb_base, fb_size, aperture_base;
	/* fb_rsrc and aper_rsrc aren't really used currently, but still exist
	 * in case we decide we need information on the BAR for BSD in the
	 * future.
	 */
	unsigned int fb_rsrc, aper_rsrc;
	int ret = 0;

	if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
		fb_rsrc = 0;
		fb_base = pci_resource_start(dev->pdev, 0);
		fb_size = SAVAGE_FB_SIZE_S3;
		mmio_base = fb_base + SAVAGE_FB_SIZE_S3;
		aper_rsrc = 0;
		aperture_base = fb_base + SAVAGE_APERTURE_OFFSET;
		/* this should always be true */
		if (pci_resource_len(dev->pdev, 0) == 0x08000000) {
			/* Don't make MMIO write-combining! We need 3
			 * MTRRs. */
			dev_priv->mtrr_handles[0] =
			    arch_phys_wc_add(fb_base, 0x01000000);
			dev_priv->mtrr_handles[1] =
			    arch_phys_wc_add(fb_base + 0x02000000,
					     0x02000000);
			dev_priv->mtrr_handles[2] =
			    arch_phys_wc_add(fb_base + 0x04000000,
					     0x04000000);
		} else {
			DRM_ERROR("strange pci_resource_len %08llx\n",
				  (unsigned long long)
				  pci_resource_len(dev->pdev, 0));
		}
	} else if (dev_priv->chipset != S3_SUPERSAVAGE &&
		   dev_priv->chipset != S3_SAVAGE2000) {
		mmio_base = pci_resource_start(dev->pdev, 0);
		fb_rsrc = 1;
		fb_base = pci_resource_start(dev->pdev, 1);
		fb_size = SAVAGE_FB_SIZE_S4;
		aper_rsrc = 1;
		aperture_base = fb_base + SAVAGE_APERTURE_OFFSET;
		/* this should always be true */
		if (pci_resource_len(dev->pdev, 1) == 0x08000000) {
			/* Can use one MTRR to cover both fb and
			 * aperture. */
			dev_priv->mtrr_handles[0] =
			    arch_phys_wc_add(fb_base,
					     0x08000000);
		} else {
			DRM_ERROR("strange pci_resource_len %08llx\n",
				  (unsigned long long)
				  pci_resource_len(dev->pdev, 1));
		}
	} else {
		mmio_base = pci_resource_start(dev->pdev, 0);
		fb_rsrc = 1;
		fb_base = pci_resource_start(dev->pdev, 1);
		fb_size = pci_resource_len(dev->pdev, 1);
		aper_rsrc = 2;
		aperture_base = pci_resource_start(dev->pdev, 2);
		/* Automatic MTRR setup will do the right thing. */
	}

	ret = drm_legacy_addmap(dev, mmio_base, SAVAGE_MMIO_SIZE,
				_DRM_REGISTERS, _DRM_READ_ONLY,
				&dev_priv->mmio);
	if (ret)
		return ret;

	ret = drm_legacy_addmap(dev, fb_base, fb_size, _DRM_FRAME_BUFFER,
				_DRM_WRITE_COMBINING, &dev_priv->fb);
	if (ret)
		return ret;

	ret = drm_legacy_addmap(dev, aperture_base, SAVAGE_APERTURE_SIZE,
				_DRM_FRAME_BUFFER, _DRM_WRITE_COMBINING,
				&dev_priv->aperture);
	return ret;
}

/*
 * Delete MTRRs and free device-private data.
 */
void savage_driver_lastclose(struct drm_device *dev)
{
	drm_savage_private_t *dev_priv = dev->dev_private;
	int i;

	for (i = 0; i < 3; ++i) {
		arch_phys_wc_del(dev_priv->mtrr_handles[i]);
		dev_priv->mtrr_handles[i] = 0;
	}
}

void savage_driver_unload(struct drm_device *dev)
{
	drm_savage_private_t *dev_priv = dev->dev_private;

	kfree(dev_priv);
}


static int savage_do_init_bci(struct drm_device * dev, drm_savage_init_t * init)
{
	drm_savage_private_t *dev_priv = dev->dev_private;

	if (init->fb_bpp != 16 && init->fb_bpp != 32) {
		DRM_ERROR("invalid frame buffer bpp %d!\n", init->fb_bpp);
		return -EINVAL;
	}
	if (init->depth_bpp != 16 && init->depth_bpp != 32) {
		DRM_ERROR("invalid depth buffer bpp %d!\n", init->depth_bpp);
		return -EINVAL;
	}
	if (init->dma_type != SAVAGE_DMA_AGP &&
	    init->dma_type != SAVAGE_DMA_PCI) {
		DRM_ERROR("invalid dma memory type %d!\n", init->dma_type);
		return -EINVAL;
	}

	dev_priv->cob_size = init->cob_size;
	dev_priv->bci_threshold_lo = init->bci_threshold_lo;
	dev_priv->bci_threshold_hi = init->bci_threshold_hi;
	dev_priv->dma_type = init->dma_type;

	dev_priv->fb_bpp = init->fb_bpp;
	dev_priv->front_offset = init->front_offset;
	dev_priv->front_pitch = init->front_pitch;
	dev_priv->back_offset = init->back_offset;
	dev_priv->back_pitch = init->back_pitch;
	dev_priv->depth_bpp = init->depth_bpp;
	dev_priv->depth_offset = init->depth_offset;
	dev_priv->depth_pitch = init->depth_pitch;

	dev_priv->texture_offset = init->texture_offset;
	dev_priv->texture_size = init->texture_size;

	dev_priv->sarea = drm_legacy_getsarea(dev);
	if (!dev_priv->sarea) {
		DRM_ERROR("could not find sarea!\n");
		savage_do_cleanup_bci(dev);
		return -EINVAL;
	}
	if (init->status_offset != 0) {
		dev_priv->status = drm_legacy_findmap(dev, init->status_offset);
		if (!dev_priv->status) {
			DRM_ERROR("could not find shadow status region!\n");
			savage_do_cleanup_bci(dev);
			return -EINVAL;
		}
	} else {
		dev_priv->status = NULL;
	}
	if (dev_priv->dma_type == SAVAGE_DMA_AGP && init->buffers_offset) {
		dev->agp_buffer_token = init->buffers_offset;
		dev->agp_buffer_map = drm_legacy_findmap(dev,
							 init->buffers_offset);
		if (!dev->agp_buffer_map) {
			DRM_ERROR("could not find DMA buffer region!\n");
			savage_do_cleanup_bci(dev);
			return -EINVAL;
		}
		drm_legacy_ioremap(dev->agp_buffer_map, dev);
		if (!dev->agp_buffer_map->handle) {
			DRM_ERROR("failed to ioremap DMA buffer region!\n");
			savage_do_cleanup_bci(dev);
			return -ENOMEM;
		}
	}
	if (init->agp_textures_offset) {
		dev_priv->agp_textures =
		    drm_legacy_findmap(dev, init->agp_textures_offset);
		if (!dev_priv->agp_textures) {
			DRM_ERROR("could not find agp texture region!\n");
			savage_do_cleanup_bci(dev);
			return -EINVAL;
		}
	} else {
		dev_priv->agp_textures = NULL;
	}

	if (init->cmd_dma_offset) {
		if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
			DRM_ERROR("command DMA not supported on "
				  "Savage3D/MX/IX.\n");
			savage_do_cleanup_bci(dev);
			return -EINVAL;
		}
		if (dev->dma && dev->dma->buflist) {
			DRM_ERROR("command and vertex DMA not supported "
				  "at the same time.\n");
			savage_do_cleanup_bci(dev);
			return -EINVAL;
		}
		dev_priv->cmd_dma = drm_legacy_findmap(dev, init->cmd_dma_offset);
		if (!dev_priv->cmd_dma) {
			DRM_ERROR("could not find command DMA region!\n");
			savage_do_cleanup_bci(dev);
			return -EINVAL;
		}
		if (dev_priv->dma_type == SAVAGE_DMA_AGP) {
			if (dev_priv->cmd_dma->type != _DRM_AGP) {
				DRM_ERROR("AGP command DMA region is not a "
					  "_DRM_AGP map!\n");
				savage_do_cleanup_bci(dev);
				return -EINVAL;
			}
			drm_legacy_ioremap(dev_priv->cmd_dma, dev);
			if (!dev_priv->cmd_dma->handle) {
				DRM_ERROR("failed to ioremap command "
					  "DMA region!\n");
				savage_do_cleanup_bci(dev);
				return -ENOMEM;
			}
		} else if (dev_priv->cmd_dma->type != _DRM_CONSISTENT) {
			DRM_ERROR("PCI command DMA region is not a "
				  "_DRM_CONSISTENT map!\n");
			savage_do_cleanup_bci(dev);
			return -EINVAL;
		}
	} else {
		dev_priv->cmd_dma = NULL;
	}

	dev_priv->dma_flush = savage_dma_flush;
	if (!dev_priv->cmd_dma) {
		DRM_DEBUG("falling back to faked command DMA.\n");
		dev_priv->fake_dma.offset = 0;
		dev_priv->fake_dma.size = SAVAGE_FAKE_DMA_SIZE;
		dev_priv->fake_dma.type = _DRM_SHM;
		dev_priv->fake_dma.handle = kmalloc(SAVAGE_FAKE_DMA_SIZE,
						    GFP_KERNEL);
		if (!dev_priv->fake_dma.handle) {
			DRM_ERROR("could not allocate faked DMA buffer!\n");
			savage_do_cleanup_bci(dev);
			return -ENOMEM;
		}
		dev_priv->cmd_dma = &dev_priv->fake_dma;
		dev_priv->dma_flush = savage_fake_dma_flush;
	}

	dev_priv->sarea_priv =
	    (drm_savage_sarea_t *) ((uint8_t *) dev_priv->sarea->handle +
				    init->sarea_priv_offset);

	/* setup bitmap descriptors */
	{
		unsigned int color_tile_format;
		unsigned int depth_tile_format;
		unsigned int front_stride, back_stride, depth_stride;
		if (dev_priv->chipset <= S3_SAVAGE4) {
			color_tile_format = dev_priv->fb_bpp == 16 ?
			    SAVAGE_BD_TILE_16BPP : SAVAGE_BD_TILE_32BPP;
			depth_tile_format = dev_priv->depth_bpp == 16 ?
			    SAVAGE_BD_TILE_16BPP : SAVAGE_BD_TILE_32BPP;
		} else {
			color_tile_format = SAVAGE_BD_TILE_DEST;
			depth_tile_format = SAVAGE_BD_TILE_DEST;
		}
		front_stride = dev_priv->front_pitch / (dev_priv->fb_bpp / 8);
		back_stride = dev_priv->back_pitch / (dev_priv->fb_bpp / 8);
		depth_stride =
		    dev_priv->depth_pitch / (dev_priv->depth_bpp / 8);

		dev_priv->front_bd = front_stride | SAVAGE_BD_BW_DISABLE |
		    (dev_priv->fb_bpp << SAVAGE_BD_BPP_SHIFT) |
		    (color_tile_format << SAVAGE_BD_TILE_SHIFT);

		dev_priv->back_bd = back_stride | SAVAGE_BD_BW_DISABLE |
		    (dev_priv->fb_bpp << SAVAGE_BD_BPP_SHIFT) |
		    (color_tile_format << SAVAGE_BD_TILE_SHIFT);

		dev_priv->depth_bd = depth_stride | SAVAGE_BD_BW_DISABLE |
		    (dev_priv->depth_bpp << SAVAGE_BD_BPP_SHIFT) |
		    (depth_tile_format << SAVAGE_BD_TILE_SHIFT);
	}

	/* setup status and bci ptr */
	dev_priv->event_counter = 0;
	dev_priv->event_wrap = 0;
	dev_priv->bci_ptr = (volatile uint32_t *)
	    ((uint8_t *) dev_priv->mmio->handle + SAVAGE_BCI_OFFSET);
	if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
		dev_priv->status_used_mask = SAVAGE_FIFO_USED_MASK_S3D;
	} else {
		dev_priv->status_used_mask = SAVAGE_FIFO_USED_MASK_S4;
	}
	if (dev_priv->status != NULL) {
		dev_priv->status_ptr =
		    (volatile uint32_t *)dev_priv->status->handle;
		dev_priv->wait_fifo = savage_bci_wait_fifo_shadow;
		dev_priv->wait_evnt = savage_bci_wait_event_shadow;
		dev_priv->status_ptr[1023] = dev_priv->event_counter;
	} else {
		dev_priv->status_ptr = NULL;
		if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
			dev_priv->wait_fifo = savage_bci_wait_fifo_s3d;
		} else {
			dev_priv->wait_fifo = savage_bci_wait_fifo_s4;
		}
		dev_priv->wait_evnt = savage_bci_wait_event_reg;
	}

	/* cliprect functions */
	if (S3_SAVAGE3D_SERIES(dev_priv->chipset))
		dev_priv->emit_clip_rect = savage_emit_clip_rect_s3d;
	else
		dev_priv->emit_clip_rect = savage_emit_clip_rect_s4;

	if (savage_freelist_init(dev) < 0) {
		DRM_ERROR("could not initialize freelist\n");
		savage_do_cleanup_bci(dev);
		return -ENOMEM;
	}

	if (savage_dma_init(dev_priv) < 0) {
		DRM_ERROR("could not initialize command DMA\n");
		savage_do_cleanup_bci(dev);
		return -ENOMEM;
	}

	return 0;
}

static int savage_do_cleanup_bci(struct drm_device * dev)
{
	drm_savage_private_t *dev_priv = dev->dev_private;

	if (dev_priv->cmd_dma == &dev_priv->fake_dma) {
		kfree(dev_priv->fake_dma.handle);
	} else if (dev_priv->cmd_dma && dev_priv->cmd_dma->handle &&
		   dev_priv->cmd_dma->type == _DRM_AGP &&
		   dev_priv->dma_type == SAVAGE_DMA_AGP)
		drm_legacy_ioremapfree(dev_priv->cmd_dma, dev);

	if (dev_priv->dma_type == SAVAGE_DMA_AGP &&
	    dev->agp_buffer_map && dev->agp_buffer_map->handle) {
		drm_legacy_ioremapfree(dev->agp_buffer_map, dev);
		/* make sure the next instance (which may be running
		 * in PCI mode) doesn't try to use an old
		 * agp_buffer_map. */
		dev->agp_buffer_map = NULL;
	}

	kfree(dev_priv->dma_pages);

	return 0;
}

static int savage_bci_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_savage_init_t *init = data;

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	switch (init->func) {
	case SAVAGE_INIT_BCI:
		return savage_do_init_bci(dev, init);
	case SAVAGE_CLEANUP_BCI:
		return savage_do_cleanup_bci(dev);
	}

	return -EINVAL;
}

static int savage_bci_event_emit(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_savage_private_t *dev_priv = dev->dev_private;
	drm_savage_event_emit_t *event = data;

	DRM_DEBUG("\n");

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	event->count = savage_bci_emit_event(dev_priv, event->flags);
	event->count |= dev_priv->event_wrap << 16;

	return 0;
}

static int savage_bci_event_wait(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_savage_private_t *dev_priv = dev->dev_private;
	drm_savage_event_wait_t *event = data;
	unsigned int event_e, hw_e;
	unsigned int event_w, hw_w;

	DRM_DEBUG("\n");

	UPDATE_EVENT_COUNTER();
	if (dev_priv->status_ptr)
		hw_e = dev_priv->status_ptr[1] & 0xffff;
	else
		hw_e = SAVAGE_READ(SAVAGE_STATUS_WORD1) & 0xffff;
	hw_w = dev_priv->event_wrap;
	if (hw_e > dev_priv->event_counter)
		hw_w--;	/* hardware hasn't passed the last wrap yet */

	event_e = event->count & 0xffff;
	event_w = event->count >> 16;

	/* Don't need to wait if
	 * - event counter wrapped since the event was emitted or
	 * - the hardware has advanced up to or over the event to wait for.
	 */
	if (event_w < hw_w || (event_w == hw_w && event_e <= hw_e))
		return 0;
	else
		return dev_priv->wait_evnt(dev_priv, event_e);
}

/*
 * DMA buffer management
 */

static int savage_bci_get_buffers(struct drm_device *dev,
				  struct drm_file *file_priv,
				  struct drm_dma *d)
{
	struct drm_buf *buf;
	int i;

	for (i = d->granted_count; i < d->request_count; i++) {
		buf = savage_freelist_get(dev);
		if (!buf)
			return -EAGAIN;

		buf->file_priv = file_priv;

		if (copy_to_user(&d->request_indices[i],
				 &buf->idx, sizeof(buf->idx)))
			return -EFAULT;
		if (copy_to_user(&d->request_sizes[i],
				 &buf->total, sizeof(buf->total)))
			return -EFAULT;

		d->granted_count++;
	}
	return 0;
}

int savage_bci_buffers(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	struct drm_device_dma *dma = dev->dma;
	struct drm_dma *d = data;
	int ret = 0;

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	/* Please don't send us buffers.
	 */
	if (d->send_count != 0) {
		DRM_ERROR("Process %d trying to send %d buffers via drmDMA\n",
			  task_pid_nr(current), d->send_count);
		return -EINVAL;
	}

	/* We'll send you buffers.
	 */
	if (d->request_count < 0 || d->request_count > dma->buf_count) {
		DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n",
			  task_pid_nr(current), d->request_count, dma->buf_count);
		return -EINVAL;
	}

	d->granted_count = 0;

	if (d->request_count) {
		ret = savage_bci_get_buffers(dev, file_priv, d);
	}

	return ret;
}

void savage_reclaim_buffers(struct drm_device *dev, struct drm_file *file_priv)
{
	struct drm_device_dma *dma = dev->dma;
	drm_savage_private_t *dev_priv = dev->dev_private;
	int release_idlelock = 0;
	int i;

	if (!dma)
		return;
	if (!dev_priv)
		return;
	if (!dma->buflist)
		return;

	if (file_priv->master && file_priv->master->lock.hw_lock) {
		drm_legacy_idlelock_take(&file_priv->master->lock);
		release_idlelock = 1;
	}

	for (i = 0; i < dma->buf_count; i++) {
		struct drm_buf *buf = dma->buflist[i];
		drm_savage_buf_priv_t *buf_priv = buf->dev_private;

		if (buf->file_priv == file_priv && buf_priv &&
		    buf_priv->next == NULL && buf_priv->prev == NULL) {
			uint16_t event;
			DRM_DEBUG("reclaimed from client\n");
			event = savage_bci_emit_event(dev_priv, SAVAGE_WAIT_3D);
			SET_AGE(&buf_priv->age, event, dev_priv->event_wrap);
			savage_freelist_put(dev, buf);
		}
	}

	if (release_idlelock)
		drm_legacy_idlelock_release(&file_priv->master->lock);
}

const struct drm_ioctl_desc savage_ioctls[] = {
	DRM_IOCTL_DEF_DRV(SAVAGE_BCI_INIT, savage_bci_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(SAVAGE_BCI_CMDBUF, savage_bci_cmdbuf, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(SAVAGE_BCI_EVENT_EMIT, savage_bci_event_emit, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(SAVAGE_BCI_EVENT_WAIT, savage_bci_event_wait, DRM_AUTH),
};

int savage_max_ioctl = ARRAY_SIZE(savage_ioctls);