1*4882a593Smuzhiyun /* via_irq.c
2*4882a593Smuzhiyun *
3*4882a593Smuzhiyun * Copyright 2004 BEAM Ltd.
4*4882a593Smuzhiyun * Copyright 2002 Tungsten Graphics, Inc.
5*4882a593Smuzhiyun * Copyright 2005 Thomas Hellstrom.
6*4882a593Smuzhiyun * All Rights Reserved.
7*4882a593Smuzhiyun *
8*4882a593Smuzhiyun * Permission is hereby granted, free of charge, to any person obtaining a
9*4882a593Smuzhiyun * copy of this software and associated documentation files (the "Software"),
10*4882a593Smuzhiyun * to deal in the Software without restriction, including without limitation
11*4882a593Smuzhiyun * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12*4882a593Smuzhiyun * and/or sell copies of the Software, and to permit persons to whom the
13*4882a593Smuzhiyun * Software is furnished to do so, subject to the following conditions:
14*4882a593Smuzhiyun *
15*4882a593Smuzhiyun * The above copyright notice and this permission notice (including the next
16*4882a593Smuzhiyun * paragraph) shall be included in all copies or substantial portions of the
17*4882a593Smuzhiyun * Software.
18*4882a593Smuzhiyun *
19*4882a593Smuzhiyun * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20*4882a593Smuzhiyun * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21*4882a593Smuzhiyun * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22*4882a593Smuzhiyun * BEAM LTD, TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
23*4882a593Smuzhiyun * DAMAGES OR
24*4882a593Smuzhiyun * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
25*4882a593Smuzhiyun * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
26*4882a593Smuzhiyun * DEALINGS IN THE SOFTWARE.
27*4882a593Smuzhiyun *
28*4882a593Smuzhiyun * Authors:
29*4882a593Smuzhiyun * Terry Barnaby <terry1@beam.ltd.uk>
30*4882a593Smuzhiyun * Keith Whitwell <keith@tungstengraphics.com>
31*4882a593Smuzhiyun * Thomas Hellstrom <unichrome@shipmail.org>
32*4882a593Smuzhiyun *
33*4882a593Smuzhiyun * This code provides standard DRM access to the Via Unichrome / Pro Vertical blank
34*4882a593Smuzhiyun * interrupt, as well as an infrastructure to handle other interrupts of the chip.
35*4882a593Smuzhiyun * The refresh rate is also calculated for video playback sync purposes.
36*4882a593Smuzhiyun */
37*4882a593Smuzhiyun
38*4882a593Smuzhiyun #include <drm/drm_device.h>
39*4882a593Smuzhiyun #include <drm/drm_vblank.h>
40*4882a593Smuzhiyun #include <drm/via_drm.h>
41*4882a593Smuzhiyun
42*4882a593Smuzhiyun #include "via_drv.h"
43*4882a593Smuzhiyun
/* MMIO offset of the combined interrupt enable/status register. */
#define VIA_REG_INTERRUPT       0x200

/*
 * VIA_REG_INTERRUPT bit layout: *_ENABLE bits gate the corresponding
 * interrupt source; *_PENDING bits latch its status and are cleared by
 * writing them back (see the acknowledge paths in this file).
 *
 * Bit 31 must be shifted as unsigned: (1 << 31) on a 32-bit signed int
 * is undefined behavior (left shift into the sign bit).
 */
#define VIA_IRQ_GLOBAL          (1U << 31)	/* master interrupt enable */
#define VIA_IRQ_VBLANK_ENABLE   (1 << 19)
#define VIA_IRQ_VBLANK_PENDING  (1 << 3)
#define VIA_IRQ_HQV0_ENABLE     (1 << 11)
#define VIA_IRQ_HQV1_ENABLE     (1 << 25)
#define VIA_IRQ_HQV0_PENDING    (1 << 9)
#define VIA_IRQ_HQV1_PENDING    (1 << 10)
#define VIA_IRQ_DMA0_DD_ENABLE  (1 << 20)
#define VIA_IRQ_DMA0_TD_ENABLE  (1 << 21)
#define VIA_IRQ_DMA1_DD_ENABLE  (1 << 22)
#define VIA_IRQ_DMA1_TD_ENABLE  (1 << 23)
#define VIA_IRQ_DMA0_DD_PENDING (1 << 4)
#define VIA_IRQ_DMA0_TD_PENDING (1 << 5)
#define VIA_IRQ_DMA1_DD_PENDING (1 << 6)
#define VIA_IRQ_DMA1_TD_PENDING (1 << 7)
63*4882a593Smuzhiyun
/*
 * Device-specific IRQs go here. This type might need to be extended with
 * the register if there are multiple IRQ control registers.
 * Currently we activate the HQV interrupts of Unichrome Pro group A.
 *
 * Each maskarray_t entry is used in this file as:
 *   [0] enable bit in VIA_REG_INTERRUPT
 *   [1] pending bit in VIA_REG_INTERRUPT
 *   [2] device status register offset (0 = no status register)
 *   [3] status mask, [4] expected masked value signalling completion
 * NOTE(review): layout inferred from via_driver_irq_wait() /
 * via_driver_irq_preinstall() usage — confirm against maskarray_t in
 * via_drv.h.
 */

static maskarray_t via_pro_group_a_irqs[] = {
	{VIA_IRQ_HQV0_ENABLE, VIA_IRQ_HQV0_PENDING, 0x000003D0, 0x00008010,
	 0x00000000 },
	{VIA_IRQ_HQV1_ENABLE, VIA_IRQ_HQV1_PENDING, 0x000013D0, 0x00008010,
	 0x00000000 },
	{VIA_IRQ_DMA0_TD_ENABLE, VIA_IRQ_DMA0_TD_PENDING, VIA_PCI_DMA_CSR0,
	 VIA_DMA_CSR_TA | VIA_DMA_CSR_TD, 0x00000008},
	{VIA_IRQ_DMA1_TD_ENABLE, VIA_IRQ_DMA1_TD_PENDING, VIA_PCI_DMA_CSR1,
	 VIA_DMA_CSR_TA | VIA_DMA_CSR_TD, 0x00000008},
};
static int via_num_pro_group_a = ARRAY_SIZE(via_pro_group_a_irqs);
/* Maps logical drm_via_irq numbers to indices in the table above; -1 means
 * the logical IRQ is unavailable on this chipset family. */
static int via_irqmap_pro_group_a[] = {0, 1, -1, 2, -1, 3};

static maskarray_t via_unichrome_irqs[] = {
	{VIA_IRQ_DMA0_TD_ENABLE, VIA_IRQ_DMA0_TD_PENDING, VIA_PCI_DMA_CSR0,
	 VIA_DMA_CSR_TA | VIA_DMA_CSR_TD, 0x00000008},
	{VIA_IRQ_DMA1_TD_ENABLE, VIA_IRQ_DMA1_TD_PENDING, VIA_PCI_DMA_CSR1,
	 VIA_DMA_CSR_TA | VIA_DMA_CSR_TD, 0x00000008}
};
static int via_num_unichrome = ARRAY_SIZE(via_unichrome_irqs);
/* Plain Unichrome only exposes the two DMA transfer-done interrupts. */
static int via_irqmap_unichrome[] = {-1, -1, -1, 0, -1, 1};
92*4882a593Smuzhiyun
via_get_vblank_counter(struct drm_device * dev,unsigned int pipe)93*4882a593Smuzhiyun u32 via_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
94*4882a593Smuzhiyun {
95*4882a593Smuzhiyun drm_via_private_t *dev_priv = dev->dev_private;
96*4882a593Smuzhiyun
97*4882a593Smuzhiyun if (pipe != 0)
98*4882a593Smuzhiyun return 0;
99*4882a593Smuzhiyun
100*4882a593Smuzhiyun return atomic_read(&dev_priv->vbl_received);
101*4882a593Smuzhiyun }
102*4882a593Smuzhiyun
via_driver_irq_handler(int irq,void * arg)103*4882a593Smuzhiyun irqreturn_t via_driver_irq_handler(int irq, void *arg)
104*4882a593Smuzhiyun {
105*4882a593Smuzhiyun struct drm_device *dev = (struct drm_device *) arg;
106*4882a593Smuzhiyun drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
107*4882a593Smuzhiyun u32 status;
108*4882a593Smuzhiyun int handled = 0;
109*4882a593Smuzhiyun ktime_t cur_vblank;
110*4882a593Smuzhiyun drm_via_irq_t *cur_irq = dev_priv->via_irqs;
111*4882a593Smuzhiyun int i;
112*4882a593Smuzhiyun
113*4882a593Smuzhiyun status = via_read(dev_priv, VIA_REG_INTERRUPT);
114*4882a593Smuzhiyun if (status & VIA_IRQ_VBLANK_PENDING) {
115*4882a593Smuzhiyun atomic_inc(&dev_priv->vbl_received);
116*4882a593Smuzhiyun if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
117*4882a593Smuzhiyun cur_vblank = ktime_get();
118*4882a593Smuzhiyun if (dev_priv->last_vblank_valid) {
119*4882a593Smuzhiyun dev_priv->nsec_per_vblank =
120*4882a593Smuzhiyun ktime_sub(cur_vblank,
121*4882a593Smuzhiyun dev_priv->last_vblank) >> 4;
122*4882a593Smuzhiyun }
123*4882a593Smuzhiyun dev_priv->last_vblank = cur_vblank;
124*4882a593Smuzhiyun dev_priv->last_vblank_valid = 1;
125*4882a593Smuzhiyun }
126*4882a593Smuzhiyun if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
127*4882a593Smuzhiyun DRM_DEBUG("nsec per vblank is: %llu\n",
128*4882a593Smuzhiyun ktime_to_ns(dev_priv->nsec_per_vblank));
129*4882a593Smuzhiyun }
130*4882a593Smuzhiyun drm_handle_vblank(dev, 0);
131*4882a593Smuzhiyun handled = 1;
132*4882a593Smuzhiyun }
133*4882a593Smuzhiyun
134*4882a593Smuzhiyun for (i = 0; i < dev_priv->num_irqs; ++i) {
135*4882a593Smuzhiyun if (status & cur_irq->pending_mask) {
136*4882a593Smuzhiyun atomic_inc(&cur_irq->irq_received);
137*4882a593Smuzhiyun wake_up(&cur_irq->irq_queue);
138*4882a593Smuzhiyun handled = 1;
139*4882a593Smuzhiyun if (dev_priv->irq_map[drm_via_irq_dma0_td] == i)
140*4882a593Smuzhiyun via_dmablit_handler(dev, 0, 1);
141*4882a593Smuzhiyun else if (dev_priv->irq_map[drm_via_irq_dma1_td] == i)
142*4882a593Smuzhiyun via_dmablit_handler(dev, 1, 1);
143*4882a593Smuzhiyun }
144*4882a593Smuzhiyun cur_irq++;
145*4882a593Smuzhiyun }
146*4882a593Smuzhiyun
147*4882a593Smuzhiyun /* Acknowledge interrupts */
148*4882a593Smuzhiyun via_write(dev_priv, VIA_REG_INTERRUPT, status);
149*4882a593Smuzhiyun
150*4882a593Smuzhiyun
151*4882a593Smuzhiyun if (handled)
152*4882a593Smuzhiyun return IRQ_HANDLED;
153*4882a593Smuzhiyun else
154*4882a593Smuzhiyun return IRQ_NONE;
155*4882a593Smuzhiyun }
156*4882a593Smuzhiyun
/*
 * Acknowledge every interrupt source this driver handles by writing the
 * pending bits back to the interrupt register.
 */
static __inline__ void viadrv_acknowledge_irqs(drm_via_private_t *dev_priv)
{
	u32 status;

	if (!dev_priv)
		return;

	/* Acknowledge interrupts: pending bits clear when written as 1. */
	status = via_read(dev_priv, VIA_REG_INTERRUPT);
	via_write(dev_priv, VIA_REG_INTERRUPT,
		  status | dev_priv->irq_pending_mask);
}
168*4882a593Smuzhiyun
via_enable_vblank(struct drm_device * dev,unsigned int pipe)169*4882a593Smuzhiyun int via_enable_vblank(struct drm_device *dev, unsigned int pipe)
170*4882a593Smuzhiyun {
171*4882a593Smuzhiyun drm_via_private_t *dev_priv = dev->dev_private;
172*4882a593Smuzhiyun u32 status;
173*4882a593Smuzhiyun
174*4882a593Smuzhiyun if (pipe != 0) {
175*4882a593Smuzhiyun DRM_ERROR("%s: bad crtc %u\n", __func__, pipe);
176*4882a593Smuzhiyun return -EINVAL;
177*4882a593Smuzhiyun }
178*4882a593Smuzhiyun
179*4882a593Smuzhiyun status = via_read(dev_priv, VIA_REG_INTERRUPT);
180*4882a593Smuzhiyun via_write(dev_priv, VIA_REG_INTERRUPT, status | VIA_IRQ_VBLANK_ENABLE);
181*4882a593Smuzhiyun
182*4882a593Smuzhiyun via_write8(dev_priv, 0x83d4, 0x11);
183*4882a593Smuzhiyun via_write8_mask(dev_priv, 0x83d5, 0x30, 0x30);
184*4882a593Smuzhiyun
185*4882a593Smuzhiyun return 0;
186*4882a593Smuzhiyun }
187*4882a593Smuzhiyun
via_disable_vblank(struct drm_device * dev,unsigned int pipe)188*4882a593Smuzhiyun void via_disable_vblank(struct drm_device *dev, unsigned int pipe)
189*4882a593Smuzhiyun {
190*4882a593Smuzhiyun drm_via_private_t *dev_priv = dev->dev_private;
191*4882a593Smuzhiyun u32 status;
192*4882a593Smuzhiyun
193*4882a593Smuzhiyun status = via_read(dev_priv, VIA_REG_INTERRUPT);
194*4882a593Smuzhiyun via_write(dev_priv, VIA_REG_INTERRUPT, status & ~VIA_IRQ_VBLANK_ENABLE);
195*4882a593Smuzhiyun
196*4882a593Smuzhiyun via_write8(dev_priv, 0x83d4, 0x11);
197*4882a593Smuzhiyun via_write8_mask(dev_priv, 0x83d5, 0x30, 0);
198*4882a593Smuzhiyun
199*4882a593Smuzhiyun if (pipe != 0)
200*4882a593Smuzhiyun DRM_ERROR("%s: bad crtc %u\n", __func__, pipe);
201*4882a593Smuzhiyun }
202*4882a593Smuzhiyun
203*4882a593Smuzhiyun static int
via_driver_irq_wait(struct drm_device * dev,unsigned int irq,int force_sequence,unsigned int * sequence)204*4882a593Smuzhiyun via_driver_irq_wait(struct drm_device *dev, unsigned int irq, int force_sequence,
205*4882a593Smuzhiyun unsigned int *sequence)
206*4882a593Smuzhiyun {
207*4882a593Smuzhiyun drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
208*4882a593Smuzhiyun unsigned int cur_irq_sequence;
209*4882a593Smuzhiyun drm_via_irq_t *cur_irq;
210*4882a593Smuzhiyun int ret = 0;
211*4882a593Smuzhiyun maskarray_t *masks;
212*4882a593Smuzhiyun int real_irq;
213*4882a593Smuzhiyun
214*4882a593Smuzhiyun DRM_DEBUG("\n");
215*4882a593Smuzhiyun
216*4882a593Smuzhiyun if (!dev_priv) {
217*4882a593Smuzhiyun DRM_ERROR("called with no initialization\n");
218*4882a593Smuzhiyun return -EINVAL;
219*4882a593Smuzhiyun }
220*4882a593Smuzhiyun
221*4882a593Smuzhiyun if (irq >= drm_via_irq_num) {
222*4882a593Smuzhiyun DRM_ERROR("Trying to wait on unknown irq %d\n", irq);
223*4882a593Smuzhiyun return -EINVAL;
224*4882a593Smuzhiyun }
225*4882a593Smuzhiyun
226*4882a593Smuzhiyun real_irq = dev_priv->irq_map[irq];
227*4882a593Smuzhiyun
228*4882a593Smuzhiyun if (real_irq < 0) {
229*4882a593Smuzhiyun DRM_ERROR("Video IRQ %d not available on this hardware.\n",
230*4882a593Smuzhiyun irq);
231*4882a593Smuzhiyun return -EINVAL;
232*4882a593Smuzhiyun }
233*4882a593Smuzhiyun
234*4882a593Smuzhiyun masks = dev_priv->irq_masks;
235*4882a593Smuzhiyun cur_irq = dev_priv->via_irqs + real_irq;
236*4882a593Smuzhiyun
237*4882a593Smuzhiyun if (masks[real_irq][2] && !force_sequence) {
238*4882a593Smuzhiyun VIA_WAIT_ON(ret, cur_irq->irq_queue, 3 * HZ,
239*4882a593Smuzhiyun ((via_read(dev_priv, masks[irq][2]) & masks[irq][3]) ==
240*4882a593Smuzhiyun masks[irq][4]));
241*4882a593Smuzhiyun cur_irq_sequence = atomic_read(&cur_irq->irq_received);
242*4882a593Smuzhiyun } else {
243*4882a593Smuzhiyun VIA_WAIT_ON(ret, cur_irq->irq_queue, 3 * HZ,
244*4882a593Smuzhiyun (((cur_irq_sequence =
245*4882a593Smuzhiyun atomic_read(&cur_irq->irq_received)) -
246*4882a593Smuzhiyun *sequence) <= (1 << 23)));
247*4882a593Smuzhiyun }
248*4882a593Smuzhiyun *sequence = cur_irq_sequence;
249*4882a593Smuzhiyun return ret;
250*4882a593Smuzhiyun }
251*4882a593Smuzhiyun
252*4882a593Smuzhiyun
253*4882a593Smuzhiyun /*
254*4882a593Smuzhiyun * drm_dma.h hooks
255*4882a593Smuzhiyun */
256*4882a593Smuzhiyun
/*
 * drm irq_preinstall hook: select the per-chipset IRQ tables, reset the
 * per-IRQ bookkeeping, then mask and acknowledge everything in hardware
 * so no stale interrupt fires before the handler is installed.
 */
void via_driver_irq_preinstall(struct drm_device *dev)
{
	drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
	u32 status;
	drm_via_irq_t *cur_irq;
	int i;

	DRM_DEBUG("dev_priv: %p\n", dev_priv);
	if (dev_priv) {
		cur_irq = dev_priv->via_irqs;

		/* Vblank is always handled; device-specific sources are
		 * OR-ed into the masks in the loop below. */
		dev_priv->irq_enable_mask = VIA_IRQ_VBLANK_ENABLE;
		dev_priv->irq_pending_mask = VIA_IRQ_VBLANK_PENDING;

		/* Pro group A / DX9 parts get the HQV + DMA table; older
		 * Unichromes only have the DMA transfer-done pair. */
		if (dev_priv->chipset == VIA_PRO_GROUP_A ||
		    dev_priv->chipset == VIA_DX9_0) {
			dev_priv->irq_masks = via_pro_group_a_irqs;
			dev_priv->num_irqs = via_num_pro_group_a;
			dev_priv->irq_map = via_irqmap_pro_group_a;
		} else {
			dev_priv->irq_masks = via_unichrome_irqs;
			dev_priv->num_irqs = via_num_unichrome;
			dev_priv->irq_map = via_irqmap_unichrome;
		}

		for (i = 0; i < dev_priv->num_irqs; ++i) {
			atomic_set(&cur_irq->irq_received, 0);
			cur_irq->enable_mask = dev_priv->irq_masks[i][0];
			cur_irq->pending_mask = dev_priv->irq_masks[i][1];
			init_waitqueue_head(&cur_irq->irq_queue);
			dev_priv->irq_enable_mask |= cur_irq->enable_mask;
			dev_priv->irq_pending_mask |= cur_irq->pending_mask;
			cur_irq++;

			DRM_DEBUG("Initializing IRQ %d\n", i);
		}

		/* Force the handler to re-estimate the vblank period. */
		dev_priv->last_vblank_valid = 0;

		/* Clear VSync interrupt regs */
		status = via_read(dev_priv, VIA_REG_INTERRUPT);
		via_write(dev_priv, VIA_REG_INTERRUPT, status &
			  ~(dev_priv->irq_enable_mask));

		/* Clear bits if they're already high */
		viadrv_acknowledge_irqs(dev_priv);
	}
}
305*4882a593Smuzhiyun
via_driver_irq_postinstall(struct drm_device * dev)306*4882a593Smuzhiyun int via_driver_irq_postinstall(struct drm_device *dev)
307*4882a593Smuzhiyun {
308*4882a593Smuzhiyun drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
309*4882a593Smuzhiyun u32 status;
310*4882a593Smuzhiyun
311*4882a593Smuzhiyun DRM_DEBUG("via_driver_irq_postinstall\n");
312*4882a593Smuzhiyun if (!dev_priv)
313*4882a593Smuzhiyun return -EINVAL;
314*4882a593Smuzhiyun
315*4882a593Smuzhiyun status = via_read(dev_priv, VIA_REG_INTERRUPT);
316*4882a593Smuzhiyun via_write(dev_priv, VIA_REG_INTERRUPT, status | VIA_IRQ_GLOBAL
317*4882a593Smuzhiyun | dev_priv->irq_enable_mask);
318*4882a593Smuzhiyun
319*4882a593Smuzhiyun /* Some magic, oh for some data sheets ! */
320*4882a593Smuzhiyun via_write8(dev_priv, 0x83d4, 0x11);
321*4882a593Smuzhiyun via_write8_mask(dev_priv, 0x83d5, 0x30, 0x30);
322*4882a593Smuzhiyun
323*4882a593Smuzhiyun return 0;
324*4882a593Smuzhiyun }
325*4882a593Smuzhiyun
via_driver_irq_uninstall(struct drm_device * dev)326*4882a593Smuzhiyun void via_driver_irq_uninstall(struct drm_device *dev)
327*4882a593Smuzhiyun {
328*4882a593Smuzhiyun drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
329*4882a593Smuzhiyun u32 status;
330*4882a593Smuzhiyun
331*4882a593Smuzhiyun DRM_DEBUG("\n");
332*4882a593Smuzhiyun if (dev_priv) {
333*4882a593Smuzhiyun
334*4882a593Smuzhiyun /* Some more magic, oh for some data sheets ! */
335*4882a593Smuzhiyun
336*4882a593Smuzhiyun via_write8(dev_priv, 0x83d4, 0x11);
337*4882a593Smuzhiyun via_write8_mask(dev_priv, 0x83d5, 0x30, 0);
338*4882a593Smuzhiyun
339*4882a593Smuzhiyun status = via_read(dev_priv, VIA_REG_INTERRUPT);
340*4882a593Smuzhiyun via_write(dev_priv, VIA_REG_INTERRUPT, status &
341*4882a593Smuzhiyun ~(VIA_IRQ_VBLANK_ENABLE | dev_priv->irq_enable_mask));
342*4882a593Smuzhiyun }
343*4882a593Smuzhiyun }
344*4882a593Smuzhiyun
via_wait_irq(struct drm_device * dev,void * data,struct drm_file * file_priv)345*4882a593Smuzhiyun int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv)
346*4882a593Smuzhiyun {
347*4882a593Smuzhiyun drm_via_irqwait_t *irqwait = data;
348*4882a593Smuzhiyun struct timespec64 now;
349*4882a593Smuzhiyun int ret = 0;
350*4882a593Smuzhiyun drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
351*4882a593Smuzhiyun drm_via_irq_t *cur_irq = dev_priv->via_irqs;
352*4882a593Smuzhiyun int force_sequence;
353*4882a593Smuzhiyun
354*4882a593Smuzhiyun if (irqwait->request.irq >= dev_priv->num_irqs) {
355*4882a593Smuzhiyun DRM_ERROR("Trying to wait on unknown irq %d\n",
356*4882a593Smuzhiyun irqwait->request.irq);
357*4882a593Smuzhiyun return -EINVAL;
358*4882a593Smuzhiyun }
359*4882a593Smuzhiyun
360*4882a593Smuzhiyun cur_irq += irqwait->request.irq;
361*4882a593Smuzhiyun
362*4882a593Smuzhiyun switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
363*4882a593Smuzhiyun case VIA_IRQ_RELATIVE:
364*4882a593Smuzhiyun irqwait->request.sequence +=
365*4882a593Smuzhiyun atomic_read(&cur_irq->irq_received);
366*4882a593Smuzhiyun irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
367*4882a593Smuzhiyun case VIA_IRQ_ABSOLUTE:
368*4882a593Smuzhiyun break;
369*4882a593Smuzhiyun default:
370*4882a593Smuzhiyun return -EINVAL;
371*4882a593Smuzhiyun }
372*4882a593Smuzhiyun
373*4882a593Smuzhiyun if (irqwait->request.type & VIA_IRQ_SIGNAL) {
374*4882a593Smuzhiyun DRM_ERROR("Signals on Via IRQs not implemented yet.\n");
375*4882a593Smuzhiyun return -EINVAL;
376*4882a593Smuzhiyun }
377*4882a593Smuzhiyun
378*4882a593Smuzhiyun force_sequence = (irqwait->request.type & VIA_IRQ_FORCE_SEQUENCE);
379*4882a593Smuzhiyun
380*4882a593Smuzhiyun ret = via_driver_irq_wait(dev, irqwait->request.irq, force_sequence,
381*4882a593Smuzhiyun &irqwait->request.sequence);
382*4882a593Smuzhiyun ktime_get_ts64(&now);
383*4882a593Smuzhiyun irqwait->reply.tval_sec = now.tv_sec;
384*4882a593Smuzhiyun irqwait->reply.tval_usec = now.tv_nsec / NSEC_PER_USEC;
385*4882a593Smuzhiyun
386*4882a593Smuzhiyun return ret;
387*4882a593Smuzhiyun }
388