// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
 * Author: Rob Clark <rob.clark@linaro.org>
 */

#include <drm/drm_vblank.h>

#include "omap_drv.h"
10*4882a593Smuzhiyun
11*4882a593Smuzhiyun struct omap_irq_wait {
12*4882a593Smuzhiyun struct list_head node;
13*4882a593Smuzhiyun wait_queue_head_t wq;
14*4882a593Smuzhiyun u32 irqmask;
15*4882a593Smuzhiyun int count;
16*4882a593Smuzhiyun };
17*4882a593Smuzhiyun
18*4882a593Smuzhiyun /* call with wait_lock and dispc runtime held */
omap_irq_update(struct drm_device * dev)19*4882a593Smuzhiyun static void omap_irq_update(struct drm_device *dev)
20*4882a593Smuzhiyun {
21*4882a593Smuzhiyun struct omap_drm_private *priv = dev->dev_private;
22*4882a593Smuzhiyun struct omap_irq_wait *wait;
23*4882a593Smuzhiyun u32 irqmask = priv->irq_mask;
24*4882a593Smuzhiyun
25*4882a593Smuzhiyun assert_spin_locked(&priv->wait_lock);
26*4882a593Smuzhiyun
27*4882a593Smuzhiyun list_for_each_entry(wait, &priv->wait_list, node)
28*4882a593Smuzhiyun irqmask |= wait->irqmask;
29*4882a593Smuzhiyun
30*4882a593Smuzhiyun DBG("irqmask=%08x", irqmask);
31*4882a593Smuzhiyun
32*4882a593Smuzhiyun priv->dispc_ops->write_irqenable(priv->dispc, irqmask);
33*4882a593Smuzhiyun }
34*4882a593Smuzhiyun
omap_irq_wait_handler(struct omap_irq_wait * wait)35*4882a593Smuzhiyun static void omap_irq_wait_handler(struct omap_irq_wait *wait)
36*4882a593Smuzhiyun {
37*4882a593Smuzhiyun wait->count--;
38*4882a593Smuzhiyun wake_up(&wait->wq);
39*4882a593Smuzhiyun }
40*4882a593Smuzhiyun
/*
 * omap_irq_wait_init - arm a wait for @count occurrences of @irqmask
 * @dev: DRM device
 * @irqmask: DISPC IRQ bits to wait for
 * @count: number of occurrences required before omap_irq_wait() returns
 *
 * Allocates a waiter, queues it on the wait list and enables the requested
 * interrupts. The returned object must be handed to omap_irq_wait(), which
 * dequeues and frees it.
 *
 * Returns the waiter, or NULL on allocation failure.
 */
struct omap_irq_wait *omap_irq_wait_init(struct drm_device *dev,
		u32 irqmask, int count)
{
	struct omap_drm_private *priv = dev->dev_private;
	struct omap_irq_wait *wait;
	unsigned long flags;

	wait = kzalloc(sizeof(*wait), GFP_KERNEL);
	if (!wait)
		/* Fix: original dereferenced a NULL pointer on alloc failure. */
		return NULL;

	init_waitqueue_head(&wait->wq);
	wait->irqmask = irqmask;
	wait->count = count;

	spin_lock_irqsave(&priv->wait_lock, flags);
	list_add(&wait->node, &priv->wait_list);
	omap_irq_update(dev);	/* enable the bits this waiter needs */
	spin_unlock_irqrestore(&priv->wait_lock, flags);

	return wait;
}
59*4882a593Smuzhiyun
omap_irq_wait(struct drm_device * dev,struct omap_irq_wait * wait,unsigned long timeout)60*4882a593Smuzhiyun int omap_irq_wait(struct drm_device *dev, struct omap_irq_wait *wait,
61*4882a593Smuzhiyun unsigned long timeout)
62*4882a593Smuzhiyun {
63*4882a593Smuzhiyun struct omap_drm_private *priv = dev->dev_private;
64*4882a593Smuzhiyun unsigned long flags;
65*4882a593Smuzhiyun int ret;
66*4882a593Smuzhiyun
67*4882a593Smuzhiyun ret = wait_event_timeout(wait->wq, (wait->count <= 0), timeout);
68*4882a593Smuzhiyun
69*4882a593Smuzhiyun spin_lock_irqsave(&priv->wait_lock, flags);
70*4882a593Smuzhiyun list_del(&wait->node);
71*4882a593Smuzhiyun omap_irq_update(dev);
72*4882a593Smuzhiyun spin_unlock_irqrestore(&priv->wait_lock, flags);
73*4882a593Smuzhiyun
74*4882a593Smuzhiyun kfree(wait);
75*4882a593Smuzhiyun
76*4882a593Smuzhiyun return ret == 0 ? -1 : 0;
77*4882a593Smuzhiyun }
78*4882a593Smuzhiyun
/*
 * omap_irq_enable_framedone - toggle FRAMEDONE interrupt delivery for a CRTC
 * @crtc: the CRTC whose channel's FRAMEDONE IRQ is affected
 * @enable: true to enable, false to disable
 *
 * Returns 0.
 */
int omap_irq_enable_framedone(struct drm_crtc *crtc, bool enable)
{
	struct drm_device *dev = crtc->dev;
	struct omap_drm_private *priv = dev->dev_private;
	enum omap_channel channel = omap_crtc_channel(crtc);
	unsigned long flags;
	int framedone_irq;

	framedone_irq = priv->dispc_ops->mgr_get_framedone_irq(priv->dispc,
							       channel);

	DBG("dev=%p, crtc=%u, enable=%d", dev, channel, enable);

	spin_lock_irqsave(&priv->wait_lock, flags);
	if (enable)
		priv->irq_mask |= framedone_irq;
	else
		priv->irq_mask &= ~framedone_irq;
	omap_irq_update(dev);
	spin_unlock_irqrestore(&priv->wait_lock, flags);

	return 0;
}
100*4882a593Smuzhiyun
/**
 * omap_irq_enable_vblank - enable vblank interrupt events
 * @crtc: the CRTC to enable vblank interrupts for
 *
 * Enable vblank interrupts for @crtc. If the device doesn't have
 * a hardware vblank counter, this routine should be a no-op, since
 * interrupts will have to stay on to keep the count accurate.
 *
 * RETURNS
 * Zero on success, appropriate errno if the given @crtc's vblank
 * interrupt cannot be enabled.
 */
omap_irq_enable_vblank(struct drm_crtc * crtc)114*4882a593Smuzhiyun int omap_irq_enable_vblank(struct drm_crtc *crtc)
115*4882a593Smuzhiyun {
116*4882a593Smuzhiyun struct drm_device *dev = crtc->dev;
117*4882a593Smuzhiyun struct omap_drm_private *priv = dev->dev_private;
118*4882a593Smuzhiyun unsigned long flags;
119*4882a593Smuzhiyun enum omap_channel channel = omap_crtc_channel(crtc);
120*4882a593Smuzhiyun
121*4882a593Smuzhiyun DBG("dev=%p, crtc=%u", dev, channel);
122*4882a593Smuzhiyun
123*4882a593Smuzhiyun spin_lock_irqsave(&priv->wait_lock, flags);
124*4882a593Smuzhiyun priv->irq_mask |= priv->dispc_ops->mgr_get_vsync_irq(priv->dispc,
125*4882a593Smuzhiyun channel);
126*4882a593Smuzhiyun omap_irq_update(dev);
127*4882a593Smuzhiyun spin_unlock_irqrestore(&priv->wait_lock, flags);
128*4882a593Smuzhiyun
129*4882a593Smuzhiyun return 0;
130*4882a593Smuzhiyun }
131*4882a593Smuzhiyun
/**
 * omap_irq_disable_vblank - disable vblank interrupt events
 * @crtc: the CRTC to disable vblank interrupts for
 *
 * Disable vblank interrupts for @crtc. If the device doesn't have
 * a hardware vblank counter, this routine should be a no-op, since
 * interrupts will have to stay on to keep the count accurate.
 */
omap_irq_disable_vblank(struct drm_crtc * crtc)141*4882a593Smuzhiyun void omap_irq_disable_vblank(struct drm_crtc *crtc)
142*4882a593Smuzhiyun {
143*4882a593Smuzhiyun struct drm_device *dev = crtc->dev;
144*4882a593Smuzhiyun struct omap_drm_private *priv = dev->dev_private;
145*4882a593Smuzhiyun unsigned long flags;
146*4882a593Smuzhiyun enum omap_channel channel = omap_crtc_channel(crtc);
147*4882a593Smuzhiyun
148*4882a593Smuzhiyun DBG("dev=%p, crtc=%u", dev, channel);
149*4882a593Smuzhiyun
150*4882a593Smuzhiyun spin_lock_irqsave(&priv->wait_lock, flags);
151*4882a593Smuzhiyun priv->irq_mask &= ~priv->dispc_ops->mgr_get_vsync_irq(priv->dispc,
152*4882a593Smuzhiyun channel);
153*4882a593Smuzhiyun omap_irq_update(dev);
154*4882a593Smuzhiyun spin_unlock_irqrestore(&priv->wait_lock, flags);
155*4882a593Smuzhiyun }
156*4882a593Smuzhiyun
/*
 * Report plane FIFO underflows, rate-limited.
 *
 * Only bits that are both set in @irqstatus and currently enabled in
 * priv->irq_mask are reported; the message lists each underflowing plane
 * by name followed by the raw status bits.
 */
static void omap_irq_fifo_underflow(struct omap_drm_private *priv,
		u32 irqstatus)
{
	static DEFINE_RATELIMIT_STATE(_rs, DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);
	static const struct {
		const char *name;
		u32 mask;
	} sources[] = {
		{ "gfx", DISPC_IRQ_GFX_FIFO_UNDERFLOW },
		{ "vid1", DISPC_IRQ_VID1_FIFO_UNDERFLOW },
		{ "vid2", DISPC_IRQ_VID2_FIFO_UNDERFLOW },
		{ "vid3", DISPC_IRQ_VID3_FIFO_UNDERFLOW },
	};
	const u32 underflow_mask = DISPC_IRQ_GFX_FIFO_UNDERFLOW
				 | DISPC_IRQ_VID1_FIFO_UNDERFLOW
				 | DISPC_IRQ_VID2_FIFO_UNDERFLOW
				 | DISPC_IRQ_VID3_FIFO_UNDERFLOW;
	unsigned int idx;

	/* Only consider underflow bits we actually have enabled. */
	spin_lock(&priv->wait_lock);
	irqstatus &= priv->irq_mask & underflow_mask;
	spin_unlock(&priv->wait_lock);

	if (!irqstatus)
		return;

	if (!__ratelimit(&_rs))
		return;

	DRM_ERROR("FIFO underflow on ");

	for (idx = 0; idx < ARRAY_SIZE(sources); ++idx) {
		if (sources[idx].mask & irqstatus)
			pr_cont("%s ", sources[idx].name);
	}

	pr_cont("(0x%08x)\n", irqstatus);
}
197*4882a593Smuzhiyun
/* Log (rate-limited) when the DISPC reports an OCP bus error. */
static void omap_irq_ocp_error_handler(struct drm_device *dev,
		u32 irqstatus)
{
	if (irqstatus & DISPC_IRQ_OCP_ERR)
		dev_err_ratelimited(dev->dev, "OCP error\n");
}
206*4882a593Smuzhiyun
omap_irq_handler(int irq,void * arg)207*4882a593Smuzhiyun static irqreturn_t omap_irq_handler(int irq, void *arg)
208*4882a593Smuzhiyun {
209*4882a593Smuzhiyun struct drm_device *dev = (struct drm_device *) arg;
210*4882a593Smuzhiyun struct omap_drm_private *priv = dev->dev_private;
211*4882a593Smuzhiyun struct omap_irq_wait *wait, *n;
212*4882a593Smuzhiyun unsigned long flags;
213*4882a593Smuzhiyun unsigned int id;
214*4882a593Smuzhiyun u32 irqstatus;
215*4882a593Smuzhiyun
216*4882a593Smuzhiyun irqstatus = priv->dispc_ops->read_irqstatus(priv->dispc);
217*4882a593Smuzhiyun priv->dispc_ops->clear_irqstatus(priv->dispc, irqstatus);
218*4882a593Smuzhiyun priv->dispc_ops->read_irqstatus(priv->dispc); /* flush posted write */
219*4882a593Smuzhiyun
220*4882a593Smuzhiyun VERB("irqs: %08x", irqstatus);
221*4882a593Smuzhiyun
222*4882a593Smuzhiyun for (id = 0; id < priv->num_pipes; id++) {
223*4882a593Smuzhiyun struct drm_crtc *crtc = priv->pipes[id].crtc;
224*4882a593Smuzhiyun enum omap_channel channel = omap_crtc_channel(crtc);
225*4882a593Smuzhiyun
226*4882a593Smuzhiyun if (irqstatus & priv->dispc_ops->mgr_get_vsync_irq(priv->dispc, channel)) {
227*4882a593Smuzhiyun drm_handle_vblank(dev, id);
228*4882a593Smuzhiyun omap_crtc_vblank_irq(crtc);
229*4882a593Smuzhiyun }
230*4882a593Smuzhiyun
231*4882a593Smuzhiyun if (irqstatus & priv->dispc_ops->mgr_get_sync_lost_irq(priv->dispc, channel))
232*4882a593Smuzhiyun omap_crtc_error_irq(crtc, irqstatus);
233*4882a593Smuzhiyun
234*4882a593Smuzhiyun if (irqstatus & priv->dispc_ops->mgr_get_framedone_irq(priv->dispc, channel))
235*4882a593Smuzhiyun omap_crtc_framedone_irq(crtc, irqstatus);
236*4882a593Smuzhiyun }
237*4882a593Smuzhiyun
238*4882a593Smuzhiyun omap_irq_ocp_error_handler(dev, irqstatus);
239*4882a593Smuzhiyun omap_irq_fifo_underflow(priv, irqstatus);
240*4882a593Smuzhiyun
241*4882a593Smuzhiyun spin_lock_irqsave(&priv->wait_lock, flags);
242*4882a593Smuzhiyun list_for_each_entry_safe(wait, n, &priv->wait_list, node) {
243*4882a593Smuzhiyun if (wait->irqmask & irqstatus)
244*4882a593Smuzhiyun omap_irq_wait_handler(wait);
245*4882a593Smuzhiyun }
246*4882a593Smuzhiyun spin_unlock_irqrestore(&priv->wait_lock, flags);
247*4882a593Smuzhiyun
248*4882a593Smuzhiyun return IRQ_HANDLED;
249*4882a593Smuzhiyun }
250*4882a593Smuzhiyun
251*4882a593Smuzhiyun static const u32 omap_underflow_irqs[] = {
252*4882a593Smuzhiyun [OMAP_DSS_GFX] = DISPC_IRQ_GFX_FIFO_UNDERFLOW,
253*4882a593Smuzhiyun [OMAP_DSS_VIDEO1] = DISPC_IRQ_VID1_FIFO_UNDERFLOW,
254*4882a593Smuzhiyun [OMAP_DSS_VIDEO2] = DISPC_IRQ_VID2_FIFO_UNDERFLOW,
255*4882a593Smuzhiyun [OMAP_DSS_VIDEO3] = DISPC_IRQ_VID3_FIFO_UNDERFLOW,
256*4882a593Smuzhiyun };
257*4882a593Smuzhiyun
/*
 * We need a special version, instead of just using drm_irq_install(),
 * because we need to register the irq via omapdss. Once omapdss and
 * omapdrm are merged together we can assign the dispc hwmod data to
 * ourselves and drop these and just use drm_irq_{install,uninstall}()
 */
264*4882a593Smuzhiyun
omap_drm_irq_install(struct drm_device * dev)265*4882a593Smuzhiyun int omap_drm_irq_install(struct drm_device *dev)
266*4882a593Smuzhiyun {
267*4882a593Smuzhiyun struct omap_drm_private *priv = dev->dev_private;
268*4882a593Smuzhiyun unsigned int num_mgrs = priv->dispc_ops->get_num_mgrs(priv->dispc);
269*4882a593Smuzhiyun unsigned int max_planes;
270*4882a593Smuzhiyun unsigned int i;
271*4882a593Smuzhiyun int ret;
272*4882a593Smuzhiyun
273*4882a593Smuzhiyun spin_lock_init(&priv->wait_lock);
274*4882a593Smuzhiyun INIT_LIST_HEAD(&priv->wait_list);
275*4882a593Smuzhiyun
276*4882a593Smuzhiyun priv->irq_mask = DISPC_IRQ_OCP_ERR;
277*4882a593Smuzhiyun
278*4882a593Smuzhiyun max_planes = min(ARRAY_SIZE(priv->planes),
279*4882a593Smuzhiyun ARRAY_SIZE(omap_underflow_irqs));
280*4882a593Smuzhiyun for (i = 0; i < max_planes; ++i) {
281*4882a593Smuzhiyun if (priv->planes[i])
282*4882a593Smuzhiyun priv->irq_mask |= omap_underflow_irqs[i];
283*4882a593Smuzhiyun }
284*4882a593Smuzhiyun
285*4882a593Smuzhiyun for (i = 0; i < num_mgrs; ++i)
286*4882a593Smuzhiyun priv->irq_mask |= priv->dispc_ops->mgr_get_sync_lost_irq(priv->dispc, i);
287*4882a593Smuzhiyun
288*4882a593Smuzhiyun priv->dispc_ops->runtime_get(priv->dispc);
289*4882a593Smuzhiyun priv->dispc_ops->clear_irqstatus(priv->dispc, 0xffffffff);
290*4882a593Smuzhiyun priv->dispc_ops->runtime_put(priv->dispc);
291*4882a593Smuzhiyun
292*4882a593Smuzhiyun ret = priv->dispc_ops->request_irq(priv->dispc, omap_irq_handler, dev);
293*4882a593Smuzhiyun if (ret < 0)
294*4882a593Smuzhiyun return ret;
295*4882a593Smuzhiyun
296*4882a593Smuzhiyun dev->irq_enabled = true;
297*4882a593Smuzhiyun
298*4882a593Smuzhiyun return 0;
299*4882a593Smuzhiyun }
300*4882a593Smuzhiyun
omap_drm_irq_uninstall(struct drm_device * dev)301*4882a593Smuzhiyun void omap_drm_irq_uninstall(struct drm_device *dev)
302*4882a593Smuzhiyun {
303*4882a593Smuzhiyun struct omap_drm_private *priv = dev->dev_private;
304*4882a593Smuzhiyun
305*4882a593Smuzhiyun if (!dev->irq_enabled)
306*4882a593Smuzhiyun return;
307*4882a593Smuzhiyun
308*4882a593Smuzhiyun dev->irq_enabled = false;
309*4882a593Smuzhiyun
310*4882a593Smuzhiyun priv->dispc_ops->free_irq(priv->dispc, dev);
311*4882a593Smuzhiyun }
312