/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/kernel.h>

#include "i915_drv.h"
#include "intel_display_types.h"
#include "intel_hotplug.h"

/**
 * DOC: Hotplug
 *
 * Simply put, hotplug occurs when a display is connected to or disconnected
 * from the system. However, there may be adapters and docking stations and
 * Display Port short pulses and MST devices involved, complicating matters.
 *
 * Hotplug in i915 is handled at many different levels of abstraction.
 *
 * The platform dependent interrupt handling code in i915_irq.c enables,
 * disables, and does preliminary handling of the interrupts. The interrupt
 * handlers gather the hotplug detect (HPD) information from relevant registers
 * into a platform independent mask of hotplug pins that have fired.
 *
 * The platform independent interrupt handler intel_hpd_irq_handler() in
 * intel_hotplug.c does hotplug irq storm detection and mitigation, and passes
 * further processing to appropriate bottom halves (Display Port specific and
 * regular hotplug).
 *
 * The Display Port work function i915_digport_work_func() calls into
 * intel_dp_hpd_pulse() via hooks, which handles DP short pulses and DP MST long
 * pulses, with failures and non-MST long pulses triggering regular hotplug
 * processing on the connector.
 *
 * The regular hotplug work function i915_hotplug_work_func() calls connector
 * detect hooks, and, if connector status changes, triggers sending of hotplug
 * uevent to userspace via drm_kms_helper_hotplug_event().
 *
 * Finally, userspace is responsible for triggering a modeset upon receiving
 * the hotplug uevent, disabling or enabling the crtc as needed.
 *
 * The hotplug interrupt storm detection and mitigation code keeps track of the
 * number of interrupts per hotplug pin over a period of time, and if the number
 * of interrupts exceeds a certain threshold, the interrupt is disabled for a
 * while before being re-enabled. The intention is to mitigate issues arising
 * from broken hardware triggering massive amounts of interrupts and grinding
 * the system to a halt.
 *
 * The current implementation expects that a hotplug interrupt storm will not
 * be seen when a Display Port sink is connected. Hence, on platforms whose DP
 * callback is handled by i915_digport_work_func(), re-enabling of hpd is not
 * performed (it was never expected to be disabled in the first place ;) ).
 * This is specific to DP sinks handled by that routine; any other display,
 * such as HDMI or DVI, enabled on the same port will have the proper logic,
 * since it will use i915_hotplug_work_func(), where this logic is handled.
 */

/**
 * intel_hpd_pin_default - return default pin associated with a certain port.
 * @dev_priv: private driver data pointer
 * @port: the hpd port to get associated pin
 *
 * It is only valid for and used by digital port encoders.
 *
 * Return pin that is associated with @port.
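 *
 * The mapping is a simple linear offset; for example, PORT_A maps to
 * HPD_PORT_A and PORT_B maps to HPD_PORT_B, assuming the hpd_pin and port
 * enum values stay in lockstep.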
 */
enum hpd_pin intel_hpd_pin_default(struct drm_i915_private *dev_priv,
				   enum port port)
{
	return HPD_PORT_A + port - PORT_A;
}

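/*
 * Storm detection window, storm re-enable delay and hotplug retry delay,
 * all in milliseconds (converted with msecs_to_jiffies() at the use sites).
 */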
#define HPD_STORM_DETECT_PERIOD 1000
#define HPD_STORM_REENABLE_DELAY (2 * 60 * 1000)
#define HPD_RETRY_DELAY 1000

static enum hpd_pin
intel_connector_hpd_pin(struct intel_connector *connector)
{
	struct intel_encoder *encoder = intel_attached_encoder(connector);

	/*
	 * MST connectors get their encoder attached dynamically
	 * so we need to make sure we have an encoder here. But since
	 * MST encoders have their hpd_pin set to HPD_NONE we don't
	 * have to special case them beyond that.
	 */
	return encoder ? encoder->hpd_pin : HPD_NONE;
}

/**
 * intel_hpd_irq_storm_detect - gather stats and detect HPD IRQ storm on a pin
 * @dev_priv: private driver data pointer
 * @pin: the pin to gather stats on
 * @long_hpd: whether the HPD IRQ was long or short
 *
 * Gather stats about HPD IRQs from the specified @pin, and detect IRQ
 * storms. Only the pin specific stats and state are changed, the caller is
 * responsible for further action.
 *
 * The number of IRQs that are allowed within @HPD_STORM_DETECT_PERIOD is
 * stored in @dev_priv->hotplug.hpd_storm_threshold which defaults to
 * @HPD_STORM_DEFAULT_THRESHOLD. Long IRQs count as +10 to this threshold, and
 * short IRQs count as +1. If this threshold is exceeded, it's considered an
 * IRQ storm and the IRQ state is set to @HPD_MARK_DISABLED.
 *
 * By default, most systems will only count long IRQs towards
 * &dev_priv->hotplug.hpd_storm_threshold. However, some older systems also
 * suffer from short IRQ storms and must also track these. Because short IRQ
 * storms are naturally caused by sideband interactions with DP MST devices,
 * short IRQ detection is only enabled for systems without DP MST support.
 * Systems which are new enough to support DP MST are far less likely to
 * suffer from IRQ storms at all, so this is fine.
 *
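 * As a worked example, assuming a default threshold of 50 (the value of
 * HPD_STORM_DEFAULT_THRESHOLD is an assumption here): six long IRQs within
 * one detection period (6 * 10 = 60 > 50) would be flagged as a storm,
 * while five (5 * 10 = 50) would not, since the count must exceed the
 * threshold.
 *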
 * The HPD threshold can be controlled through i915_hpd_storm_ctl in debugfs,
 * and should only be adjusted for automated hotplug testing.
 *
 * Return true if an IRQ storm was detected on @pin.
 */
static bool intel_hpd_irq_storm_detect(struct drm_i915_private *dev_priv,
				       enum hpd_pin pin, bool long_hpd)
{
	struct i915_hotplug *hpd = &dev_priv->hotplug;
	unsigned long start = hpd->stats[pin].last_jiffies;
	unsigned long end = start + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD);
	const int increment = long_hpd ? 10 : 1;
	const int threshold = hpd->hpd_storm_threshold;
	bool storm = false;

	if (!threshold ||
	    (!long_hpd && !dev_priv->hotplug.hpd_short_storm_enabled))
		return false;

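	/*
	 * Restart the detection window (and reset the count) if the current
	 * IRQ falls outside the window started by the previous one.
	 */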
	if (!time_in_range(jiffies, start, end)) {
		hpd->stats[pin].last_jiffies = jiffies;
		hpd->stats[pin].count = 0;
	}

	hpd->stats[pin].count += increment;
	if (hpd->stats[pin].count > threshold) {
		hpd->stats[pin].state = HPD_MARK_DISABLED;
		drm_dbg_kms(&dev_priv->drm,
			    "HPD interrupt storm detected on PIN %d\n", pin);
		storm = true;
	} else {
		drm_dbg_kms(&dev_priv->drm,
			    "Received HPD interrupt on PIN %d - cnt: %d\n",
			    pin,
			    hpd->stats[pin].count);
	}

	return storm;
}

static void
intel_hpd_irq_storm_switch_to_polling(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct drm_connector_list_iter conn_iter;
	struct intel_connector *connector;
	bool hpd_disabled = false;

	lockdep_assert_held(&dev_priv->irq_lock);

	drm_connector_list_iter_begin(dev, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		enum hpd_pin pin;

		if (connector->base.polled != DRM_CONNECTOR_POLL_HPD)
			continue;

		pin = intel_connector_hpd_pin(connector);
		if (pin == HPD_NONE ||
		    dev_priv->hotplug.stats[pin].state != HPD_MARK_DISABLED)
			continue;

		drm_info(&dev_priv->drm,
			 "HPD interrupt storm detected on connector %s: "
			 "switching from hotplug detection to polling\n",
			 connector->base.name);

		dev_priv->hotplug.stats[pin].state = HPD_DISABLED;
		connector->base.polled = DRM_CONNECTOR_POLL_CONNECT |
			DRM_CONNECTOR_POLL_DISCONNECT;
		hpd_disabled = true;
	}
	drm_connector_list_iter_end(&conn_iter);

	/* Enable polling and queue hotplug re-enabling. */
	if (hpd_disabled) {
		drm_kms_helper_poll_enable(dev);
		mod_delayed_work(system_wq, &dev_priv->hotplug.reenable_work,
				 msecs_to_jiffies(HPD_STORM_REENABLE_DELAY));
	}
}

static void intel_hpd_irq_storm_reenable_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv),
			     hotplug.reenable_work.work);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_connector_list_iter conn_iter;
	struct intel_connector *connector;
	intel_wakeref_t wakeref;
	enum hpd_pin pin;

	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

	spin_lock_irq(&dev_priv->irq_lock);

	drm_connector_list_iter_begin(dev, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		pin = intel_connector_hpd_pin(connector);
		if (pin == HPD_NONE ||
		    dev_priv->hotplug.stats[pin].state != HPD_DISABLED)
			continue;

		if (connector->base.polled != connector->polled)
			drm_dbg(&dev_priv->drm,
				"Reenabling HPD on connector %s\n",
				connector->base.name);
		connector->base.polled = connector->polled;
	}
	drm_connector_list_iter_end(&conn_iter);

	for_each_hpd_pin(pin) {
		if (dev_priv->hotplug.stats[pin].state == HPD_DISABLED)
			dev_priv->hotplug.stats[pin].state = HPD_ENABLED;
	}

	if (dev_priv->display_irqs_enabled && dev_priv->display.hpd_irq_setup)
		dev_priv->display.hpd_irq_setup(dev_priv);

	spin_unlock_irq(&dev_priv->irq_lock);

	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
}

enum intel_hotplug_state
intel_encoder_hotplug(struct intel_encoder *encoder,
		      struct intel_connector *connector)
{
	struct drm_device *dev = connector->base.dev;
	enum drm_connector_status old_status;
	u64 old_epoch_counter;
	bool ret = false;

	drm_WARN_ON(dev, !mutex_is_locked(&dev->mode_config.mutex));
	old_status = connector->base.status;
	old_epoch_counter = connector->base.epoch_counter;

	connector->base.status =
		drm_helper_probe_detect(&connector->base, NULL, false);

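	/*
	 * The DRM core bumps the connector's epoch counter whenever the
	 * probed state changes (status, EDID, etc.), so comparing it is a
	 * more reliable change check than comparing the status alone.
	 */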
	if (old_epoch_counter != connector->base.epoch_counter)
		ret = true;

	if (ret) {
		DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s (epoch counter %llu->%llu)\n",
			      connector->base.base.id,
			      connector->base.name,
			      drm_get_connector_status_name(old_status),
			      drm_get_connector_status_name(connector->base.status),
			      old_epoch_counter,
			      connector->base.epoch_counter);
		return INTEL_HOTPLUG_CHANGED;
	}
	return INTEL_HOTPLUG_UNCHANGED;
}

static bool intel_encoder_has_hpd_pulse(struct intel_encoder *encoder)
{
	return intel_encoder_is_dig_port(encoder) &&
		enc_to_dig_port(encoder)->hpd_pulse != NULL;
}

static void i915_digport_work_func(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, hotplug.dig_port_work);
	u32 long_port_mask, short_port_mask;
	struct intel_encoder *encoder;
	u32 old_bits = 0;

	spin_lock_irq(&dev_priv->irq_lock);
	long_port_mask = dev_priv->hotplug.long_port_mask;
	dev_priv->hotplug.long_port_mask = 0;
	short_port_mask = dev_priv->hotplug.short_port_mask;
	dev_priv->hotplug.short_port_mask = 0;
	spin_unlock_irq(&dev_priv->irq_lock);

	for_each_intel_encoder(&dev_priv->drm, encoder) {
		struct intel_digital_port *dig_port;
		enum port port = encoder->port;
		bool long_hpd, short_hpd;
		enum irqreturn ret;

		if (!intel_encoder_has_hpd_pulse(encoder))
			continue;

		long_hpd = long_port_mask & BIT(port);
		short_hpd = short_port_mask & BIT(port);

		if (!long_hpd && !short_hpd)
			continue;

		dig_port = enc_to_dig_port(encoder);

		ret = dig_port->hpd_pulse(dig_port, long_hpd);
		if (ret == IRQ_NONE) {
			/* fall back to old school hpd */
			old_bits |= BIT(encoder->hpd_pin);
		}
	}

	if (old_bits) {
		spin_lock_irq(&dev_priv->irq_lock);
		dev_priv->hotplug.event_bits |= old_bits;
		spin_unlock_irq(&dev_priv->irq_lock);
		queue_delayed_work(system_wq, &dev_priv->hotplug.hotplug_work, 0);
	}
}

/**
 * intel_hpd_trigger_irq - trigger an hpd irq event for a port
 * @dig_port: digital port
 *
 * Trigger an HPD interrupt event for the given port, emulating a short pulse
 * generated by the sink, and schedule the dig port work to handle it.
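 *
 * This lets callers (e.g. the DP code) kick the short pulse handling path
 * from process context, without an actual interrupt from the sink.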
 */
void intel_hpd_trigger_irq(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);

	spin_lock_irq(&i915->irq_lock);
	i915->hotplug.short_port_mask |= BIT(dig_port->base.port);
	spin_unlock_irq(&i915->irq_lock);

	queue_work(i915->hotplug.dp_wq, &i915->hotplug.dig_port_work);
}

/*
 * Handle hotplug events outside the interrupt handler proper.
 */
static void i915_hotplug_work_func(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private,
			     hotplug.hotplug_work.work);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_connector_list_iter conn_iter;
	struct intel_connector *connector;
	u32 changed = 0, retry = 0;
	u32 hpd_event_bits;
	u32 hpd_retry_bits;

	mutex_lock(&dev->mode_config.mutex);
	drm_dbg_kms(&dev_priv->drm, "running encoder hotplug functions\n");

	spin_lock_irq(&dev_priv->irq_lock);

	hpd_event_bits = dev_priv->hotplug.event_bits;
	dev_priv->hotplug.event_bits = 0;
	hpd_retry_bits = dev_priv->hotplug.retry_bits;
	dev_priv->hotplug.retry_bits = 0;

	/* Enable polling for connectors which had HPD IRQ storms */
	intel_hpd_irq_storm_switch_to_polling(dev_priv);

	spin_unlock_irq(&dev_priv->irq_lock);

	drm_connector_list_iter_begin(dev, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		enum hpd_pin pin;
		u32 hpd_bit;

		pin = intel_connector_hpd_pin(connector);
		if (pin == HPD_NONE)
			continue;

		hpd_bit = BIT(pin);
		if ((hpd_event_bits | hpd_retry_bits) & hpd_bit) {
			struct intel_encoder *encoder =
				intel_attached_encoder(connector);

			if (hpd_event_bits & hpd_bit)
				connector->hotplug_retries = 0;
			else
				connector->hotplug_retries++;

			drm_dbg_kms(&dev_priv->drm,
				    "Connector %s (pin %i) received hotplug event. (retry %d)\n",
				    connector->base.name, pin,
				    connector->hotplug_retries);

			switch (encoder->hotplug(encoder, connector)) {
			case INTEL_HOTPLUG_UNCHANGED:
				break;
			case INTEL_HOTPLUG_CHANGED:
				changed |= hpd_bit;
				break;
			case INTEL_HOTPLUG_RETRY:
				retry |= hpd_bit;
				break;
			}
		}
	}
	drm_connector_list_iter_end(&conn_iter);
	mutex_unlock(&dev->mode_config.mutex);

	if (changed)
		drm_kms_helper_hotplug_event(dev);

	/* Remove shared HPD pins that have changed */
	retry &= ~changed;
	if (retry) {
		spin_lock_irq(&dev_priv->irq_lock);
		dev_priv->hotplug.retry_bits |= retry;
		spin_unlock_irq(&dev_priv->irq_lock);

		mod_delayed_work(system_wq, &dev_priv->hotplug.hotplug_work,
				 msecs_to_jiffies(HPD_RETRY_DELAY));
	}
}

/**
 * intel_hpd_irq_handler - main hotplug irq handler
 * @dev_priv: drm_i915_private
 * @pin_mask: a mask of hpd pins that have triggered the irq
 * @long_mask: a mask of hpd pins that may be long hpd pulses
 *
 * This is the main hotplug irq handler for all platforms. The platform specific
 * irq handlers call the platform specific hotplug irq handlers, which read and
 * decode the appropriate registers into bitmasks about hpd pins that have
 * triggered (@pin_mask), and which of those pins may be long pulses
 * (@long_mask). The @long_mask is ignored if the port corresponding to the pin
 * is not a digital port.
 *
 * Here, we do hotplug irq storm detection and mitigation, and pass further
 * processing to appropriate bottom halves.
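 *
 * For example, a call with @pin_mask = BIT(HPD_PORT_B) and @long_mask = 0
 * reports a short pulse on port B's pin, while @long_mask = BIT(HPD_PORT_B)
 * would mark the same pulse as long.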
 */
void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
			   u32 pin_mask, u32 long_mask)
{
	struct intel_encoder *encoder;
	bool storm_detected = false;
	bool queue_dig = false, queue_hp = false;
	u32 long_hpd_pulse_mask = 0;
	u32 short_hpd_pulse_mask = 0;
	enum hpd_pin pin;

	if (!pin_mask)
		return;

	spin_lock(&dev_priv->irq_lock);

	/*
	 * Determine whether ->hpd_pulse() exists for each pin, and
	 * whether we have a short or a long pulse. This is needed
	 * as each pin may have up to two encoders (HDMI and DP) and
	 * only one of them (DP) will have ->hpd_pulse().
	 */
	for_each_intel_encoder(&dev_priv->drm, encoder) {
		enum port port = encoder->port;
		bool long_hpd;

		pin = encoder->hpd_pin;
		if (!(BIT(pin) & pin_mask))
			continue;

		if (!intel_encoder_has_hpd_pulse(encoder))
			continue;

		long_hpd = long_mask & BIT(pin);

		drm_dbg(&dev_priv->drm,
			"digital hpd on [ENCODER:%d:%s] - %s\n",
			encoder->base.base.id, encoder->base.name,
			long_hpd ? "long" : "short");
		queue_dig = true;

		if (long_hpd) {
			long_hpd_pulse_mask |= BIT(pin);
			dev_priv->hotplug.long_port_mask |= BIT(port);
		} else {
			short_hpd_pulse_mask |= BIT(pin);
			dev_priv->hotplug.short_port_mask |= BIT(port);
		}
	}

	/* Now process each pin just once */
	for_each_hpd_pin(pin) {
		bool long_hpd;

		if (!(BIT(pin) & pin_mask))
			continue;

		if (dev_priv->hotplug.stats[pin].state == HPD_DISABLED) {
			/*
			 * On GMCH platforms the interrupt mask bits only
			 * prevent irq generation, not the setting of the
			 * hotplug bits itself. So only WARN about unexpected
			 * interrupts on saner platforms.
			 */
			drm_WARN_ONCE(&dev_priv->drm, !HAS_GMCH(dev_priv),
				      "Received HPD interrupt on pin %d although disabled\n",
				      pin);
			continue;
		}

		if (dev_priv->hotplug.stats[pin].state != HPD_ENABLED)
			continue;

		/*
		 * Delegate to ->hpd_pulse() if one of the encoders for this
		 * pin has it, otherwise let the hotplug_work deal with this
		 * pin directly.
		 */
		if (((short_hpd_pulse_mask | long_hpd_pulse_mask) & BIT(pin))) {
			long_hpd = long_hpd_pulse_mask & BIT(pin);
		} else {
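			/*
			 * No ->hpd_pulse() handler for this pin; treat the
			 * event as a long pulse and let hotplug_work handle
			 * it directly.
			 */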
			dev_priv->hotplug.event_bits |= BIT(pin);
			long_hpd = true;
			queue_hp = true;
		}

		if (intel_hpd_irq_storm_detect(dev_priv, pin, long_hpd)) {
			dev_priv->hotplug.event_bits &= ~BIT(pin);
			storm_detected = true;
			queue_hp = true;
		}
	}

	/*
	 * Disable any IRQs that storms were detected on. Polling enablement
	 * happens later in our hotplug work.
	 */
	if (storm_detected && dev_priv->display_irqs_enabled)
		dev_priv->display.hpd_irq_setup(dev_priv);
	spin_unlock(&dev_priv->irq_lock);

	/*
	 * Our hotplug handler can grab modeset locks (by calling down into the
	 * fb helpers). Hence it must not be run on our own dev-priv->wq work
	 * queue for otherwise the flush_work in the pageflip code will
	 * deadlock.
	 */
	if (queue_dig)
		queue_work(dev_priv->hotplug.dp_wq, &dev_priv->hotplug.dig_port_work);
	if (queue_hp)
		queue_delayed_work(system_wq, &dev_priv->hotplug.hotplug_work, 0);
}

/**
 * intel_hpd_init - initializes and enables hpd support
 * @dev_priv: i915 device instance
 *
 * This function enables the hotplug support. It requires that interrupts have
 * already been enabled with intel_irq_init_hw(). From this point on hotplug and
 * poll request can run concurrently to other code, so locking rules must be
 * obeyed.
 *
 * This is a separate step from interrupt enabling to simplify the locking rules
 * in the driver load and resume code.
 *
 * Also see: intel_hpd_poll_init(), which enables connector polling
 */
void intel_hpd_init(struct drm_i915_private *dev_priv)
{
	int i;

	for_each_hpd_pin(i) {
		dev_priv->hotplug.stats[i].count = 0;
		dev_priv->hotplug.stats[i].state = HPD_ENABLED;
	}

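	/*
	 * Switch from polling back to HPD based detection; the worker takes
	 * care of flipping each connector's polled flags accordingly.
	 */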
	WRITE_ONCE(dev_priv->hotplug.poll_enabled, false);
	schedule_work(&dev_priv->hotplug.poll_init_work);

	/*
	 * Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked checks happy.
	 */
	if (dev_priv->display_irqs_enabled && dev_priv->display.hpd_irq_setup) {
		spin_lock_irq(&dev_priv->irq_lock);
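		/*
		 * Re-check under the lock: on some platforms (e.g. VLV/CHV,
		 * an assumption here) display_irqs_enabled can change at
		 * runtime, so don't set up HPD irqs if it was just cleared.
		 */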
		if (dev_priv->display_irqs_enabled)
			dev_priv->display.hpd_irq_setup(dev_priv);
		spin_unlock_irq(&dev_priv->irq_lock);
	}
}

static void i915_hpd_poll_init_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private,
			     hotplug.poll_init_work);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_connector_list_iter conn_iter;
	struct intel_connector *connector;
	bool enabled;

	mutex_lock(&dev->mode_config.mutex);

	enabled = READ_ONCE(dev_priv->hotplug.poll_enabled);

	drm_connector_list_iter_begin(dev, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		enum hpd_pin pin;

		pin = intel_connector_hpd_pin(connector);
		if (pin == HPD_NONE)
			continue;

		connector->base.polled = connector->polled;

		if (enabled && connector->base.polled == DRM_CONNECTOR_POLL_HPD)
			connector->base.polled = DRM_CONNECTOR_POLL_CONNECT |
				DRM_CONNECTOR_POLL_DISCONNECT;
	}
	drm_connector_list_iter_end(&conn_iter);

	if (enabled)
		drm_kms_helper_poll_enable(dev);

	mutex_unlock(&dev->mode_config.mutex);

	/*
	 * We might have missed any hotplug events that happened while we were
	 * in the middle of disabling polling.
	 */
	if (!enabled)
		drm_helper_hpd_irq_event(dev);
}

/**
 * intel_hpd_poll_init - enables/disables polling for connectors with hpd
 * @dev_priv: i915 device instance
 *
 * This function enables polling for all connectors, regardless of whether or
 * not they support hotplug detection. Under certain conditions HPD may not be
 * functional. On most Intel GPUs, this happens when we enter runtime suspend.
 * On Valleyview and Cherryview systems, this also happens when we shut off all
 * of the powerwells.
 *
 * Since this function can get called in contexts where we're already holding
 * dev->mode_config.mutex, we do the actual hotplug enabling in a separate
 * worker.
 *
 * Also see: intel_hpd_init(), which restores hpd handling.
 */
void intel_hpd_poll_init(struct drm_i915_private *dev_priv)
{
	WRITE_ONCE(dev_priv->hotplug.poll_enabled, true);

	/*
	 * We might already be holding dev->mode_config.mutex, so do this in a
	 * separate worker.
	 * As well, there's no issue if we race here since we always reschedule
	 * this worker anyway.
	 */
	schedule_work(&dev_priv->hotplug.poll_init_work);
}

void intel_hpd_init_work(struct drm_i915_private *dev_priv)
{
	INIT_DELAYED_WORK(&dev_priv->hotplug.hotplug_work,
			  i915_hotplug_work_func);
	INIT_WORK(&dev_priv->hotplug.dig_port_work, i915_digport_work_func);
	INIT_WORK(&dev_priv->hotplug.poll_init_work, i915_hpd_poll_init_work);
	INIT_DELAYED_WORK(&dev_priv->hotplug.reenable_work,
			  intel_hpd_irq_storm_reenable_work);
}

void intel_hpd_cancel_work(struct drm_i915_private *dev_priv)
{
	spin_lock_irq(&dev_priv->irq_lock);

	dev_priv->hotplug.long_port_mask = 0;
	dev_priv->hotplug.short_port_mask = 0;
	dev_priv->hotplug.event_bits = 0;
	dev_priv->hotplug.retry_bits = 0;

	spin_unlock_irq(&dev_priv->irq_lock);

	cancel_work_sync(&dev_priv->hotplug.dig_port_work);
	cancel_delayed_work_sync(&dev_priv->hotplug.hotplug_work);
	cancel_work_sync(&dev_priv->hotplug.poll_init_work);
	cancel_delayed_work_sync(&dev_priv->hotplug.reenable_work);
}

bool intel_hpd_disable(struct drm_i915_private *dev_priv, enum hpd_pin pin)
{
	bool ret = false;

	if (pin == HPD_NONE)
		return false;

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->hotplug.stats[pin].state == HPD_ENABLED) {
		dev_priv->hotplug.stats[pin].state = HPD_DISABLED;
		ret = true;
	}
	spin_unlock_irq(&dev_priv->irq_lock);

	return ret;
}

void intel_hpd_enable(struct drm_i915_private *dev_priv, enum hpd_pin pin)
{
	if (pin == HPD_NONE)
		return;

	spin_lock_irq(&dev_priv->irq_lock);
	dev_priv->hotplug.stats[pin].state = HPD_ENABLED;
	spin_unlock_irq(&dev_priv->irq_lock);
}