#include <linux/kernel.h>

#include <drm/drmP.h>
#include <drm/i915_drm.h>

#include "i915_drv.h"
#include "intel_drv.h"

/**
 * DOC: Hotplug
 *
 * Simply put, hotplug occurs when a display is connected to or disconnected
 * from the system. However, there may be adapters and docking stations and
 * Display Port short pulses and MST devices involved, complicating matters.
 *
 * Hotplug in i915 is handled in many different levels of abstraction.
 *
 * The platform dependent interrupt handling code in i915_irq.c enables,
 * disables, and does preliminary handling of the interrupts. The interrupt
 * handlers gather the hotplug detect (HPD) information from relevant
 * registers into a platform independent mask of hotplug pins that have fired.
 *
 * The platform independent interrupt handler intel_hpd_irq_handler() in
 * intel_hotplug.c does hotplug irq storm detection and mitigation, and passes
 * further processing to appropriate bottom halves (Display Port specific and
 * regular hotplug).
 *
 * The Display Port work function i915_digport_work_func() calls into the
 * port's ->hpd_pulse() hook, which handles DP short pulses and DP MST long
 * pulses, with failures and non-MST long pulses triggering regular hotplug
 * processing on the connector.
 *
 * The regular hotplug work function i915_hotplug_work_func() calls the
 * connector detect hooks, and, if the connector status changes, triggers
 * sending of a hotplug uevent to userspace via
 * drm_kms_helper_hotplug_event().
 *
 * The hotplug interrupt storm detection and mitigation code keeps track of
 * the number of interrupts per hotplug pin per period of time, and if the
 * number of interrupts exceeds a certain threshold, hotplug interrupt
 * handling on that pin is disabled for a while before being re-enabled. The
 * intention is to mitigate issues arising from broken hardware triggering
 * massive amounts of interrupts and grinding the system to a halt.
 */
bool intel_hpd_pin_to_port(enum hpd_pin pin, enum port *port)
{
        switch (pin) {
        case HPD_PORT_A:
                *port = PORT_A;
                return true;
        case HPD_PORT_B:
                *port = PORT_B;
                return true;
        case HPD_PORT_C:
                *port = PORT_C;
                return true;
        case HPD_PORT_D:
                *port = PORT_D;
                return true;
        case HPD_PORT_E:
                *port = PORT_E;
                return true;
        default:
                return false;
        }
}

#define HPD_STORM_DETECT_PERIOD 1000
#define HPD_STORM_REENABLE_DELAY (2 * 60 * 1000)

/*
 * intel_hpd_irq_storm_detect - gather stats and detect an HPD irq storm on a pin
 * @dev_priv: private driver data pointer
 * @pin: the pin to gather stats on
 *
 * Gather stats about HPD irqs from the specified @pin and detect irq storms:
 * if more than hpd_storm_threshold interrupts arrive within
 * HPD_STORM_DETECT_PERIOD, the pin is marked HPD_MARK_DISABLED so the
 * hotplug work can switch the affected connector over to polling.
 *
 * Returns true if an irq storm was detected on @pin.
 */
static bool intel_hpd_irq_storm_detect(struct drm_i915_private *dev_priv,
                                       enum hpd_pin pin)
{
        unsigned long start = dev_priv->hotplug.stats[pin].last_jiffies;
        unsigned long end = start + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD);
        const int threshold = dev_priv->hotplug.hpd_storm_threshold;
        bool storm = false;

        if (!time_in_range(jiffies, start, end)) {
                dev_priv->hotplug.stats[pin].last_jiffies = jiffies;
                dev_priv->hotplug.stats[pin].count = 0;
                DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: 0\n", pin);
        } else if (dev_priv->hotplug.stats[pin].count > threshold &&
                   threshold) {
                dev_priv->hotplug.stats[pin].state = HPD_MARK_DISABLED;
                DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", pin);
                storm = true;
        } else {
                dev_priv->hotplug.stats[pin].count++;
                DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: %d\n", pin,
                              dev_priv->hotplug.stats[pin].count);
        }

        return storm;
}

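/*
 * intel_hpd_irq_storm_disable - switch storming connectors to polling
 *
 * For every connector whose HPD pin has been marked HPD_MARK_DISABLED by the
 * storm detection above, disable HPD interrupt handling and fall back to
 * connect/disconnect polling. A delayed work is scheduled to re-enable HPD
 * after HPD_STORM_REENABLE_DELAY. Caller must hold dev_priv->irq_lock.
 */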
static void intel_hpd_irq_storm_disable(struct drm_i915_private *dev_priv)
{
        struct drm_device *dev = &dev_priv->drm;
        struct intel_connector *intel_connector;
        struct intel_encoder *intel_encoder;
        struct drm_connector *connector;
        struct drm_connector_list_iter conn_iter;
        enum hpd_pin pin;
        bool hpd_disabled = false;

        lockdep_assert_held(&dev_priv->irq_lock);

        drm_connector_list_iter_begin(dev, &conn_iter);
        drm_for_each_connector_iter(connector, &conn_iter) {
                if (connector->polled != DRM_CONNECTOR_POLL_HPD)
                        continue;

                intel_connector = to_intel_connector(connector);
                intel_encoder = intel_connector->encoder;
                if (!intel_encoder)
                        continue;

                pin = intel_encoder->hpd_pin;
                if (pin == HPD_NONE ||
                    dev_priv->hotplug.stats[pin].state != HPD_MARK_DISABLED)
                        continue;

                DRM_INFO("HPD interrupt storm detected on connector %s: "
                         "switching from hotplug detection to polling\n",
                         connector->name);

                dev_priv->hotplug.stats[pin].state = HPD_DISABLED;
                connector->polled = DRM_CONNECTOR_POLL_CONNECT
                        | DRM_CONNECTOR_POLL_DISCONNECT;
                hpd_disabled = true;
        }
        drm_connector_list_iter_end(&conn_iter);

        /* Enable polling and queue hotplug re-enabling. */
        if (hpd_disabled) {
                drm_kms_helper_poll_enable(dev);
                mod_delayed_work(system_wq, &dev_priv->hotplug.reenable_work,
                                 msecs_to_jiffies(HPD_STORM_REENABLE_DELAY));
        }
}

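/*
 * intel_hpd_irq_storm_reenable_work - delayed re-enable of storm-disabled HPD
 *
 * Runs HPD_STORM_REENABLE_DELAY after a storm was mitigated: pins left in
 * HPD_DISABLED are moved back to HPD_ENABLED, the affected connectors get
 * their original polling mode restored, and the hardware hotplug interrupts
 * are reprogrammed via the platform hpd_irq_setup() hook.
 */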
static void intel_hpd_irq_storm_reenable_work(struct work_struct *work)
{
        struct drm_i915_private *dev_priv =
                container_of(work, typeof(*dev_priv),
                             hotplug.reenable_work.work);
        struct drm_device *dev = &dev_priv->drm;
        int i;

        intel_runtime_pm_get(dev_priv);

        spin_lock_irq(&dev_priv->irq_lock);
        for_each_hpd_pin(i) {
                struct drm_connector *connector;
                struct drm_connector_list_iter conn_iter;

                if (dev_priv->hotplug.stats[i].state != HPD_DISABLED)
                        continue;

                dev_priv->hotplug.stats[i].state = HPD_ENABLED;

                drm_connector_list_iter_begin(dev, &conn_iter);
                drm_for_each_connector_iter(connector, &conn_iter) {
                        struct intel_connector *intel_connector = to_intel_connector(connector);

                        if (intel_connector->encoder->hpd_pin == i) {
                                if (connector->polled != intel_connector->polled)
                                        DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n",
                                                         connector->name);
                                connector->polled = intel_connector->polled;
                                if (!connector->polled)
                                        connector->polled = DRM_CONNECTOR_POLL_HPD;
                        }
                }
                drm_connector_list_iter_end(&conn_iter);
        }
        if (dev_priv->display_irqs_enabled && dev_priv->display.hpd_irq_setup)
                dev_priv->display.hpd_irq_setup(dev_priv);
        spin_unlock_irq(&dev_priv->irq_lock);

        intel_runtime_pm_put(dev_priv);
}

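/*
 * intel_hpd_irq_event - re-probe a single connector after a hotplug event
 *
 * Re-detects the connector status via the probe helpers and reports whether
 * it changed, so the caller knows if a KMS hotplug event needs to be sent.
 * Requires dev->mode_config.mutex.
 */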
static bool intel_hpd_irq_event(struct drm_device *dev,
                                struct drm_connector *connector)
{
        enum drm_connector_status old_status;

        WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
        old_status = connector->status;

        connector->status = drm_helper_probe_detect(connector, NULL, false);

        if (old_status == connector->status)
                return false;

        DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n",
                      connector->base.id,
                      connector->name,
                      drm_get_connector_status_name(old_status),
                      drm_get_connector_status_name(connector->status));

        return true;
}

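/*
 * i915_digport_work_func - bottom half for digital port (DP/HDMI) hotplug
 *
 * Consumes the long/short pulse masks collected by intel_hpd_irq_handler()
 * and calls each port's ->hpd_pulse() handler. Ports whose handler returns
 * IRQ_NONE are handed back to the generic hotplug work.
 */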
static void i915_digport_work_func(struct work_struct *work)
{
        struct drm_i915_private *dev_priv =
                container_of(work, struct drm_i915_private, hotplug.dig_port_work);
        u32 long_port_mask, short_port_mask;
        struct intel_digital_port *intel_dig_port;
        int i;
        u32 old_bits = 0;

        spin_lock_irq(&dev_priv->irq_lock);
        long_port_mask = dev_priv->hotplug.long_port_mask;
        dev_priv->hotplug.long_port_mask = 0;
        short_port_mask = dev_priv->hotplug.short_port_mask;
        dev_priv->hotplug.short_port_mask = 0;
        spin_unlock_irq(&dev_priv->irq_lock);

        for (i = 0; i < I915_MAX_PORTS; i++) {
                bool valid = false;
                bool long_hpd = false;
                intel_dig_port = dev_priv->hotplug.irq_port[i];
                if (!intel_dig_port || !intel_dig_port->hpd_pulse)
                        continue;

                if (long_port_mask & (1 << i)) {
                        valid = true;
                        long_hpd = true;
                } else if (short_port_mask & (1 << i))
                        valid = true;

                if (valid) {
                        enum irqreturn ret;

                        ret = intel_dig_port->hpd_pulse(intel_dig_port, long_hpd);
                        if (ret == IRQ_NONE) {
                                /* fall back to old school hpd */
                                old_bits |= (1 << intel_dig_port->base.hpd_pin);
                        }
                }
        }

        if (old_bits) {
                spin_lock_irq(&dev_priv->irq_lock);
                dev_priv->hotplug.event_bits |= old_bits;
                spin_unlock_irq(&dev_priv->irq_lock);
                schedule_work(&dev_priv->hotplug.hotplug_work);
        }
}

/*
 * i915_hotplug_work_func - generic hotplug bottom half
 *
 * Handles hotplug events outside of the interrupt handler proper: disables
 * HPD on storming pins, calls each affected encoder's ->hot_plug() hook and
 * re-probes the connectors, sending a KMS hotplug event if anything changed.
 */
static void i915_hotplug_work_func(struct work_struct *work)
{
        struct drm_i915_private *dev_priv =
                container_of(work, struct drm_i915_private, hotplug.hotplug_work);
        struct drm_device *dev = &dev_priv->drm;
        struct intel_connector *intel_connector;
        struct intel_encoder *intel_encoder;
        struct drm_connector *connector;
        struct drm_connector_list_iter conn_iter;
        bool changed = false;
        u32 hpd_event_bits;

        mutex_lock(&dev->mode_config.mutex);
        DRM_DEBUG_KMS("running encoder hotplug functions\n");

        spin_lock_irq(&dev_priv->irq_lock);

        hpd_event_bits = dev_priv->hotplug.event_bits;
        dev_priv->hotplug.event_bits = 0;

        /* Disable hotplug on connectors that hit an irq storm. */
        intel_hpd_irq_storm_disable(dev_priv);

        spin_unlock_irq(&dev_priv->irq_lock);

        drm_connector_list_iter_begin(dev, &conn_iter);
        drm_for_each_connector_iter(connector, &conn_iter) {
                intel_connector = to_intel_connector(connector);
                if (!intel_connector->encoder)
                        continue;
                intel_encoder = intel_connector->encoder;
                if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
                        DRM_DEBUG_KMS("Connector %s (pin %i) received hotplug event.\n",
                                      connector->name, intel_encoder->hpd_pin);
                        if (intel_encoder->hot_plug)
                                intel_encoder->hot_plug(intel_encoder);
                        if (intel_hpd_irq_event(dev, connector))
                                changed = true;
                }
        }
        drm_connector_list_iter_end(&conn_iter);
        mutex_unlock(&dev->mode_config.mutex);

        if (changed)
                drm_kms_helper_hotplug_event(dev);
}

/**
 * intel_hpd_irq_handler - main hotplug irq handler
 * @dev_priv: drm_i915_private
 * @pin_mask: a mask of hpd pins that have triggered the irq
 * @long_mask: a mask of hpd pins that may be long hpd pulses
 *
 * This is the main hotplug irq handler for all platforms. The platform
 * specific irq handlers decode the hardware registers into @pin_mask and
 * @long_mask; @long_mask is only meaningful for pins that map to a digital
 * port.
 *
 * Here we do hotplug irq storm detection and mitigation, and defer further
 * processing to the appropriate bottom halves (dig_port_work for digital
 * ports, hotplug_work for everything else).
 */
void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
                           u32 pin_mask, u32 long_mask)
{
        int i;
        enum port port;
        bool storm_detected = false;
        bool queue_dig = false, queue_hp = false;
        bool is_dig_port;

        if (!pin_mask)
                return;

        spin_lock(&dev_priv->irq_lock);
        for_each_hpd_pin(i) {
                if (!(BIT(i) & pin_mask))
                        continue;

                is_dig_port = intel_hpd_pin_to_port(i, &port) &&
                              dev_priv->hotplug.irq_port[port];

                if (is_dig_port) {
                        bool long_hpd = long_mask & BIT(i);

                        DRM_DEBUG_DRIVER("digital hpd port %c - %s\n", port_name(port),
                                         long_hpd ? "long" : "short");
                        /*
                         * For long HPD pulses we want to have the digital
                         * queue happen, but we still want HPD storm detection
                         * to function.
                         */
                        queue_dig = true;
                        if (long_hpd) {
                                dev_priv->hotplug.long_port_mask |= (1 << port);
                        } else {
                                /* for short HPD just trigger the digital queue */
                                dev_priv->hotplug.short_port_mask |= (1 << port);
                                continue;
                        }
                }

                if (dev_priv->hotplug.stats[i].state == HPD_DISABLED) {
                        /*
                         * On GMCH platforms the interrupt mask bits only
                         * prevent irq generation, not the setting of the
                         * hotplug bits itself. So only WARN about unexpected
                         * interrupts on saner platforms.
                         */
                        WARN_ONCE(!HAS_GMCH_DISPLAY(dev_priv),
                                  "Received HPD interrupt on pin %d although disabled\n", i);
                        continue;
                }

                if (dev_priv->hotplug.stats[i].state != HPD_ENABLED)
                        continue;

                if (!is_dig_port) {
                        dev_priv->hotplug.event_bits |= BIT(i);
                        queue_hp = true;
                }

                if (intel_hpd_irq_storm_detect(dev_priv, i)) {
                        dev_priv->hotplug.event_bits &= ~BIT(i);
                        storm_detected = true;
                }
        }

        if (storm_detected && dev_priv->display_irqs_enabled)
                dev_priv->display.hpd_irq_setup(dev_priv);
        spin_unlock(&dev_priv->irq_lock);

        /*
         * Our hotplug handler can grab modeset locks (by calling down into the
         * fb helpers). Hence it must not be run on our own dev_priv->wq work
         * queue, as otherwise the flush_work in the pageflip code will
         * deadlock.
         */
        if (queue_dig)
                queue_work(dev_priv->hotplug.dp_wq, &dev_priv->hotplug.dig_port_work);
        if (queue_hp)
                schedule_work(&dev_priv->hotplug.hotplug_work);
}

/**
 * intel_hpd_init - initializes and enables hpd support
 * @dev_priv: i915 device instance
 *
 * This function enables the hotplug support. It requires that interrupts have
 * already been enabled. From this point on hotplug and poll requests can run
 * concurrently to other code, so locking rules must be obeyed.
 *
 * This is a separate step from interrupt enabling to simplify the locking
 * rules in the driver load and resume code.
 *
 * Also see: intel_hpd_poll_init(), which enables connector polling.
 */
void intel_hpd_init(struct drm_i915_private *dev_priv)
{
        int i;

        for_each_hpd_pin(i) {
                dev_priv->hotplug.stats[i].count = 0;
                dev_priv->hotplug.stats[i].state = HPD_ENABLED;
        }

        WRITE_ONCE(dev_priv->hotplug.poll_enabled, false);
        schedule_work(&dev_priv->hotplug.poll_init_work);

        /*
         * Interrupt setup is already guaranteed to be single-threaded, this is
         * just to make the assert_spin_locked checks happy.
         */
        if (dev_priv->display_irqs_enabled && dev_priv->display.hpd_irq_setup) {
                spin_lock_irq(&dev_priv->irq_lock);
                if (dev_priv->display_irqs_enabled)
                        dev_priv->display.hpd_irq_setup(dev_priv);
                spin_unlock_irq(&dev_priv->irq_lock);
        }
}

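/*
 * i915_hpd_poll_init_work - switch connectors between HPD and polling
 *
 * Worker behind intel_hpd_init()/intel_hpd_poll_init(): depending on
 * hotplug.poll_enabled it either enables connect/disconnect polling for
 * HPD-capable connectors or restores pure HPD operation, and kicks off a
 * probe for hotplugs that may have been missed while polling was disabled.
 */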
static void i915_hpd_poll_init_work(struct work_struct *work)
{
        struct drm_i915_private *dev_priv =
                container_of(work, struct drm_i915_private,
                             hotplug.poll_init_work);
        struct drm_device *dev = &dev_priv->drm;
        struct drm_connector *connector;
        struct drm_connector_list_iter conn_iter;
        bool enabled;

        mutex_lock(&dev->mode_config.mutex);

        enabled = READ_ONCE(dev_priv->hotplug.poll_enabled);

        drm_connector_list_iter_begin(dev, &conn_iter);
        drm_for_each_connector_iter(connector, &conn_iter) {
                struct intel_connector *intel_connector =
                        to_intel_connector(connector);
                connector->polled = intel_connector->polled;

                /* MST has a dynamic intel_connector->encoder and its reprobing
                 * is all handled by the MST helpers. */
                if (intel_connector->mst_port)
                        continue;

                if (!connector->polled && I915_HAS_HOTPLUG(dev_priv) &&
                    intel_connector->encoder->hpd_pin > HPD_NONE) {
                        connector->polled = enabled ?
                                DRM_CONNECTOR_POLL_CONNECT |
                                DRM_CONNECTOR_POLL_DISCONNECT :
                                DRM_CONNECTOR_POLL_HPD;
                }
        }
        drm_connector_list_iter_end(&conn_iter);

        if (enabled)
                drm_kms_helper_poll_enable(dev);

        mutex_unlock(&dev->mode_config.mutex);

        /*
         * We might have missed any hotplugs that happened while we were
         * in the middle of disabling polling.
         */
        if (!enabled)
                drm_helper_hpd_irq_event(dev);
}

/**
 * intel_hpd_poll_init - enables/disables polling for connectors with hpd
 * @dev_priv: i915 device instance
 *
 * This function enables polling for all connectors, regardless of whether or
 * not they support hotplug detection. Under certain conditions HPD may not be
 * functional, for example when the GPU is in runtime suspend or, on some
 * platforms, when the display power wells are shut off.
 *
 * Since this function can get called in contexts where we're already holding
 * dev->mode_config.mutex, we do the actual hotplug enabling in a separate
 * worker.
 *
 * Also see: intel_hpd_init(), which restores hpd handling.
 */
void intel_hpd_poll_init(struct drm_i915_private *dev_priv)
{
        WRITE_ONCE(dev_priv->hotplug.poll_enabled, true);

        /*
         * We might already be holding dev->mode_config.mutex, so do this in a
         * separate worker. There's also no issue if we race here since we
         * always reschedule this worker anyway.
         */
        schedule_work(&dev_priv->hotplug.poll_init_work);
}

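/*
 * intel_hpd_init_work - initialize the hotplug work items
 *
 * Sets up the generic and digital-port hotplug workers, the poll-init worker
 * and the delayed storm re-enable work.
 */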
void intel_hpd_init_work(struct drm_i915_private *dev_priv)
{
        INIT_WORK(&dev_priv->hotplug.hotplug_work, i915_hotplug_work_func);
        INIT_WORK(&dev_priv->hotplug.dig_port_work, i915_digport_work_func);
        INIT_WORK(&dev_priv->hotplug.poll_init_work, i915_hpd_poll_init_work);
        INIT_DELAYED_WORK(&dev_priv->hotplug.reenable_work,
                          intel_hpd_irq_storm_reenable_work);
}

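/*
 * intel_hpd_cancel_work - clear pending hotplug state and flush the workers
 *
 * Drops any queued long/short pulse masks and event bits under the irq lock,
 * then synchronously cancels all hotplug related work items.
 */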
void intel_hpd_cancel_work(struct drm_i915_private *dev_priv)
{
        spin_lock_irq(&dev_priv->irq_lock);

        dev_priv->hotplug.long_port_mask = 0;
        dev_priv->hotplug.short_port_mask = 0;
        dev_priv->hotplug.event_bits = 0;

        spin_unlock_irq(&dev_priv->irq_lock);

        cancel_work_sync(&dev_priv->hotplug.dig_port_work);
        cancel_work_sync(&dev_priv->hotplug.hotplug_work);
        cancel_work_sync(&dev_priv->hotplug.poll_init_work);
        cancel_delayed_work_sync(&dev_priv->hotplug.reenable_work);
}

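/*
 * intel_hpd_disable - disable HPD handling for a pin
 *
 * Returns true if the pin was enabled and is now disabled, so the caller
 * knows whether it needs to re-enable it later with intel_hpd_enable().
 */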
bool intel_hpd_disable(struct drm_i915_private *dev_priv, enum hpd_pin pin)
{
        bool ret = false;

        if (pin == HPD_NONE)
                return false;

        spin_lock_irq(&dev_priv->irq_lock);
        if (dev_priv->hotplug.stats[pin].state == HPD_ENABLED) {
                dev_priv->hotplug.stats[pin].state = HPD_DISABLED;
                ret = true;
        }
        spin_unlock_irq(&dev_priv->irq_lock);

        return ret;
}

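/*
 * intel_hpd_enable - re-enable HPD handling for a pin
 *
 * Counterpart to intel_hpd_disable(); unconditionally marks @pin as
 * HPD_ENABLED again.
 */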
void intel_hpd_enable(struct drm_i915_private *dev_priv, enum hpd_pin pin)
{
        if (pin == HPD_NONE)
                return;

        spin_lock_irq(&dev_priv->irq_lock);
        dev_priv->hotplug.stats[pin].state = HPD_ENABLED;
        spin_unlock_irq(&dev_priv->irq_lock);
}