1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24#include <linux/kernel.h>
25
26#include "i915_drv.h"
27#include "intel_display_types.h"
28#include "intel_hotplug.h"
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86enum hpd_pin intel_hpd_pin_default(struct drm_i915_private *dev_priv,
87 enum port port)
88{
89 return HPD_PORT_A + port - PORT_A;
90}
91
92#define HPD_STORM_DETECT_PERIOD 1000
93#define HPD_STORM_REENABLE_DELAY (2 * 60 * 1000)
94#define HPD_RETRY_DELAY 1000
95
96static enum hpd_pin
97intel_connector_hpd_pin(struct intel_connector *connector)
98{
99 struct intel_encoder *encoder = intel_attached_encoder(connector);
100
101
102
103
104
105
106
107 return encoder ? encoder->hpd_pin : HPD_NONE;
108}
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
/*
 * intel_hpd_irq_storm_detect - gather stats and detect HPD IRQ storm on a pin
 * @dev_priv: private driver data pointer
 * @pin: the pin to gather stats on
 * @long_hpd: whether the HPD IRQ was long or short
 *
 * Gather stats about HPD IRQs from the specified @pin, and detect IRQ
 * storms. Only the pin specific stats and state are changed, the caller is
 * responsible for further action.
 *
 * The number of IRQs that are allowed within HPD_STORM_DETECT_PERIOD is
 * @dev_priv->hotplug.hpd_storm_threshold. Long IRQs count as +10 to this
 * threshold, short IRQs count as +1. If this threshold is exceeded, it's
 * considered an IRQ storm and the IRQ state is set to @HPD_MARK_DISABLED.
 *
 * By default, most systems will only count long IRQs towards the threshold;
 * short-IRQ counting requires @dev_priv->hotplug.hpd_short_storm_enabled.
 * A threshold of 0 disables detection entirely.
 *
 * Return true if an IRQ storm was detected on @pin.
 */
static bool intel_hpd_irq_storm_detect(struct drm_i915_private *dev_priv,
				       enum hpd_pin pin, bool long_hpd)
{
	struct i915_hotplug *hpd = &dev_priv->hotplug;
	unsigned long start = hpd->stats[pin].last_jiffies;
	unsigned long end = start + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD);
	/* Long pulses are weighted 10x vs. short ones. */
	const int increment = long_hpd ? 10 : 1;
	const int threshold = hpd->hpd_storm_threshold;
	bool storm = false;

	if (!threshold ||
	    (!long_hpd && !dev_priv->hotplug.hpd_short_storm_enabled))
		return false;

	/* Outside the detection window: restart the count from now. */
	if (!time_in_range(jiffies, start, end)) {
		hpd->stats[pin].last_jiffies = jiffies;
		hpd->stats[pin].count = 0;
	}

	hpd->stats[pin].count += increment;
	if (hpd->stats[pin].count > threshold) {
		hpd->stats[pin].state = HPD_MARK_DISABLED;
		drm_dbg_kms(&dev_priv->drm,
			    "HPD interrupt storm detected on PIN %d\n", pin);
		storm = true;
	} else {
		drm_dbg_kms(&dev_priv->drm,
			    "Received HPD interrupt on PIN %d - cnt: %d\n",
			      pin,
			      hpd->stats[pin].count);
	}

	return storm;
}
173
/*
 * Switch every connector whose pin was marked HPD_MARK_DISABLED by storm
 * detection over to periodic polling, and schedule the work that will
 * re-enable HPD on them later. Caller must hold dev_priv->irq_lock.
 */
static void
intel_hpd_irq_storm_switch_to_polling(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct drm_connector_list_iter conn_iter;
	struct intel_connector *connector;
	bool hpd_disabled = false;

	lockdep_assert_held(&dev_priv->irq_lock);

	drm_connector_list_iter_begin(dev, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		enum hpd_pin pin;

		/* Only connectors currently relying on HPD are affected. */
		if (connector->base.polled != DRM_CONNECTOR_POLL_HPD)
			continue;

		pin = intel_connector_hpd_pin(connector);
		if (pin == HPD_NONE ||
		    dev_priv->hotplug.stats[pin].state != HPD_MARK_DISABLED)
			continue;

		drm_info(&dev_priv->drm,
			 "HPD interrupt storm detected on connector %s: "
			 "switching from hotplug detection to polling\n",
			 connector->base.name);

		dev_priv->hotplug.stats[pin].state = HPD_DISABLED;
		connector->base.polled = DRM_CONNECTOR_POLL_CONNECT |
			DRM_CONNECTOR_POLL_DISCONNECT;
		hpd_disabled = true;
	}
	drm_connector_list_iter_end(&conn_iter);

	/* Enable polling and queue hotplug re-enabling. */
	if (hpd_disabled) {
		drm_kms_helper_poll_enable(dev);
		mod_delayed_work(system_wq, &dev_priv->hotplug.reenable_work,
				 msecs_to_jiffies(HPD_STORM_REENABLE_DELAY));
	}
}
215
216static void intel_hpd_irq_setup(struct drm_i915_private *i915)
217{
218 if (i915->display_irqs_enabled && i915->display.hpd_irq_setup)
219 i915->display.hpd_irq_setup(i915);
220}
221
/*
 * Delayed work that undoes a storm-induced switch to polling: restores each
 * affected connector's normal polling mode, flips the disabled pins back to
 * HPD_ENABLED and reprograms the HPD interrupts. Runs with a runtime PM
 * wakeref held since it touches interrupt setup.
 */
static void intel_hpd_irq_storm_reenable_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv),
			     hotplug.reenable_work.work);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_connector_list_iter conn_iter;
	struct intel_connector *connector;
	intel_wakeref_t wakeref;
	enum hpd_pin pin;

	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

	spin_lock_irq(&dev_priv->irq_lock);

	drm_connector_list_iter_begin(dev, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		pin = intel_connector_hpd_pin(connector);
		if (pin == HPD_NONE ||
		    dev_priv->hotplug.stats[pin].state != HPD_DISABLED)
			continue;

		/* Restore the connector's pre-storm polling setting. */
		if (connector->base.polled != connector->polled)
			drm_dbg(&dev_priv->drm,
				"Reenabling HPD on connector %s\n",
				connector->base.name);
		connector->base.polled = connector->polled;
	}
	drm_connector_list_iter_end(&conn_iter);

	for_each_hpd_pin(pin) {
		if (dev_priv->hotplug.stats[pin].state == HPD_DISABLED)
			dev_priv->hotplug.stats[pin].state = HPD_ENABLED;
	}

	intel_hpd_irq_setup(dev_priv);

	spin_unlock_irq(&dev_priv->irq_lock);

	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
}
263
264enum intel_hotplug_state
265intel_encoder_hotplug(struct intel_encoder *encoder,
266 struct intel_connector *connector)
267{
268 struct drm_device *dev = connector->base.dev;
269 enum drm_connector_status old_status;
270 u64 old_epoch_counter;
271 bool ret = false;
272
273 drm_WARN_ON(dev, !mutex_is_locked(&dev->mode_config.mutex));
274 old_status = connector->base.status;
275 old_epoch_counter = connector->base.epoch_counter;
276
277 connector->base.status =
278 drm_helper_probe_detect(&connector->base, NULL, false);
279
280 if (old_epoch_counter != connector->base.epoch_counter)
281 ret = true;
282
283 if (ret) {
284 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s (epoch counter %llu->%llu)\n",
285 connector->base.base.id,
286 connector->base.name,
287 drm_get_connector_status_name(old_status),
288 drm_get_connector_status_name(connector->base.status),
289 old_epoch_counter,
290 connector->base.epoch_counter);
291 return INTEL_HOTPLUG_CHANGED;
292 }
293 return INTEL_HOTPLUG_UNCHANGED;
294}
295
296static bool intel_encoder_has_hpd_pulse(struct intel_encoder *encoder)
297{
298 return intel_encoder_is_dig_port(encoder) &&
299 enc_to_dig_port(encoder)->hpd_pulse != NULL;
300}
301
/*
 * Bottom half for digital-port HPD pulses: snapshots and clears the pending
 * long/short port masks under the irq lock, dispatches each affected
 * encoder's ->hpd_pulse() handler, and falls back to the generic hotplug
 * work for any encoder whose handler returned IRQ_NONE.
 */
static void i915_digport_work_func(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, hotplug.dig_port_work);
	u32 long_port_mask, short_port_mask;
	struct intel_encoder *encoder;
	u32 old_bits = 0;

	/* Snapshot and clear pending masks atomically. */
	spin_lock_irq(&dev_priv->irq_lock);
	long_port_mask = dev_priv->hotplug.long_port_mask;
	dev_priv->hotplug.long_port_mask = 0;
	short_port_mask = dev_priv->hotplug.short_port_mask;
	dev_priv->hotplug.short_port_mask = 0;
	spin_unlock_irq(&dev_priv->irq_lock);

	for_each_intel_encoder(&dev_priv->drm, encoder) {
		struct intel_digital_port *dig_port;
		enum port port = encoder->port;
		bool long_hpd, short_hpd;
		enum irqreturn ret;

		if (!intel_encoder_has_hpd_pulse(encoder))
			continue;

		long_hpd = long_port_mask & BIT(port);
		short_hpd = short_port_mask & BIT(port);

		if (!long_hpd && !short_hpd)
			continue;

		dig_port = enc_to_dig_port(encoder);

		ret = dig_port->hpd_pulse(dig_port, long_hpd);
		if (ret == IRQ_NONE) {
			/* fall back to old school hpd handling */
			old_bits |= BIT(encoder->hpd_pin);
		}
	}

	if (old_bits) {
		spin_lock_irq(&dev_priv->irq_lock);
		dev_priv->hotplug.event_bits |= old_bits;
		spin_unlock_irq(&dev_priv->irq_lock);
		queue_delayed_work(system_wq, &dev_priv->hotplug.hotplug_work, 0);
	}
}
348
349
350
351
352
353
354
355
356void intel_hpd_trigger_irq(struct intel_digital_port *dig_port)
357{
358 struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
359
360 spin_lock_irq(&i915->irq_lock);
361 i915->hotplug.short_port_mask |= BIT(dig_port->base.port);
362 spin_unlock_irq(&i915->irq_lock);
363
364 queue_work(i915->hotplug.dp_wq, &i915->hotplug.dig_port_work);
365}
366
367
368
369
/*
 * Main hotplug bottom half: snapshots pending event/retry bits, switches
 * storm-affected connectors to polling, runs each affected encoder's
 * ->hotplug() callback, fires the KMS hotplug uevent if anything changed,
 * and re-queues itself (with HPD_RETRY_DELAY) for connectors that asked
 * for a retry.
 */
static void i915_hotplug_work_func(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private,
			     hotplug.hotplug_work.work);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_connector_list_iter conn_iter;
	struct intel_connector *connector;
	u32 changed = 0, retry = 0;
	u32 hpd_event_bits;
	u32 hpd_retry_bits;

	mutex_lock(&dev->mode_config.mutex);
	drm_dbg_kms(&dev_priv->drm, "running encoder hotplug functions\n");

	spin_lock_irq(&dev_priv->irq_lock);

	/* Snapshot and clear the pending bits under the irq lock. */
	hpd_event_bits = dev_priv->hotplug.event_bits;
	dev_priv->hotplug.event_bits = 0;
	hpd_retry_bits = dev_priv->hotplug.retry_bits;
	dev_priv->hotplug.retry_bits = 0;

	/* Enable polling for connectors which had HPD IRQ storms */
	intel_hpd_irq_storm_switch_to_polling(dev_priv);

	spin_unlock_irq(&dev_priv->irq_lock);

	drm_connector_list_iter_begin(dev, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		enum hpd_pin pin;
		u32 hpd_bit;

		pin = intel_connector_hpd_pin(connector);
		if (pin == HPD_NONE)
			continue;

		hpd_bit = BIT(pin);
		if ((hpd_event_bits | hpd_retry_bits) & hpd_bit) {
			struct intel_encoder *encoder =
				intel_attached_encoder(connector);

			/* A fresh event resets the retry counter. */
			if (hpd_event_bits & hpd_bit)
				connector->hotplug_retries = 0;
			else
				connector->hotplug_retries++;

			drm_dbg_kms(&dev_priv->drm,
				    "Connector %s (pin %i) received hotplug event. (retry %d)\n",
				    connector->base.name, pin,
				    connector->hotplug_retries);

			switch (encoder->hotplug(encoder, connector)) {
			case INTEL_HOTPLUG_UNCHANGED:
				break;
			case INTEL_HOTPLUG_CHANGED:
				changed |= hpd_bit;
				break;
			case INTEL_HOTPLUG_RETRY:
				retry |= hpd_bit;
				break;
			}
		}
	}
	drm_connector_list_iter_end(&conn_iter);
	mutex_unlock(&dev->mode_config.mutex);

	if (changed)
		drm_kms_helper_hotplug_event(dev);

	/* Remove shared HPD pins that have changed */
	retry &= ~changed;
	if (retry) {
		spin_lock_irq(&dev_priv->irq_lock);
		dev_priv->hotplug.retry_bits |= retry;
		spin_unlock_irq(&dev_priv->irq_lock);

		mod_delayed_work(system_wq, &dev_priv->hotplug.hotplug_work,
				 msecs_to_jiffies(HPD_RETRY_DELAY));
	}
}
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
/*
 * intel_hpd_irq_handler - main hotplug irq handler
 * @dev_priv: drm_i915_private
 * @pin_mask: a mask of hpd pins that have triggered the irq
 * @long_mask: a mask of hpd pins that may be long hpd pulses
 *
 * This is the main hotplug irq handler for all platforms. The platform
 * specific irq handlers call this with bitmasks of the hpd pins that have
 * triggered (@pin_mask), and which of those pins may be long pulses
 * (@long_mask). The @long_mask is ignored for pins whose encoder has no
 * ->hpd_pulse() handler.
 *
 * Here we do hotplug irq storm detection and mitigation, and pass further
 * processing to the appropriate bottom halves. Called in (hard) irq
 * context, hence the plain spin_lock() on irq_lock.
 */
void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
			   u32 pin_mask, u32 long_mask)
{
	struct intel_encoder *encoder;
	bool storm_detected = false;
	bool queue_dig = false, queue_hp = false;
	u32 long_hpd_pulse_mask = 0;
	u32 short_hpd_pulse_mask = 0;
	enum hpd_pin pin;

	if (!pin_mask)
		return;

	spin_lock(&dev_priv->irq_lock);

	/*
	 * Determine whether ->hpd_pulse() exists for each pin, and
	 * whether we have a short or a long pulse. A pin may be served
	 * by more than one encoder, and only some of them may have
	 * ->hpd_pulse().
	 */
	for_each_intel_encoder(&dev_priv->drm, encoder) {
		enum port port = encoder->port;
		bool long_hpd;

		pin = encoder->hpd_pin;
		if (!(BIT(pin) & pin_mask))
			continue;

		if (!intel_encoder_has_hpd_pulse(encoder))
			continue;

		long_hpd = long_mask & BIT(pin);

		drm_dbg(&dev_priv->drm,
			"digital hpd on [ENCODER:%d:%s] - %s\n",
			encoder->base.base.id, encoder->base.name,
			long_hpd ? "long" : "short");
		queue_dig = true;

		if (long_hpd) {
			long_hpd_pulse_mask |= BIT(pin);
			dev_priv->hotplug.long_port_mask |= BIT(port);
		} else {
			short_hpd_pulse_mask |= BIT(pin);
			dev_priv->hotplug.short_port_mask |= BIT(port);
		}
	}

	/* Now process each pin independently. */
	for_each_hpd_pin(pin) {
		bool long_hpd;

		if (!(BIT(pin) & pin_mask))
			continue;

		if (dev_priv->hotplug.stats[pin].state == HPD_DISABLED) {
			/*
			 * On GMCH platforms the interrupt mask bits only
			 * prevent irq generation, not the setting of the
			 * hotplug bits itself. So only WARN about unexpected
			 * interrupts on saner platforms.
			 */
			drm_WARN_ONCE(&dev_priv->drm, !HAS_GMCH(dev_priv),
				      "Received HPD interrupt on pin %d although disabled\n",
				      pin);
			continue;
		}

		if (dev_priv->hotplug.stats[pin].state != HPD_ENABLED)
			continue;

		/*
		 * Pins with an ->hpd_pulse() handler are processed by the
		 * dig port work; anything else goes through the generic
		 * hotplug work via event_bits. Pulse-less events are
		 * treated as long HPD for storm accounting.
		 */
		if (((short_hpd_pulse_mask | long_hpd_pulse_mask) & BIT(pin))) {
			long_hpd = long_hpd_pulse_mask & BIT(pin);
		} else {
			dev_priv->hotplug.event_bits |= BIT(pin);
			long_hpd = true;
			queue_hp = true;
		}

		if (intel_hpd_irq_storm_detect(dev_priv, pin, long_hpd)) {
			dev_priv->hotplug.event_bits &= ~BIT(pin);
			storm_detected = true;
			queue_hp = true;
		}
	}

	/*
	 * Disable any IRQs that storms were detected on. Polling enablement
	 * happens later in the hotplug work.
	 */
	if (storm_detected)
		intel_hpd_irq_setup(dev_priv);
	spin_unlock(&dev_priv->irq_lock);

	/*
	 * The actual hotplug handling can grab modeset locks, so it is
	 * deferred to the dedicated workqueues rather than run here in
	 * irq context.
	 */
	if (queue_dig)
		queue_work(dev_priv->hotplug.dp_wq, &dev_priv->hotplug.dig_port_work);
	if (queue_hp)
		queue_delayed_work(system_wq, &dev_priv->hotplug.hotplug_work, 0);
}
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594void intel_hpd_init(struct drm_i915_private *dev_priv)
595{
596 int i;
597
598 if (!HAS_DISPLAY(dev_priv))
599 return;
600
601 for_each_hpd_pin(i) {
602 dev_priv->hotplug.stats[i].count = 0;
603 dev_priv->hotplug.stats[i].state = HPD_ENABLED;
604 }
605
606
607
608
609
610 spin_lock_irq(&dev_priv->irq_lock);
611 intel_hpd_irq_setup(dev_priv);
612 spin_unlock_irq(&dev_priv->irq_lock);
613}
614
/*
 * Worker that applies the current poll_enabled setting to all connectors
 * with an HPD pin: when enabling, HPD-capable connectors are switched to
 * connect/disconnect polling; when disabling, each connector's normal
 * polling mode is restored and a manual hotplug probe is run to catch any
 * events missed in the meantime.
 */
static void i915_hpd_poll_init_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private,
			     hotplug.poll_init_work);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_connector_list_iter conn_iter;
	struct intel_connector *connector;
	bool enabled;

	mutex_lock(&dev->mode_config.mutex);

	enabled = READ_ONCE(dev_priv->hotplug.poll_enabled);

	drm_connector_list_iter_begin(dev, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		enum hpd_pin pin;

		pin = intel_connector_hpd_pin(connector);
		if (pin == HPD_NONE)
			continue;

		connector->base.polled = connector->polled;

		if (enabled && connector->base.polled == DRM_CONNECTOR_POLL_HPD)
			connector->base.polled = DRM_CONNECTOR_POLL_CONNECT |
				DRM_CONNECTOR_POLL_DISCONNECT;
	}
	drm_connector_list_iter_end(&conn_iter);

	if (enabled)
		drm_kms_helper_poll_enable(dev);

	mutex_unlock(&dev->mode_config.mutex);

	/*
	 * We might have missed any hotplugs that happened while we were
	 * in the middle of disabling polling
	 */
	if (!enabled)
		drm_helper_hpd_irq_event(dev);
}
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
/*
 * intel_hpd_poll_enable - enable polling for connectors with hpd
 * @dev_priv: i915 device instance
 *
 * Enables connector polling for all connectors which support HPD, for use
 * when HPD interrupts cannot be relied upon.
 *
 * Also see: intel_hpd_init() and intel_hpd_poll_disable().
 */
void intel_hpd_poll_enable(struct drm_i915_private *dev_priv)
{
	if (!HAS_DISPLAY(dev_priv))
		return;

	WRITE_ONCE(dev_priv->hotplug.poll_enabled, true);

	/*
	 * The actual switch-over is done in a worker: the worker takes
	 * dev->mode_config.mutex, which a caller of this function may
	 * already be holding (NOTE(review): presumed reason for the
	 * deferral — confirm against callers).
	 */
	schedule_work(&dev_priv->hotplug.poll_init_work);
}
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
/*
 * intel_hpd_poll_disable - disable polling for connectors with hpd
 * @dev_priv: i915 device instance
 *
 * Disables connector polling and restores normal HPD-based detection; the
 * actual work is done in the poll_init worker, which also runs a manual
 * hotplug probe to catch events missed while polling was active.
 *
 * Also see: intel_hpd_init() and intel_hpd_poll_enable().
 */
void intel_hpd_poll_disable(struct drm_i915_private *dev_priv)
{
	if (!HAS_DISPLAY(dev_priv))
		return;

	WRITE_ONCE(dev_priv->hotplug.poll_enabled, false);
	schedule_work(&dev_priv->hotplug.poll_init_work);
}
717
718void intel_hpd_init_work(struct drm_i915_private *dev_priv)
719{
720 INIT_DELAYED_WORK(&dev_priv->hotplug.hotplug_work,
721 i915_hotplug_work_func);
722 INIT_WORK(&dev_priv->hotplug.dig_port_work, i915_digport_work_func);
723 INIT_WORK(&dev_priv->hotplug.poll_init_work, i915_hpd_poll_init_work);
724 INIT_DELAYED_WORK(&dev_priv->hotplug.reenable_work,
725 intel_hpd_irq_storm_reenable_work);
726}
727
/*
 * Tear down all pending hotplug processing: clear the pending bits under
 * the irq lock first (so an irq racing with us cannot re-queue work based
 * on stale state), then synchronously cancel every work item.
 */
void intel_hpd_cancel_work(struct drm_i915_private *dev_priv)
{
	if (!HAS_DISPLAY(dev_priv))
		return;

	spin_lock_irq(&dev_priv->irq_lock);

	dev_priv->hotplug.long_port_mask = 0;
	dev_priv->hotplug.short_port_mask = 0;
	dev_priv->hotplug.event_bits = 0;
	dev_priv->hotplug.retry_bits = 0;

	spin_unlock_irq(&dev_priv->irq_lock);

	cancel_work_sync(&dev_priv->hotplug.dig_port_work);
	cancel_delayed_work_sync(&dev_priv->hotplug.hotplug_work);
	cancel_work_sync(&dev_priv->hotplug.poll_init_work);
	cancel_delayed_work_sync(&dev_priv->hotplug.reenable_work);
}
747
748bool intel_hpd_disable(struct drm_i915_private *dev_priv, enum hpd_pin pin)
749{
750 bool ret = false;
751
752 if (pin == HPD_NONE)
753 return false;
754
755 spin_lock_irq(&dev_priv->irq_lock);
756 if (dev_priv->hotplug.stats[pin].state == HPD_ENABLED) {
757 dev_priv->hotplug.stats[pin].state = HPD_DISABLED;
758 ret = true;
759 }
760 spin_unlock_irq(&dev_priv->irq_lock);
761
762 return ret;
763}
764
765void intel_hpd_enable(struct drm_i915_private *dev_priv, enum hpd_pin pin)
766{
767 if (pin == HPD_NONE)
768 return;
769
770 spin_lock_irq(&dev_priv->irq_lock);
771 dev_priv->hotplug.stats[pin].state = HPD_ENABLED;
772 spin_unlock_irq(&dev_priv->irq_lock);
773}
774