1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24#include <linux/kernel.h>
25
26#include "i915_drv.h"
27#include "intel_display_types.h"
28#include "intel_hotplug.h"
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
/**
 * intel_hpd_pin_default - return the default HPD pin for a port
 * @dev_priv: i915 device instance
 * @port: the port to map to an HPD pin
 *
 * Maps @port to the HPD pin used for hotplug interrupt handling,
 * going through the port's PHY in the generic case.
 *
 * Returns: the hpd_pin associated with @port, or HPD_NONE if the PHY
 * is not handled for this platform.
 */
enum hpd_pin intel_hpd_pin_default(struct drm_i915_private *dev_priv,
				   enum port port)
{
	enum phy phy = intel_port_to_phy(dev_priv, port);

	/*
	 * Rocket Lake with a TGP PCH maps ports straight to pins instead
	 * of going through the PHY.
	 * NOTE(review): assumes PORT_A.. maps 1:1 to HPD_PORT_A.. on this
	 * combination -- confirm against the platform's pin assignment.
	 */
	if (IS_ROCKETLAKE(dev_priv) && HAS_PCH_TGP(dev_priv))
		return HPD_PORT_A + port - PORT_A;

	switch (phy) {
	case PHY_F:
		/* On CNL, port F shares the HPD pin of port E */
		return IS_CNL_WITH_PORT_F(dev_priv) ? HPD_PORT_E : HPD_PORT_F;
	case PHY_A ... PHY_E:
	case PHY_G ... PHY_I:
		return HPD_PORT_A + phy - PHY_A;
	default:
		MISSING_CASE(phy);
		return HPD_NONE;
	}
}
112
/* Window (in ms) over which HPD interrupts are counted for storm detection */
#define HPD_STORM_DETECT_PERIOD 1000
/* Delay (in ms) before re-enabling HPD on a pin disabled by a storm */
#define HPD_STORM_REENABLE_DELAY (2 * 60 * 1000)
/* Delay (in ms) before re-running hotplug work for pins that asked to retry */
#define HPD_RETRY_DELAY 1000
116
117static enum hpd_pin
118intel_connector_hpd_pin(struct intel_connector *connector)
119{
120 struct intel_encoder *encoder = intel_attached_encoder(connector);
121
122
123
124
125
126
127
128 return encoder ? encoder->hpd_pin : HPD_NONE;
129}
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
/**
 * intel_hpd_irq_storm_detect - gather stats and detect an HPD IRQ storm on a pin
 * @dev_priv: i915 device instance
 * @pin: the pin on which the HPD interrupt arrived
 * @long_hpd: whether this was a long (true) or short (false) pulse
 *
 * Accumulates HPD interrupts per pin within a HPD_STORM_DETECT_PERIOD
 * window; a long pulse counts 10, a short pulse counts 1. Once the
 * accumulated count exceeds hpd_storm_threshold, the pin is marked
 * HPD_MARK_DISABLED and a storm is reported. Detection is skipped
 * entirely when the threshold is 0, or for short pulses when short-storm
 * detection is disabled.
 *
 * Called under dev_priv->irq_lock (see intel_hpd_irq_handler), which
 * protects the stats it updates.
 *
 * Returns: true if a storm was detected on @pin, false otherwise.
 */
static bool intel_hpd_irq_storm_detect(struct drm_i915_private *dev_priv,
				       enum hpd_pin pin, bool long_hpd)
{
	struct i915_hotplug *hpd = &dev_priv->hotplug;
	unsigned long start = hpd->stats[pin].last_jiffies;
	unsigned long end = start + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD);
	const int increment = long_hpd ? 10 : 1;
	const int threshold = hpd->hpd_storm_threshold;
	bool storm = false;

	if (!threshold ||
	    (!long_hpd && !dev_priv->hotplug.hpd_short_storm_enabled))
		return false;

	/* Outside the current detection window: restart the count. */
	if (!time_in_range(jiffies, start, end)) {
		hpd->stats[pin].last_jiffies = jiffies;
		hpd->stats[pin].count = 0;
	}

	hpd->stats[pin].count += increment;
	if (hpd->stats[pin].count > threshold) {
		hpd->stats[pin].state = HPD_MARK_DISABLED;
		drm_dbg_kms(&dev_priv->drm,
			    "HPD interrupt storm detected on PIN %d\n", pin);
		storm = true;
	} else {
		drm_dbg_kms(&dev_priv->drm,
			    "Received HPD interrupt on PIN %d - cnt: %d\n",
			    pin,
			    hpd->stats[pin].count);
	}

	return storm;
}
194
/*
 * Switch every connector whose HPD pin was marked HPD_MARK_DISABLED by
 * storm detection over to connect/disconnect polling, then enable the
 * poll helper and schedule the delayed work that will re-enable HPD
 * after HPD_STORM_REENABLE_DELAY.
 *
 * Must be called with dev_priv->irq_lock held (asserted below).
 */
static void
intel_hpd_irq_storm_switch_to_polling(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct drm_connector_list_iter conn_iter;
	struct intel_connector *connector;
	bool hpd_disabled = false;

	lockdep_assert_held(&dev_priv->irq_lock);

	drm_connector_list_iter_begin(dev, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		enum hpd_pin pin;

		/* Only connectors currently relying on HPD are affected. */
		if (connector->base.polled != DRM_CONNECTOR_POLL_HPD)
			continue;

		pin = intel_connector_hpd_pin(connector);
		if (pin == HPD_NONE ||
		    dev_priv->hotplug.stats[pin].state != HPD_MARK_DISABLED)
			continue;

		drm_info(&dev_priv->drm,
			 "HPD interrupt storm detected on connector %s: "
			 "switching from hotplug detection to polling\n",
			 connector->base.name);

		dev_priv->hotplug.stats[pin].state = HPD_DISABLED;
		connector->base.polled = DRM_CONNECTOR_POLL_CONNECT |
			DRM_CONNECTOR_POLL_DISCONNECT;
		hpd_disabled = true;
	}
	drm_connector_list_iter_end(&conn_iter);

	/* Enable polling and queue hotplug re-enabling. */
	if (hpd_disabled) {
		drm_kms_helper_poll_enable(dev);
		mod_delayed_work(system_wq, &dev_priv->hotplug.reenable_work,
				 msecs_to_jiffies(HPD_STORM_REENABLE_DELAY));
	}
}
236
/*
 * Delayed work re-enabling HPD on pins that were disabled by storm
 * detection: restores each affected connector's polling mode, flips the
 * pin state back to HPD_ENABLED and re-runs the HPD interrupt setup.
 *
 * Takes a runtime PM wakeref for the duration since hpd_irq_setup may
 * touch hardware.
 */
static void intel_hpd_irq_storm_reenable_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv),
			     hotplug.reenable_work.work);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_connector_list_iter conn_iter;
	struct intel_connector *connector;
	intel_wakeref_t wakeref;
	enum hpd_pin pin;

	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

	spin_lock_irq(&dev_priv->irq_lock);

	drm_connector_list_iter_begin(dev, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		pin = intel_connector_hpd_pin(connector);
		if (pin == HPD_NONE ||
		    dev_priv->hotplug.stats[pin].state != HPD_DISABLED)
			continue;

		/* Restore the connector's default polling mode. */
		if (connector->base.polled != connector->polled)
			drm_dbg(&dev_priv->drm,
				"Reenabling HPD on connector %s\n",
				connector->base.name);
		connector->base.polled = connector->polled;
	}
	drm_connector_list_iter_end(&conn_iter);

	for_each_hpd_pin(pin) {
		if (dev_priv->hotplug.stats[pin].state == HPD_DISABLED)
			dev_priv->hotplug.stats[pin].state = HPD_ENABLED;
	}

	if (dev_priv->display_irqs_enabled && dev_priv->display.hpd_irq_setup)
		dev_priv->display.hpd_irq_setup(dev_priv);

	spin_unlock_irq(&dev_priv->irq_lock);

	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
}
279
280enum intel_hotplug_state
281intel_encoder_hotplug(struct intel_encoder *encoder,
282 struct intel_connector *connector)
283{
284 struct drm_device *dev = connector->base.dev;
285 enum drm_connector_status old_status;
286 u64 old_epoch_counter;
287 bool ret = false;
288
289 drm_WARN_ON(dev, !mutex_is_locked(&dev->mode_config.mutex));
290 old_status = connector->base.status;
291 old_epoch_counter = connector->base.epoch_counter;
292
293 connector->base.status =
294 drm_helper_probe_detect(&connector->base, NULL, false);
295
296 if (old_epoch_counter != connector->base.epoch_counter)
297 ret = true;
298
299 if (ret) {
300 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s (epoch counter %llu->%llu)\n",
301 connector->base.base.id,
302 connector->base.name,
303 drm_get_connector_status_name(old_status),
304 drm_get_connector_status_name(connector->base.status),
305 old_epoch_counter,
306 connector->base.epoch_counter);
307 return INTEL_HOTPLUG_CHANGED;
308 }
309 return INTEL_HOTPLUG_UNCHANGED;
310}
311
312static bool intel_encoder_has_hpd_pulse(struct intel_encoder *encoder)
313{
314 return intel_encoder_is_dig_port(encoder) &&
315 enc_to_dig_port(encoder)->hpd_pulse != NULL;
316}
317
/*
 * Work function dispatching long/short HPD pulses to the hpd_pulse
 * handlers of digital ports. Ports whose handler returns IRQ_NONE fall
 * back to the generic hotplug path via event_bits and the delayed
 * hotplug work.
 */
static void i915_digport_work_func(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, hotplug.dig_port_work);
	u32 long_port_mask, short_port_mask;
	struct intel_encoder *encoder;
	u32 old_bits = 0;

	/* Snapshot and clear the pending port masks under the irq lock. */
	spin_lock_irq(&dev_priv->irq_lock);
	long_port_mask = dev_priv->hotplug.long_port_mask;
	dev_priv->hotplug.long_port_mask = 0;
	short_port_mask = dev_priv->hotplug.short_port_mask;
	dev_priv->hotplug.short_port_mask = 0;
	spin_unlock_irq(&dev_priv->irq_lock);

	for_each_intel_encoder(&dev_priv->drm, encoder) {
		struct intel_digital_port *dig_port;
		enum port port = encoder->port;
		bool long_hpd, short_hpd;
		enum irqreturn ret;

		if (!intel_encoder_has_hpd_pulse(encoder))
			continue;

		long_hpd = long_port_mask & BIT(port);
		short_hpd = short_port_mask & BIT(port);

		if (!long_hpd && !short_hpd)
			continue;

		dig_port = enc_to_dig_port(encoder);

		ret = dig_port->hpd_pulse(dig_port, long_hpd);
		if (ret == IRQ_NONE) {
			/* Fall back to old-style hotplug handling. */
			old_bits |= BIT(encoder->hpd_pin);
		}
	}

	if (old_bits) {
		spin_lock_irq(&dev_priv->irq_lock);
		dev_priv->hotplug.event_bits |= old_bits;
		spin_unlock_irq(&dev_priv->irq_lock);
		queue_delayed_work(system_wq, &dev_priv->hotplug.hotplug_work, 0);
	}
}
364
365
366
367
368
369
370
371
372void intel_hpd_trigger_irq(struct intel_digital_port *dig_port)
373{
374 struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
375
376 spin_lock_irq(&i915->irq_lock);
377 i915->hotplug.short_port_mask |= BIT(dig_port->base.port);
378 spin_unlock_irq(&i915->irq_lock);
379
380 queue_work(i915->hotplug.dp_wq, &i915->hotplug.dig_port_work);
381}
382
383
384
385
/*
 * Delayed work implementing the generic hotplug path: consumes the
 * pending event/retry pin bits, re-probes the matching connectors via
 * their encoder's hotplug hook, fires a KMS hotplug event when anything
 * changed, and reschedules itself for pins that requested a retry.
 */
static void i915_hotplug_work_func(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private,
			     hotplug.hotplug_work.work);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_connector_list_iter conn_iter;
	struct intel_connector *connector;
	u32 changed = 0, retry = 0;
	u32 hpd_event_bits;
	u32 hpd_retry_bits;

	mutex_lock(&dev->mode_config.mutex);
	drm_dbg_kms(&dev_priv->drm, "running encoder hotplug functions\n");

	spin_lock_irq(&dev_priv->irq_lock);

	/* Snapshot and clear the pending bits under the irq lock. */
	hpd_event_bits = dev_priv->hotplug.event_bits;
	dev_priv->hotplug.event_bits = 0;
	hpd_retry_bits = dev_priv->hotplug.retry_bits;
	dev_priv->hotplug.retry_bits = 0;

	/* Move storm-marked pins over to polling while we hold the lock. */
	intel_hpd_irq_storm_switch_to_polling(dev_priv);

	spin_unlock_irq(&dev_priv->irq_lock);

	drm_connector_list_iter_begin(dev, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		enum hpd_pin pin;
		u32 hpd_bit;

		pin = intel_connector_hpd_pin(connector);
		if (pin == HPD_NONE)
			continue;

		hpd_bit = BIT(pin);
		if ((hpd_event_bits | hpd_retry_bits) & hpd_bit) {
			struct intel_encoder *encoder =
				intel_attached_encoder(connector);

			/* A fresh event resets the retry counter. */
			if (hpd_event_bits & hpd_bit)
				connector->hotplug_retries = 0;
			else
				connector->hotplug_retries++;

			drm_dbg_kms(&dev_priv->drm,
				    "Connector %s (pin %i) received hotplug event. (retry %d)\n",
				    connector->base.name, pin,
				    connector->hotplug_retries);

			switch (encoder->hotplug(encoder, connector)) {
			case INTEL_HOTPLUG_UNCHANGED:
				break;
			case INTEL_HOTPLUG_CHANGED:
				changed |= hpd_bit;
				break;
			case INTEL_HOTPLUG_RETRY:
				retry |= hpd_bit;
				break;
			}
		}
	}
	drm_connector_list_iter_end(&conn_iter);
	mutex_unlock(&dev->mode_config.mutex);

	if (changed)
		drm_kms_helper_hotplug_event(dev);

	/* Don't retry shared pins that already produced a change. */
	retry &= ~changed;
	if (retry) {
		spin_lock_irq(&dev_priv->irq_lock);
		dev_priv->hotplug.retry_bits |= retry;
		spin_unlock_irq(&dev_priv->irq_lock);

		mod_delayed_work(system_wq, &dev_priv->hotplug.hotplug_work,
				 msecs_to_jiffies(HPD_RETRY_DELAY));
	}
}
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
/**
 * intel_hpd_irq_handler - main hotplug irq handler
 * @dev_priv: i915 device instance
 * @pin_mask: mask of HPD pins that triggered the interrupt
 * @long_mask: mask of HPD pins whose pulse was a long pulse
 *
 * Called from interrupt context. Splits the triggered pins between the
 * digital-port pulse path (encoders with an hpd_pulse handler) and the
 * generic hotplug path, runs storm detection per pin, and queues the
 * corresponding work items.
 */
void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
			   u32 pin_mask, u32 long_mask)
{
	struct intel_encoder *encoder;
	bool storm_detected = false;
	bool queue_dig = false, queue_hp = false;
	u32 long_hpd_pulse_mask = 0;
	u32 short_hpd_pulse_mask = 0;
	enum hpd_pin pin;

	if (!pin_mask)
		return;

	spin_lock(&dev_priv->irq_lock);

	/*
	 * First pass: record long/short pulses for encoders that have an
	 * hpd_pulse handler, so the second pass can tell pulse-handled
	 * pins apart from generic ones.
	 */
	for_each_intel_encoder(&dev_priv->drm, encoder) {
		bool has_hpd_pulse = intel_encoder_has_hpd_pulse(encoder);
		enum port port = encoder->port;
		bool long_hpd;

		pin = encoder->hpd_pin;
		if (!(BIT(pin) & pin_mask))
			continue;

		if (!has_hpd_pulse)
			continue;

		long_hpd = long_mask & BIT(pin);

		drm_dbg(&dev_priv->drm,
			"digital hpd on [ENCODER:%d:%s] - %s\n",
			encoder->base.base.id, encoder->base.name,
			long_hpd ? "long" : "short");
		queue_dig = true;

		if (long_hpd) {
			long_hpd_pulse_mask |= BIT(pin);
			dev_priv->hotplug.long_port_mask |= BIT(port);
		} else {
			short_hpd_pulse_mask |= BIT(pin);
			dev_priv->hotplug.short_port_mask |= BIT(port);
		}
	}

	/* Second pass: per-pin storm detection and generic event queuing. */
	for_each_hpd_pin(pin) {
		bool long_hpd;

		if (!(BIT(pin) & pin_mask))
			continue;

		if (dev_priv->hotplug.stats[pin].state == HPD_DISABLED) {
			/*
			 * An interrupt on a disabled pin is unexpected,
			 * except on GMCH platforms (hence the !HAS_GMCH
			 * condition on the warning below).
			 */
			drm_WARN_ONCE(&dev_priv->drm, !HAS_GMCH(dev_priv),
				      "Received HPD interrupt on pin %d although disabled\n",
				      pin);
			continue;
		}

		if (dev_priv->hotplug.stats[pin].state != HPD_ENABLED)
			continue;

		/*
		 * Pins handled by the pulse path already know whether the
		 * pulse was long; otherwise queue a generic event and
		 * treat it as long for storm accounting.
		 */
		if (((short_hpd_pulse_mask | long_hpd_pulse_mask) & BIT(pin))) {
			long_hpd = long_hpd_pulse_mask & BIT(pin);
		} else {
			dev_priv->hotplug.event_bits |= BIT(pin);
			long_hpd = true;
			queue_hp = true;
		}

		if (intel_hpd_irq_storm_detect(dev_priv, pin, long_hpd)) {
			/* Drop the event; the pin switches to polling. */
			dev_priv->hotplug.event_bits &= ~BIT(pin);
			storm_detected = true;
			queue_hp = true;
		}
	}

	/*
	 * Re-run the HPD interrupt setup to mask out storm-disabled pins
	 * while still holding the irq lock.
	 */
	if (storm_detected && dev_priv->display_irqs_enabled)
		dev_priv->display.hpd_irq_setup(dev_priv);
	spin_unlock(&dev_priv->irq_lock);

	/*
	 * Queue the work items outside the lock; the work functions take
	 * the lock themselves.
	 */
	if (queue_dig)
		queue_work(dev_priv->hotplug.dp_wq, &dev_priv->hotplug.dig_port_work);
	if (queue_hp)
		queue_delayed_work(system_wq, &dev_priv->hotplug.hotplug_work, 0);
}
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
/**
 * intel_hpd_init - initializes and enables hpd support
 * @dev_priv: i915 device instance
 *
 * Resets all HPD pin statistics to enabled, disables polling, schedules
 * the poll-init work and (re-)runs the HPD interrupt setup.
 */
void intel_hpd_init(struct drm_i915_private *dev_priv)
{
	int i;

	for_each_hpd_pin(i) {
		dev_priv->hotplug.stats[i].count = 0;
		dev_priv->hotplug.stats[i].state = HPD_ENABLED;
	}

	WRITE_ONCE(dev_priv->hotplug.poll_enabled, false);
	schedule_work(&dev_priv->hotplug.poll_init_work);

	/*
	 * hpd_irq_setup must run under the irq lock; display_irqs_enabled
	 * is re-checked once the lock is held in case it changed between
	 * the unlocked check and acquiring the lock.
	 */
	if (dev_priv->display_irqs_enabled && dev_priv->display.hpd_irq_setup) {
		spin_lock_irq(&dev_priv->irq_lock);
		if (dev_priv->display_irqs_enabled)
			dev_priv->display.hpd_irq_setup(dev_priv);
		spin_unlock_irq(&dev_priv->irq_lock);
	}
}
634
/*
 * Work function applying the current poll_enabled setting: when polling
 * is enabled, HPD-capable connectors are switched to connect/disconnect
 * polling; when disabled, connectors revert to their default polling
 * mode and a hotplug event pass is run to catch anything missed.
 */
static void i915_hpd_poll_init_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private,
			     hotplug.poll_init_work);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_connector_list_iter conn_iter;
	struct intel_connector *connector;
	bool enabled;

	mutex_lock(&dev->mode_config.mutex);

	enabled = READ_ONCE(dev_priv->hotplug.poll_enabled);

	drm_connector_list_iter_begin(dev, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		enum hpd_pin pin;

		pin = intel_connector_hpd_pin(connector);
		if (pin == HPD_NONE)
			continue;

		connector->base.polled = connector->polled;

		if (enabled && connector->base.polled == DRM_CONNECTOR_POLL_HPD)
			connector->base.polled = DRM_CONNECTOR_POLL_CONNECT |
				DRM_CONNECTOR_POLL_DISCONNECT;
	}
	drm_connector_list_iter_end(&conn_iter);

	if (enabled)
		drm_kms_helper_poll_enable(dev);

	mutex_unlock(&dev->mode_config.mutex);

	/*
	 * We might have missed any hotplugs that happened while we were
	 * in the middle of disabling polling.
	 */
	if (!enabled)
		drm_helper_hpd_irq_event(dev);
}
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
/**
 * intel_hpd_poll_init - enable polling for connectors with hpd
 * @dev_priv: i915 device instance
 *
 * Sets poll_enabled and schedules the poll-init work, which applies the
 * switch to polling asynchronously under the mode_config mutex.
 */
void intel_hpd_poll_init(struct drm_i915_private *dev_priv)
{
	WRITE_ONCE(dev_priv->hotplug.poll_enabled, true);

	/*
	 * The actual connector updates happen in i915_hpd_poll_init_work,
	 * scheduled here rather than done inline.
	 */
	schedule_work(&dev_priv->hotplug.poll_init_work);
}
706
707void intel_hpd_init_work(struct drm_i915_private *dev_priv)
708{
709 INIT_DELAYED_WORK(&dev_priv->hotplug.hotplug_work,
710 i915_hotplug_work_func);
711 INIT_WORK(&dev_priv->hotplug.dig_port_work, i915_digport_work_func);
712 INIT_WORK(&dev_priv->hotplug.poll_init_work, i915_hpd_poll_init_work);
713 INIT_DELAYED_WORK(&dev_priv->hotplug.reenable_work,
714 intel_hpd_irq_storm_reenable_work);
715}
716
/*
 * Cancel all outstanding hotplug work. The pending bits/masks are
 * cleared under the irq lock first, so any work item that is already
 * running finds nothing left to process before being cancelled.
 */
void intel_hpd_cancel_work(struct drm_i915_private *dev_priv)
{
	spin_lock_irq(&dev_priv->irq_lock);

	dev_priv->hotplug.long_port_mask = 0;
	dev_priv->hotplug.short_port_mask = 0;
	dev_priv->hotplug.event_bits = 0;
	dev_priv->hotplug.retry_bits = 0;

	spin_unlock_irq(&dev_priv->irq_lock);

	cancel_work_sync(&dev_priv->hotplug.dig_port_work);
	cancel_delayed_work_sync(&dev_priv->hotplug.hotplug_work);
	cancel_work_sync(&dev_priv->hotplug.poll_init_work);
	cancel_delayed_work_sync(&dev_priv->hotplug.reenable_work);
}
733
734bool intel_hpd_disable(struct drm_i915_private *dev_priv, enum hpd_pin pin)
735{
736 bool ret = false;
737
738 if (pin == HPD_NONE)
739 return false;
740
741 spin_lock_irq(&dev_priv->irq_lock);
742 if (dev_priv->hotplug.stats[pin].state == HPD_ENABLED) {
743 dev_priv->hotplug.stats[pin].state = HPD_DISABLED;
744 ret = true;
745 }
746 spin_unlock_irq(&dev_priv->irq_lock);
747
748 return ret;
749}
750
/* Unconditionally re-enable HPD processing on @pin (no-op for HPD_NONE). */
void intel_hpd_enable(struct drm_i915_private *dev_priv, enum hpd_pin pin)
{
	if (pin == HPD_NONE)
		return;

	spin_lock_irq(&dev_priv->irq_lock);
	dev_priv->hotplug.stats[pin].state = HPD_ENABLED;
	spin_unlock_irq(&dev_priv->irq_lock);
}
760