#include <linux/kernel.h>

#include <drm/drmP.h>
#include <drm/i915_drm.h>

#include "i915_drv.h"
#include "intel_drv.h"
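
/*
 * Hotplug handling for i915: map HPD pins to ports, detect hotplug interrupt
 * storms and temporarily fall back to connector polling, and queue the
 * deferred work that re-probes connectors after a hotplug event.
 */

/* Map an HPD pin back to the port it services, or PORT_NONE if unknown. */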
enum port intel_hpd_pin_to_port(enum hpd_pin pin)
{
	switch (pin) {
	case HPD_PORT_A:
		return PORT_A;
	case HPD_PORT_B:
		return PORT_B;
	case HPD_PORT_C:
		return PORT_C;
	case HPD_PORT_D:
		return PORT_D;
	case HPD_PORT_E:
		return PORT_E;
	default:
		return PORT_NONE;
	}
}
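
/*
 * intel_hpd_pin - map a port to the HPD pin wired to it
 *
 * Unknown ports trigger a MISSING_CASE warning and return HPD_NONE.
 */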
enum hpd_pin intel_hpd_pin(enum port port)
{
	switch (port) {
	case PORT_A:
		return HPD_PORT_A;
	case PORT_B:
		return HPD_PORT_B;
	case PORT_C:
		return HPD_PORT_C;
	case PORT_D:
		return HPD_PORT_D;
	case PORT_E:
		return HPD_PORT_E;
	default:
		MISSING_CASE(port);
		return HPD_NONE;
	}
}

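/*
 * Storm detection window and the delay before HPD is re-enabled after a
 * storm forced a fall back to polling, both in milliseconds.
 */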
#define HPD_STORM_DETECT_PERIOD 1000
#define HPD_STORM_REENABLE_DELAY (2 * 60 * 1000)
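
/*
 * intel_hpd_irq_storm_detect - gather stats and detect an HPD irq storm
 *
 * Counts HPD interrupts per pin within a HPD_STORM_DETECT_PERIOD window. Once
 * the count exceeds hpd_storm_threshold (if the threshold is non-zero), the
 * pin is marked HPD_MARK_DISABLED and true is returned so the caller can
 * switch the affected connector over to polling. Called with
 * dev_priv->irq_lock held.
 */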
static bool intel_hpd_irq_storm_detect(struct drm_i915_private *dev_priv,
				       enum hpd_pin pin)
{
	unsigned long start = dev_priv->hotplug.stats[pin].last_jiffies;
	unsigned long end = start + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD);
	const int threshold = dev_priv->hotplug.hpd_storm_threshold;
	bool storm = false;

	if (!time_in_range(jiffies, start, end)) {
		dev_priv->hotplug.stats[pin].last_jiffies = jiffies;
		dev_priv->hotplug.stats[pin].count = 0;
		DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: 0\n", pin);
	} else if (dev_priv->hotplug.stats[pin].count > threshold &&
		   threshold) {
		dev_priv->hotplug.stats[pin].state = HPD_MARK_DISABLED;
		DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", pin);
		storm = true;
	} else {
		dev_priv->hotplug.stats[pin].count++;
		DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: %d\n", pin,
			      dev_priv->hotplug.stats[pin].count);
	}

	return storm;
}

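/*
 * Switch every connector whose pin was flagged HPD_MARK_DISABLED over to
 * connect/disconnect polling and schedule the delayed work that will
 * re-enable HPD later. Called with dev_priv->irq_lock held.
 */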
static void intel_hpd_irq_storm_disable(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct intel_connector *intel_connector;
	struct intel_encoder *intel_encoder;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	enum hpd_pin pin;
	bool hpd_disabled = false;

	lockdep_assert_held(&dev_priv->irq_lock);

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		if (connector->polled != DRM_CONNECTOR_POLL_HPD)
			continue;

		intel_connector = to_intel_connector(connector);
		intel_encoder = intel_connector->encoder;
		if (!intel_encoder)
			continue;

		pin = intel_encoder->hpd_pin;
		if (pin == HPD_NONE ||
		    dev_priv->hotplug.stats[pin].state != HPD_MARK_DISABLED)
			continue;

		DRM_INFO("HPD interrupt storm detected on connector %s: "
			 "switching from hotplug detection to polling\n",
			 connector->name);

		dev_priv->hotplug.stats[pin].state = HPD_DISABLED;
		connector->polled = DRM_CONNECTOR_POLL_CONNECT
			| DRM_CONNECTOR_POLL_DISCONNECT;
		hpd_disabled = true;
	}
	drm_connector_list_iter_end(&conn_iter);

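	/* Enable polling and schedule the delayed HPD re-enable. */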
	if (hpd_disabled) {
		drm_kms_helper_poll_enable(dev);
		mod_delayed_work(system_wq, &dev_priv->hotplug.reenable_work,
				 msecs_to_jiffies(HPD_STORM_REENABLE_DELAY));
	}
}

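/*
 * Delayed work: re-enable HPD detection on pins that were disabled by a
 * storm, restoring each affected connector's original polling mode.
 */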
static void intel_hpd_irq_storm_reenable_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv),
			     hotplug.reenable_work.work);
	struct drm_device *dev = &dev_priv->drm;
	int i;

	intel_runtime_pm_get(dev_priv);

	spin_lock_irq(&dev_priv->irq_lock);
	for_each_hpd_pin(i) {
		struct drm_connector *connector;
		struct drm_connector_list_iter conn_iter;

		if (dev_priv->hotplug.stats[i].state != HPD_DISABLED)
			continue;

		dev_priv->hotplug.stats[i].state = HPD_ENABLED;

		drm_connector_list_iter_begin(dev, &conn_iter);
		drm_for_each_connector_iter(connector, &conn_iter) {
			struct intel_connector *intel_connector = to_intel_connector(connector);

			if (intel_connector->encoder->hpd_pin == i) {
				if (connector->polled != intel_connector->polled)
					DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n",
							 connector->name);
				connector->polled = intel_connector->polled;
				if (!connector->polled)
					connector->polled = DRM_CONNECTOR_POLL_HPD;
			}
		}
		drm_connector_list_iter_end(&conn_iter);
	}
	if (dev_priv->display_irqs_enabled && dev_priv->display.hpd_irq_setup)
		dev_priv->display.hpd_irq_setup(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	intel_runtime_pm_put(dev_priv);
}

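/* Re-probe a connector and report whether its status changed. */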
static bool intel_hpd_irq_event(struct drm_device *dev,
				struct drm_connector *connector)
{
	enum drm_connector_status old_status;

	WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
	old_status = connector->status;

	connector->status = drm_helper_probe_detect(connector, NULL, false);

	if (old_status == connector->status)
		return false;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n",
		      connector->base.id,
		      connector->name,
		      drm_get_connector_status_name(old_status),
		      drm_get_connector_status_name(connector->status));

	return true;
}

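/*
 * Work handler for digital-port hotplug: drain the long/short pulse masks
 * and call each port's hpd_pulse hook; pulses the hook does not handle are
 * handed over to the generic hotplug work.
 */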
static void i915_digport_work_func(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, hotplug.dig_port_work);
	u32 long_port_mask, short_port_mask;
	struct intel_digital_port *intel_dig_port;
	int i;
	u32 old_bits = 0;

	spin_lock_irq(&dev_priv->irq_lock);
	long_port_mask = dev_priv->hotplug.long_port_mask;
	dev_priv->hotplug.long_port_mask = 0;
	short_port_mask = dev_priv->hotplug.short_port_mask;
	dev_priv->hotplug.short_port_mask = 0;
	spin_unlock_irq(&dev_priv->irq_lock);

	for (i = 0; i < I915_MAX_PORTS; i++) {
		bool valid = false;
		bool long_hpd = false;
		intel_dig_port = dev_priv->hotplug.irq_port[i];
		if (!intel_dig_port || !intel_dig_port->hpd_pulse)
			continue;

		if (long_port_mask & (1 << i)) {
			valid = true;
			long_hpd = true;
		} else if (short_port_mask & (1 << i))
			valid = true;

		if (valid) {
			enum irqreturn ret;

			ret = intel_dig_port->hpd_pulse(intel_dig_port, long_hpd);
			if (ret == IRQ_NONE) {
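				/*
				 * hpd_pulse didn't handle it: fall back to
				 * the generic hotplug path.
				 */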
				old_bits |= (1 << intel_dig_port->base.hpd_pin);
			}
		}
	}

	if (old_bits) {
		spin_lock_irq(&dev_priv->irq_lock);
		dev_priv->hotplug.event_bits |= old_bits;
		spin_unlock_irq(&dev_priv->irq_lock);
		schedule_work(&dev_priv->hotplug.hotplug_work);
	}
}

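/*
 * Generic hotplug work: handles the deferred part of a hotplug interrupt,
 * re-probing every connector whose pin has a pending event.
 */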
static void i915_hotplug_work_func(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, hotplug.hotplug_work);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_connector *intel_connector;
	struct intel_encoder *intel_encoder;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	bool changed = false;
	u32 hpd_event_bits;

	mutex_lock(&dev->mode_config.mutex);
	DRM_DEBUG_KMS("running encoder hotplug functions\n");

	spin_lock_irq(&dev_priv->irq_lock);

	hpd_event_bits = dev_priv->hotplug.event_bits;
	dev_priv->hotplug.event_bits = 0;
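
	/* Disable HPD on any connector whose pin hit an interrupt storm. */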
	intel_hpd_irq_storm_disable(dev_priv);

	spin_unlock_irq(&dev_priv->irq_lock);

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		intel_connector = to_intel_connector(connector);
		if (!intel_connector->encoder)
			continue;
		intel_encoder = intel_connector->encoder;
		if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
			DRM_DEBUG_KMS("Connector %s (pin %i) received hotplug event.\n",
				      connector->name, intel_encoder->hpd_pin);
			if (intel_encoder->hot_plug)
				intel_encoder->hot_plug(intel_encoder);
			if (intel_hpd_irq_event(dev, connector))
				changed = true;
		}
	}
	drm_connector_list_iter_end(&conn_iter);
	mutex_unlock(&dev->mode_config.mutex);

	if (changed)
		drm_kms_helper_hotplug_event(dev);
}
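
/**
 * intel_hpd_irq_handler - main hotplug irq handler
 * @dev_priv: drm_i915_private
 * @pin_mask: a mask of hpd pins that have triggered the irq
 * @long_mask: a mask of hpd pins that may be long hpd pulses
 *
 * Decodes which pins fired, records long/short pulses for digital ports,
 * runs HPD storm detection, and queues the digital-port and generic hotplug
 * work items. Runs in interrupt context, so it only takes the irq spinlock
 * and defers everything else to workqueues.
 */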
void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
			   u32 pin_mask, u32 long_mask)
{
	int i;
	enum port port;
	bool storm_detected = false;
	bool queue_dig = false, queue_hp = false;
	bool is_dig_port;

	if (!pin_mask)
		return;

	spin_lock(&dev_priv->irq_lock);
	for_each_hpd_pin(i) {
		if (!(BIT(i) & pin_mask))
			continue;

		port = intel_hpd_pin_to_port(i);
		is_dig_port = port != PORT_NONE &&
			dev_priv->hotplug.irq_port[port];

		if (is_dig_port) {
			bool long_hpd = long_mask & BIT(i);

			DRM_DEBUG_DRIVER("digital hpd port %c - %s\n", port_name(port),
					 long_hpd ? "long" : "short");
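			/*
			 * Queue the digital-port work for both long and short
			 * pulses; short pulses skip the storm detection and
			 * event bookkeeping below.
			 */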
			queue_dig = true;
			if (long_hpd) {
				dev_priv->hotplug.long_port_mask |= (1 << port);
			} else {
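				/* Short pulses only trigger the digital-port work. */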
				dev_priv->hotplug.short_port_mask |= (1 << port);
				continue;
			}
		}

		if (dev_priv->hotplug.stats[i].state == HPD_DISABLED) {
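			/*
			 * On GMCH platforms the interrupt mask bits only
			 * prevent irq generation, not the setting of the
			 * hotplug bits themselves, so only warn about
			 * unexpected interrupts on other platforms.
			 */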
			WARN_ONCE(!HAS_GMCH_DISPLAY(dev_priv),
				  "Received HPD interrupt on pin %d although disabled\n", i);
			continue;
		}

		if (dev_priv->hotplug.stats[i].state != HPD_ENABLED)
			continue;

		if (!is_dig_port) {
			dev_priv->hotplug.event_bits |= BIT(i);
			queue_hp = true;
		}

		if (intel_hpd_irq_storm_detect(dev_priv, i)) {
			dev_priv->hotplug.event_bits &= ~BIT(i);
			storm_detected = true;
		}
	}

	if (storm_detected && dev_priv->display_irqs_enabled)
		dev_priv->display.hpd_irq_setup(dev_priv);
	spin_unlock(&dev_priv->irq_lock);

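	/*
	 * The hotplug work can grab modeset locks, which must never be taken
	 * from interrupt context, so hand everything off to workqueues.
	 */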
	if (queue_dig)
		queue_work(dev_priv->hotplug.dp_wq, &dev_priv->hotplug.dig_port_work);
	if (queue_hp)
		schedule_work(&dev_priv->hotplug.hotplug_work);
}

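/**
 * intel_hpd_init - initialize and enable hotplug support
 * @dev_priv: drm_i915_private
 *
 * Resets the per-pin interrupt statistics, marks every pin HPD_ENABLED,
 * schedules the poll-init work with polling disabled, and programs the
 * hardware hotplug interrupt setup while holding the irq lock.
 */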
void intel_hpd_init(struct drm_i915_private *dev_priv)
{
	int i;

	for_each_hpd_pin(i) {
		dev_priv->hotplug.stats[i].count = 0;
		dev_priv->hotplug.stats[i].state = HPD_ENABLED;
	}

	WRITE_ONCE(dev_priv->hotplug.poll_enabled, false);
	schedule_work(&dev_priv->hotplug.poll_init_work);
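
	/*
	 * Program the hardware hotplug setup; display_irqs_enabled is
	 * rechecked under the irq lock since it can change concurrently.
	 */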
	if (dev_priv->display_irqs_enabled && dev_priv->display.hpd_irq_setup) {
		spin_lock_irq(&dev_priv->irq_lock);
		if (dev_priv->display_irqs_enabled)
			dev_priv->display.hpd_irq_setup(dev_priv);
		spin_unlock_irq(&dev_priv->irq_lock);
	}
}

static void i915_hpd_poll_init_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private,
			     hotplug.poll_init_work);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	bool enabled;

	mutex_lock(&dev->mode_config.mutex);

	enabled = READ_ONCE(dev_priv->hotplug.poll_enabled);

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		struct intel_connector *intel_connector =
			to_intel_connector(connector);
		connector->polled = intel_connector->polled;
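
		/*
		 * MST connectors are re-probed by the MST helpers; leave
		 * their polling mode alone.
		 */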
		if (intel_connector->mst_port)
			continue;

		if (!connector->polled && I915_HAS_HOTPLUG(dev_priv) &&
		    intel_connector->encoder->hpd_pin > HPD_NONE) {
			connector->polled = enabled ?
				DRM_CONNECTOR_POLL_CONNECT |
				DRM_CONNECTOR_POLL_DISCONNECT :
				DRM_CONNECTOR_POLL_HPD;
		}
	}
	drm_connector_list_iter_end(&conn_iter);

	if (enabled)
		drm_kms_helper_poll_enable(dev);

	mutex_unlock(&dev->mode_config.mutex);

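	/*
	 * When switching back to HPD, kick off a probe to pick up anything
	 * that changed while polling was being torn down.
	 */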
	if (!enabled)
		drm_helper_hpd_irq_event(dev);
}

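/**
 * intel_hpd_poll_init - enable polling for connector detection
 * @dev_priv: drm_i915_private
 *
 * Switches connectors over from HPD-based detection to periodic polling by
 * scheduling the poll-init work with polling enabled.
 */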
void intel_hpd_poll_init(struct drm_i915_private *dev_priv)
{
	WRITE_ONCE(dev_priv->hotplug.poll_enabled, true);
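
	/*
	 * Done from a worker because the caller may already hold
	 * mode_config.mutex, which the poll-init work needs to take.
	 */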
	schedule_work(&dev_priv->hotplug.poll_init_work);
}

void intel_hpd_init_work(struct drm_i915_private *dev_priv)
{
	INIT_WORK(&dev_priv->hotplug.hotplug_work, i915_hotplug_work_func);
	INIT_WORK(&dev_priv->hotplug.dig_port_work, i915_digport_work_func);
	INIT_WORK(&dev_priv->hotplug.poll_init_work, i915_hpd_poll_init_work);
	INIT_DELAYED_WORK(&dev_priv->hotplug.reenable_work,
			  intel_hpd_irq_storm_reenable_work);
}

void intel_hpd_cancel_work(struct drm_i915_private *dev_priv)
{
	spin_lock_irq(&dev_priv->irq_lock);

	dev_priv->hotplug.long_port_mask = 0;
	dev_priv->hotplug.short_port_mask = 0;
	dev_priv->hotplug.event_bits = 0;

	spin_unlock_irq(&dev_priv->irq_lock);

	cancel_work_sync(&dev_priv->hotplug.dig_port_work);
	cancel_work_sync(&dev_priv->hotplug.hotplug_work);
	cancel_work_sync(&dev_priv->hotplug.poll_init_work);
	cancel_delayed_work_sync(&dev_priv->hotplug.reenable_work);
}

bool intel_hpd_disable(struct drm_i915_private *dev_priv, enum hpd_pin pin)
{
	bool ret = false;

	if (pin == HPD_NONE)
		return false;

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->hotplug.stats[pin].state == HPD_ENABLED) {
		dev_priv->hotplug.stats[pin].state = HPD_DISABLED;
		ret = true;
	}
	spin_unlock_irq(&dev_priv->irq_lock);

	return ret;
}

void intel_hpd_enable(struct drm_i915_private *dev_priv, enum hpd_pin pin)
{
	if (pin == HPD_NONE)
		return;

	spin_lock_irq(&dev_priv->irq_lock);
	dev_priv->hotplug.stats[pin].state = HPD_ENABLED;
	spin_unlock_irq(&dev_priv->irq_lock);
}