#include "dm_services_types.h"
#include "dc.h"

#include "amdgpu.h"
#include "amdgpu_dm.h"
#include "amdgpu_dm_irq.h"

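/*
 * Private declarations.
 *
 * Handlers can be registered in one of two contexts (see
 * amdgpu_dm_irq_register_interrupt() below): high-IRQ-context handlers are
 * called directly from the interrupt handler, while low-IRQ-context handlers
 * are deferred to a work queue.
 */

/**
 * struct amdgpu_dm_irq_handler_data - data for a registered DM interrupt handler
 *
 * @list: entry in the per-source handler list
 * @handler: handler function to call
 * @handler_arg: argument passed to @handler when it is invoked
 * @dm: display manager this handler belongs to
 * @irq_source: DC interrupt source the handler is registered for
 * @work: work item used to run the handler outside interrupt context
 *        (low-IRQ-context handlers only)
 */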
struct amdgpu_dm_irq_handler_data {
	struct list_head list;
	interrupt_handler handler;
	void *handler_arg;

	struct amdgpu_display_manager *dm;

	enum dc_irq_source irq_source;
	struct work_struct work;
};

#define DM_IRQ_TABLE_LOCK(adev, flags) \
	spin_lock_irqsave(&adev->dm.irq_handler_list_table_lock, flags)

#define DM_IRQ_TABLE_UNLOCK(adev, flags) \
	spin_unlock_irqrestore(&adev->dm.irq_handler_list_table_lock, flags)

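/* Initialize the fields of a handler data struct that are common to both
 * interrupt contexts.
 */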
static void init_handler_common_data(struct amdgpu_dm_irq_handler_data *hcd,
				     void (*ih)(void *),
				     void *args,
				     struct amdgpu_display_manager *dm)
{
	hcd->handler = ih;
	hcd->handler_arg = args;
	hcd->dm = dm;
}

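/**
 * dm_irq_work_func() - Handle an IRQ outside of the interrupt handler proper
 *
 * @work: work struct embedded in the handler data
 *
 * Runs the registered low-IRQ-context handler from the work queue.
 */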
static void dm_irq_work_func(struct work_struct *work)
{
	struct amdgpu_dm_irq_handler_data *handler_data =
		container_of(work, struct amdgpu_dm_irq_handler_data, work);

	handler_data->handler(handler_data->handler_arg);
}

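/*
 * Remove a handler and return a pointer to the handler list from which the
 * handler was removed, or NULL if the handler was not found.
 */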
static struct list_head *remove_irq_handler(struct amdgpu_device *adev,
					    void *ih,
					    const struct dc_interrupt_params *int_params)
{
	struct list_head *hnd_list;
	struct list_head *entry, *tmp;
	struct amdgpu_dm_irq_handler_data *handler;
	unsigned long irq_table_flags;
	bool handler_removed = false;
	enum dc_irq_source irq_source;

	DM_IRQ_TABLE_LOCK(adev, irq_table_flags);

	irq_source = int_params->irq_source;

	switch (int_params->int_context) {
	case INTERRUPT_HIGH_IRQ_CONTEXT:
		hnd_list = &adev->dm.irq_handler_list_high_tab[irq_source];
		break;
	case INTERRUPT_LOW_IRQ_CONTEXT:
	default:
		hnd_list = &adev->dm.irq_handler_list_low_tab[irq_source];
		break;
	}

	list_for_each_safe(entry, tmp, hnd_list) {
		handler = list_entry(entry, struct amdgpu_dm_irq_handler_data,
				     list);

		if (handler == NULL)
			continue;

		if (ih == handler->handler) {
			/* Found our handler - remove it from the list. */
			list_del(&handler->list);
			handler_removed = true;
			break;
		}
	}

	DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);

	if (!handler_removed) {
		/* Not necessarily an error: the caller may not know in which
		 * context the handler was registered and tries each in turn.
		 */
		return NULL;
	}

	kfree(handler);

	DRM_DEBUG_KMS(
		"DM_IRQ: removed irq handler: %p for: dal_src=%d, irq context=%d\n",
		ih, int_params->irq_source, int_params->int_context);

	return hnd_list;
}

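/*
 * Unregister every handler from both the low- and high-context lists of
 * every interrupt source. Called on teardown.
 */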
static void unregister_all_irq_handlers(struct amdgpu_device *adev)
{
	struct list_head *hnd_list_low;
	struct list_head *hnd_list_high;
	struct list_head *entry, *tmp;
	struct amdgpu_dm_irq_handler_data *handler;
	unsigned long irq_table_flags;
	int i;

	DM_IRQ_TABLE_LOCK(adev, irq_table_flags);

	for (i = 0; i < DAL_IRQ_SOURCES_NUMBER; i++) {
		hnd_list_low = &adev->dm.irq_handler_list_low_tab[i];
		hnd_list_high = &adev->dm.irq_handler_list_high_tab[i];

		list_for_each_safe(entry, tmp, hnd_list_low) {
			handler = list_entry(entry, struct amdgpu_dm_irq_handler_data,
					     list);

			if (handler == NULL || handler->handler == NULL)
				continue;

			list_del(&handler->list);
			kfree(handler);
		}

		list_for_each_safe(entry, tmp, hnd_list_high) {
			handler = list_entry(entry, struct amdgpu_dm_irq_handler_data,
					     list);

			if (handler == NULL || handler->handler == NULL)
				continue;

			list_del(&handler->list);
			kfree(handler);
		}
	}

	DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
}

static bool
validate_irq_registration_params(struct dc_interrupt_params *int_params,
				 void (*ih)(void *))
{
	if (!int_params || !ih) {
		DRM_ERROR("DM_IRQ: invalid input!\n");
		return false;
	}

	if (int_params->int_context >= INTERRUPT_CONTEXT_NUMBER) {
		DRM_ERROR("DM_IRQ: invalid context: %d!\n",
			  int_params->int_context);
		return false;
	}

	if (!DAL_VALID_IRQ_SRC_NUM(int_params->irq_source)) {
		DRM_ERROR("DM_IRQ: invalid irq_source: %d!\n",
			  int_params->irq_source);
		return false;
	}

	return true;
}

static bool validate_irq_unregistration_params(enum dc_irq_source irq_source,
					       irq_handler_idx handler_idx)
{
	if (handler_idx == DAL_INVALID_IRQ_HANDLER_IDX) {
		DRM_ERROR("DM_IRQ: invalid handler_idx!\n");
		return false;
	}

	if (!DAL_VALID_IRQ_SRC_NUM(irq_source)) {
		DRM_ERROR("DM_IRQ: invalid irq_source: %d!\n", irq_source);
		return false;
	}

	return true;
}

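/**
 * amdgpu_dm_irq_register_interrupt() - Register a handler for a DC interrupt source
 *
 * @adev: The base driver device containing the DM device
 * @int_params: Interrupt source and context the handler should run in
 * @ih: Function pointer to the interrupt handler
 * @handler_args: Argument passed to @ih when it is called
 *
 * Handlers registered for INTERRUPT_LOW_IRQ_CONTEXT are dispatched from a
 * work queue; handlers registered for INTERRUPT_HIGH_IRQ_CONTEXT are called
 * directly from the interrupt handler.
 *
 * Return: pointer to the handler data on success,
 * DAL_INVALID_IRQ_HANDLER_IDX on failure.
 */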
void *amdgpu_dm_irq_register_interrupt(struct amdgpu_device *adev,
				       struct dc_interrupt_params *int_params,
				       void (*ih)(void *),
				       void *handler_args)
{
	struct list_head *hnd_list;
	struct amdgpu_dm_irq_handler_data *handler_data;
	unsigned long irq_table_flags;
	enum dc_irq_source irq_source;

	if (!validate_irq_registration_params(int_params, ih))
		return DAL_INVALID_IRQ_HANDLER_IDX;

	handler_data = kzalloc(sizeof(*handler_data), GFP_KERNEL);
	if (!handler_data) {
		DRM_ERROR("DM_IRQ: failed to allocate irq handler!\n");
		return DAL_INVALID_IRQ_HANDLER_IDX;
	}

	init_handler_common_data(handler_data, ih, handler_args, &adev->dm);

	irq_source = int_params->irq_source;

	handler_data->irq_source = irq_source;

	/* Lock the table before adding to the per-source handler list. */
	DM_IRQ_TABLE_LOCK(adev, irq_table_flags);

	switch (int_params->int_context) {
	case INTERRUPT_HIGH_IRQ_CONTEXT:
		hnd_list = &adev->dm.irq_handler_list_high_tab[irq_source];
		break;
	case INTERRUPT_LOW_IRQ_CONTEXT:
	default:
		hnd_list = &adev->dm.irq_handler_list_low_tab[irq_source];
		INIT_WORK(&handler_data->work, dm_irq_work_func);
		break;
	}

	list_add_tail(&handler_data->list, hnd_list);

	DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);

	DRM_DEBUG_KMS(
		"DM_IRQ: added irq handler: %p for: dal_src=%d, irq context=%d\n",
		handler_data,
		irq_source,
		int_params->int_context);

	return handler_data;
}

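/*
 * Illustrative sketch of registering and unregistering a low-context handler
 * (hypothetical handler name and argument, shown only to document the API):
 *
 *	struct dc_interrupt_params int_params = {0};
 *
 *	int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
 *	int_params.irq_source = irq_source;
 *	amdgpu_dm_irq_register_interrupt(adev, &int_params,
 *					 my_handler, my_handler_arg);
 *	...
 *	amdgpu_dm_irq_unregister_interrupt(adev, irq_source, my_handler);
 */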
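
/**
 * amdgpu_dm_irq_unregister_interrupt() - Remove a handler for a DC interrupt source
 *
 * @adev: The base driver device containing the DM device
 * @irq_source: DC interrupt source the handler was registered for
 * @ih: Function pointer that was passed to amdgpu_dm_irq_register_interrupt()
 *
 * The registration context is not known here, so both the low- and
 * high-context lists are searched.
 */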
void amdgpu_dm_irq_unregister_interrupt(struct amdgpu_device *adev,
					enum dc_irq_source irq_source,
					void *ih)
{
	struct list_head *handler_list;
	struct dc_interrupt_params int_params;
	int i;

	if (!validate_irq_unregistration_params(irq_source, ih))
		return;

	memset(&int_params, 0, sizeof(int_params));

	int_params.irq_source = irq_source;

	/* Try each context until the handler is found and removed. */
	for (i = 0; i < INTERRUPT_CONTEXT_NUMBER; i++) {
		int_params.int_context = i;

		handler_list = remove_irq_handler(adev, ih, &int_params);

		if (handler_list != NULL)
			break;
	}

	if (handler_list == NULL) {
		/* The handler was not found in any context. */
		DRM_ERROR(
			"DM_IRQ: failed to find irq handler:%p for irq_source:%d!\n",
			ih, irq_source);
	}
}

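/**
 * amdgpu_dm_irq_init() - Initialize DM IRQ management
 *
 * @adev: The base driver device containing the DM device
 *
 * Initialize the handler-list table lock and the per-source handler lists
 * for both interrupt contexts.
 */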
int amdgpu_dm_irq_init(struct amdgpu_device *adev)
{
	int src;
	struct list_head *lh;

	DRM_DEBUG_KMS("DM_IRQ\n");

	spin_lock_init(&adev->dm.irq_handler_list_table_lock);

	for (src = 0; src < DAL_IRQ_SOURCES_NUMBER; src++) {
		lh = &adev->dm.irq_handler_list_low_tab[src];
		INIT_LIST_HEAD(lh);

		INIT_LIST_HEAD(&adev->dm.irq_handler_list_high_tab[src]);
	}

	return 0;
}

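/**
 * amdgpu_dm_irq_fini() - Tear down DM IRQ management
 *
 * @adev: The base driver device containing the DM device
 *
 * Flush any work still pending for low-context handlers, then unregister
 * all handlers.
 */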
void amdgpu_dm_irq_fini(struct amdgpu_device *adev)
{
	int src;
	struct list_head *lh;
	struct list_head *entry, *tmp;
	struct amdgpu_dm_irq_handler_data *handler;
	unsigned long irq_table_flags;

	DRM_DEBUG_KMS("DM_IRQ: releasing resources.\n");
	for (src = 0; src < DAL_IRQ_SOURCES_NUMBER; src++) {
		DM_IRQ_TABLE_LOCK(adev, irq_table_flags);

		/* Flush any work still pending for the low-context handlers
		 * of this source before the handlers are freed.
		 */
		lh = &adev->dm.irq_handler_list_low_tab[src];
		DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);

		if (!list_empty(lh)) {
			list_for_each_safe(entry, tmp, lh) {
				handler = list_entry(
					entry,
					struct amdgpu_dm_irq_handler_data,
					list);
				flush_work(&handler->work);
			}
		}
	}

	unregister_all_irq_handlers(adev);
}

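/**
 * amdgpu_dm_irq_suspend() - Disable hot-plug interrupt sources for suspend
 *
 * @adev: The base driver device containing the DM device
 *
 * Disable the hot-plug interrupt sources that have handlers registered and
 * flush any handler work still pending, so no handler runs while suspended.
 */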
int amdgpu_dm_irq_suspend(struct amdgpu_device *adev)
{
	int src;
	struct list_head *hnd_list_h;
	struct list_head *hnd_list_l;
	unsigned long irq_table_flags;
	struct list_head *entry, *tmp;
	struct amdgpu_dm_irq_handler_data *handler;

	DM_IRQ_TABLE_LOCK(adev, irq_table_flags);

	DRM_DEBUG_KMS("DM_IRQ: suspend\n");

	/* Disable the HW interrupt for every hot-plug source that has a
	 * handler registered, then flush any work still pending for it.
	 */
	for (src = DC_IRQ_SOURCE_HPD1; src <= DC_IRQ_SOURCE_HPD6RX; src++) {
		hnd_list_l = &adev->dm.irq_handler_list_low_tab[src];
		hnd_list_h = &adev->dm.irq_handler_list_high_tab[src];
		if (!list_empty(hnd_list_l) || !list_empty(hnd_list_h))
			dc_interrupt_set(adev->dm.dc, src, false);

		DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);

		if (!list_empty(hnd_list_l)) {
			list_for_each_safe(entry, tmp, hnd_list_l) {
				handler = list_entry(
					entry,
					struct amdgpu_dm_irq_handler_data,
					list);
				flush_work(&handler->work);
			}
		}
		DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
	}

	DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
	return 0;
}

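/**
 * amdgpu_dm_irq_resume_early() - Re-enable HPD RX interrupts on early resume
 *
 * @adev: The base driver device containing the DM device
 *
 * Re-enable the HPD RX interrupt sources that have handlers registered.
 */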
int amdgpu_dm_irq_resume_early(struct amdgpu_device *adev)
{
	int src;
	struct list_head *hnd_list_h, *hnd_list_l;
	unsigned long irq_table_flags;

	DM_IRQ_TABLE_LOCK(adev, irq_table_flags);

	DRM_DEBUG_KMS("DM_IRQ: early resume\n");

	/* Re-enable the HW interrupt for sources with registered handlers. */
	for (src = DC_IRQ_SOURCE_HPD1RX; src <= DC_IRQ_SOURCE_HPD6RX; src++) {
		hnd_list_l = &adev->dm.irq_handler_list_low_tab[src];
		hnd_list_h = &adev->dm.irq_handler_list_high_tab[src];
		if (!list_empty(hnd_list_l) || !list_empty(hnd_list_h))
			dc_interrupt_set(adev->dm.dc, src, true);
	}

	DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);

	return 0;
}

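/**
 * amdgpu_dm_irq_resume_late() - Re-enable HPD interrupts on resume
 *
 * @adev: The base driver device containing the DM device
 *
 * Re-enable the HPD interrupt sources that have handlers registered.
 */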
int amdgpu_dm_irq_resume_late(struct amdgpu_device *adev)
{
	int src;
	struct list_head *hnd_list_h, *hnd_list_l;
	unsigned long irq_table_flags;

	DM_IRQ_TABLE_LOCK(adev, irq_table_flags);

	DRM_DEBUG_KMS("DM_IRQ: resume\n");

	/* Re-enable the HW interrupt for sources with registered handlers. */
	for (src = DC_IRQ_SOURCE_HPD1; src <= DC_IRQ_SOURCE_HPD6; src++) {
		hnd_list_l = &adev->dm.irq_handler_list_low_tab[src];
		hnd_list_h = &adev->dm.irq_handler_list_high_tab[src];
		if (!list_empty(hnd_list_l) || !list_empty(hnd_list_h))
			dc_interrupt_set(adev->dm.dc, src, true);
	}

	DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
	return 0;
}
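
/*
 * amdgpu_dm_irq_schedule_work() - schedule handling of a low-context interrupt
 *
 * Queue the work of one of the handlers registered for @irq_source on the
 * high-priority system workqueue. If every handler's work item is already
 * queued (the interrupt fired again before the previous work ran), a copy of
 * the first handler is allocated and queued instead so the event is not lost.
 */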
static void amdgpu_dm_irq_schedule_work(struct amdgpu_device *adev,
					enum dc_irq_source irq_source)
{
	struct list_head *handler_list = &adev->dm.irq_handler_list_low_tab[irq_source];
	struct amdgpu_dm_irq_handler_data *handler_data;
	bool work_queued = false;

	if (list_empty(handler_list))
		return;

	list_for_each_entry(handler_data, handler_list, list) {
		if (queue_work(system_highpri_wq, &handler_data->work)) {
			work_queued = true;
			break;
		}
	}

	if (!work_queued) {
		struct amdgpu_dm_irq_handler_data *handler_data_add;

		/* Get the handler data of the first item on handler_list. */
		handler_data = container_of(handler_list->next, struct amdgpu_dm_irq_handler_data, list);

		/* Every work item is still pending; clone the first handler
		 * so this interrupt is not dropped.
		 */
		handler_data_add = kzalloc(sizeof(*handler_data), GFP_ATOMIC);
		if (!handler_data_add) {
			DRM_ERROR("DM_IRQ: failed to allocate irq handler!\n");
			return;
		}

		/* Initialize the new handler data from the first one. */
		handler_data_add->handler = handler_data->handler;
		handler_data_add->handler_arg = handler_data->handler_arg;
		handler_data_add->dm = handler_data->dm;
		handler_data_add->irq_source = irq_source;

		list_add_tail(&handler_data_add->list, handler_list);

		INIT_WORK(&handler_data_add->work, dm_irq_work_func);

		if (queue_work(system_highpri_wq, &handler_data_add->work))
			DRM_DEBUG("Queued work for handling interrupt from "
				  "display for IRQ source %d\n",
				  irq_source);
		else
			DRM_ERROR("Failed to queue work for handling interrupt "
				  "from display for IRQ source %d\n",
				  irq_source);
	}
}
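
/*
 * amdgpu_dm_irq_immediate_work() - call every high-IRQ-context handler
 * registered for @irq_source directly, while holding the handler table lock.
 */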
static void amdgpu_dm_irq_immediate_work(struct amdgpu_device *adev,
					 enum dc_irq_source irq_source)
{
	struct amdgpu_dm_irq_handler_data *handler_data;
	unsigned long irq_table_flags;

	DM_IRQ_TABLE_LOCK(adev, irq_table_flags);

	list_for_each_entry(handler_data,
			    &adev->dm.irq_handler_list_high_tab[irq_source],
			    list) {
		handler_data->handler(handler_data->handler_arg);
	}

	DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
}
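
/**
 * amdgpu_dm_irq_handler() - Generic DM IRQ handler
 *
 * @adev: amdgpu base driver device containing the DM device
 * @source: Unused
 * @entry: Data about the triggered interrupt
 *
 * Translate the IV entry into a DC interrupt source, acknowledge it, call the
 * high-context handlers immediately and schedule work for the low-context
 * handlers.
 */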
static int amdgpu_dm_irq_handler(struct amdgpu_device *adev,
				 struct amdgpu_irq_src *source,
				 struct amdgpu_iv_entry *entry)
{
	enum dc_irq_source src =
		dc_interrupt_to_irq_source(
			adev->dm.dc,
			entry->src_id,
			entry->src_data[0]);

	dc_interrupt_ack(adev->dm.dc, src);

	/* Call high-IRQ-context handlers immediately. */
	amdgpu_dm_irq_immediate_work(adev, src);

	/* Schedule the low-IRQ-context handlers. */
	amdgpu_dm_irq_schedule_work(adev, src);

	return 0;
}

static enum dc_irq_source amdgpu_dm_hpd_to_dal_irq_source(unsigned type)
{
	switch (type) {
	case AMDGPU_HPD_1:
		return DC_IRQ_SOURCE_HPD1;
	case AMDGPU_HPD_2:
		return DC_IRQ_SOURCE_HPD2;
	case AMDGPU_HPD_3:
		return DC_IRQ_SOURCE_HPD3;
	case AMDGPU_HPD_4:
		return DC_IRQ_SOURCE_HPD4;
	case AMDGPU_HPD_5:
		return DC_IRQ_SOURCE_HPD5;
	case AMDGPU_HPD_6:
		return DC_IRQ_SOURCE_HPD6;
	default:
		return DC_IRQ_SOURCE_INVALID;
	}
}

static int amdgpu_dm_set_hpd_irq_state(struct amdgpu_device *adev,
				       struct amdgpu_irq_src *source,
				       unsigned type,
				       enum amdgpu_interrupt_state state)
{
	enum dc_irq_source src = amdgpu_dm_hpd_to_dal_irq_source(type);
	bool st = (state == AMDGPU_IRQ_STATE_ENABLE);

	dc_interrupt_set(adev->dm.dc, src, st);
	return 0;
}

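/*
 * dm_irq_state() - Common helper for the per-CRTC "set" callbacks below.
 * Maps the CRTC id to the matching DC interrupt source (base @dal_irq_type
 * plus the CRTC's OTG instance) and enables or disables it.
 */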
static inline int dm_irq_state(struct amdgpu_device *adev,
			       struct amdgpu_irq_src *source,
			       unsigned crtc_id,
			       enum amdgpu_interrupt_state state,
			       const enum irq_type dal_irq_type,
			       const char *func)
{
	bool st;
	enum dc_irq_source irq_source;

	struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc_id];

	if (!acrtc) {
		DRM_ERROR("%s: crtc is NULL at id :%d\n", func, crtc_id);
		return 0;
	}

	if (acrtc->otg_inst == -1)
		return 0;

	irq_source = dal_irq_type + acrtc->otg_inst;

	st = (state == AMDGPU_IRQ_STATE_ENABLE);

	dc_interrupt_set(adev->dm.dc, irq_source, st);
	return 0;
}

static int amdgpu_dm_set_pflip_irq_state(struct amdgpu_device *adev,
					 struct amdgpu_irq_src *source,
					 unsigned crtc_id,
					 enum amdgpu_interrupt_state state)
{
	return dm_irq_state(
		adev,
		source,
		crtc_id,
		state,
		IRQ_TYPE_PFLIP,
		__func__);
}

static int amdgpu_dm_set_crtc_irq_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned crtc_id,
					enum amdgpu_interrupt_state state)
{
	return dm_irq_state(
		adev,
		source,
		crtc_id,
		state,
		IRQ_TYPE_VBLANK,
		__func__);
}

static int amdgpu_dm_set_vline0_irq_state(struct amdgpu_device *adev,
					  struct amdgpu_irq_src *source,
					  unsigned int crtc_id,
					  enum amdgpu_interrupt_state state)
{
	return dm_irq_state(
		adev,
		source,
		crtc_id,
		state,
		IRQ_TYPE_VLINE0,
		__func__);
}

static int amdgpu_dm_set_dmub_outbox_irq_state(struct amdgpu_device *adev,
					       struct amdgpu_irq_src *source,
					       unsigned int crtc_id,
					       enum amdgpu_interrupt_state state)
{
	enum dc_irq_source irq_source = DC_IRQ_SOURCE_DMCUB_OUTBOX;
	bool st = (state == AMDGPU_IRQ_STATE_ENABLE);

	dc_interrupt_set(adev->dm.dc, irq_source, st);
	return 0;
}

static int amdgpu_dm_set_vupdate_irq_state(struct amdgpu_device *adev,
					   struct amdgpu_irq_src *source,
					   unsigned int crtc_id,
					   enum amdgpu_interrupt_state state)
{
	return dm_irq_state(
		adev,
		source,
		crtc_id,
		state,
		IRQ_TYPE_VUPDATE,
		__func__);
}

static int amdgpu_dm_set_dmub_trace_irq_state(struct amdgpu_device *adev,
					      struct amdgpu_irq_src *source,
					      unsigned int type,
					      enum amdgpu_interrupt_state state)
{
	enum dc_irq_source irq_source = DC_IRQ_SOURCE_DMCUB_OUTBOX0;
	bool st = (state == AMDGPU_IRQ_STATE_ENABLE);

	dc_interrupt_set(adev->dm.dc, irq_source, st);
	return 0;
}

static const struct amdgpu_irq_src_funcs dm_crtc_irq_funcs = {
	.set = amdgpu_dm_set_crtc_irq_state,
	.process = amdgpu_dm_irq_handler,
};

static const struct amdgpu_irq_src_funcs dm_vline0_irq_funcs = {
	.set = amdgpu_dm_set_vline0_irq_state,
	.process = amdgpu_dm_irq_handler,
};

static const struct amdgpu_irq_src_funcs dm_dmub_outbox_irq_funcs = {
	.set = amdgpu_dm_set_dmub_outbox_irq_state,
	.process = amdgpu_dm_irq_handler,
};

static const struct amdgpu_irq_src_funcs dm_vupdate_irq_funcs = {
	.set = amdgpu_dm_set_vupdate_irq_state,
	.process = amdgpu_dm_irq_handler,
};

static const struct amdgpu_irq_src_funcs dm_dmub_trace_irq_funcs = {
	.set = amdgpu_dm_set_dmub_trace_irq_state,
	.process = amdgpu_dm_irq_handler,
};

static const struct amdgpu_irq_src_funcs dm_pageflip_irq_funcs = {
	.set = amdgpu_dm_set_pflip_irq_state,
	.process = amdgpu_dm_irq_handler,
};

static const struct amdgpu_irq_src_funcs dm_hpd_irq_funcs = {
	.set = amdgpu_dm_set_hpd_irq_state,
	.process = amdgpu_dm_irq_handler,
};

void amdgpu_dm_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->crtc_irq.num_types = adev->mode_info.num_crtc;
	adev->crtc_irq.funcs = &dm_crtc_irq_funcs;

	adev->vline0_irq.num_types = adev->mode_info.num_crtc;
	adev->vline0_irq.funcs = &dm_vline0_irq_funcs;

	adev->dmub_outbox_irq.num_types = 1;
	adev->dmub_outbox_irq.funcs = &dm_dmub_outbox_irq_funcs;

	adev->vupdate_irq.num_types = adev->mode_info.num_crtc;
	adev->vupdate_irq.funcs = &dm_vupdate_irq_funcs;

	adev->dmub_trace_irq.num_types = 1;
	adev->dmub_trace_irq.funcs = &dm_dmub_trace_irq_funcs;

	adev->pageflip_irq.num_types = adev->mode_info.num_crtc;
	adev->pageflip_irq.funcs = &dm_pageflip_irq_funcs;

	adev->hpd_irq.num_types = adev->mode_info.num_hpd;
	adev->hpd_irq.funcs = &dm_hpd_irq_funcs;
}

void amdgpu_dm_outbox_init(struct amdgpu_device *adev)
{
	dc_interrupt_set(adev->dm.dc,
			 DC_IRQ_SOURCE_DMCUB_OUTBOX,
			 true);
}

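/**
 * amdgpu_dm_hpd_init() - Enable HPD interrupt sources
 *
 * @adev: amdgpu device with connectors whose HPD interrupts should be enabled
 *
 * Walk the connector list and enable the HPD and HPD RX interrupt sources of
 * each connector's DC link that has a valid one.
 */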
void amdgpu_dm_hpd_init(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev_to_drm(adev);
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		struct amdgpu_dm_connector *amdgpu_dm_connector =
				to_amdgpu_dm_connector(connector);

		const struct dc_link *dc_link = amdgpu_dm_connector->dc_link;

		if (dc_link->irq_source_hpd != DC_IRQ_SOURCE_INVALID) {
			dc_interrupt_set(adev->dm.dc,
					 dc_link->irq_source_hpd,
					 true);
		}

		if (dc_link->irq_source_hpd_rx != DC_IRQ_SOURCE_INVALID) {
			dc_interrupt_set(adev->dm.dc,
					 dc_link->irq_source_hpd_rx,
					 true);
		}
	}
	drm_connector_list_iter_end(&iter);
}
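
/**
 * amdgpu_dm_hpd_fini() - Disable HPD interrupt sources
 *
 * @adev: amdgpu device with connectors whose HPD interrupts should be disabled
 *
 * Walk the connector list and disable the HPD interrupt source of each
 * connector's DC link, along with the HPD RX source where one is valid.
 */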
void amdgpu_dm_hpd_fini(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev_to_drm(adev);
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		struct amdgpu_dm_connector *amdgpu_dm_connector =
				to_amdgpu_dm_connector(connector);
		const struct dc_link *dc_link = amdgpu_dm_connector->dc_link;

		dc_interrupt_set(adev->dm.dc, dc_link->irq_source_hpd, false);

		if (dc_link->irq_source_hpd_rx != DC_IRQ_SOURCE_INVALID) {
			dc_interrupt_set(adev->dm.dc,
					 dc_link->irq_source_hpd_rx,
					 false);
		}
	}
	drm_connector_list_iter_end(&iter);
}