1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45#include <linux/irq.h>
46#include <linux/pci.h>
47
48#include <drm/drm_crtc_helper.h>
49#include <drm/drm_vblank.h>
50#include <drm/amdgpu_drm.h>
51#include <drm/drm_drv.h>
52#include "amdgpu.h"
53#include "amdgpu_ih.h"
54#include "atom.h"
55#include "amdgpu_connectors.h"
56#include "amdgpu_trace.h"
57#include "amdgpu_amdkfd.h"
58#include "amdgpu_ras.h"
59
60#include <linux/pm_runtime.h>
61
62#ifdef CONFIG_DRM_AMD_DC
63#include "amdgpu_dm_irq.h"
64#endif
65
66#define AMDGPU_WAIT_IDLE_TIMEOUT 200
67
/*
 * Human-readable names for the SOC15 interrupt-handler client IDs, indexed
 * by client id as decoded from an IV ring entry.  Several slots are shared
 * between IP blocks that differ per ASIC (e.g. "SDMA2 or ACP"), so a name
 * lists both candidates.  Used for debug/trace output only.
 */
const char *soc15_ih_clientid_name[] = {
	"IH",
	"SDMA2 or ACP",
	"ATHUB",
	"BIF",
	"SDMA3 or DCE",
	"SDMA4 or ISP",
	"VMC1 or PCIE0",
	"RLC",
	"SDMA0",
	"SDMA1",
	"SE0SH",
	"SE1SH",
	"SE2SH",
	"SE3SH",
	"VCN1 or UVD1",
	"THM",
	"VCN or UVD",
	"SDMA5 or VCE0",
	"VMC",
	"SDMA6 or XDMA",
	"GRBM_CP",
	"ATS",
	"ROM_SMUIO",
	"DF",
	"SDMA7 or VCE1",
	"PWR",
	"reserved",
	"UTCL2",
	"EA",
	"UTCL2LOG",
	"MP0",
	"MP1"
};
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119static void amdgpu_hotplug_work_func(struct work_struct *work)
120{
121 struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
122 hotplug_work);
123 struct drm_device *dev = adev_to_drm(adev);
124 struct drm_mode_config *mode_config = &dev->mode_config;
125 struct drm_connector *connector;
126 struct drm_connector_list_iter iter;
127
128 mutex_lock(&mode_config->mutex);
129 drm_connector_list_iter_begin(dev, &iter);
130 drm_for_each_connector_iter(connector, &iter)
131 amdgpu_connector_hotplug(connector);
132 drm_connector_list_iter_end(&iter);
133 mutex_unlock(&mode_config->mutex);
134
135 drm_helper_hpd_irq_event(dev);
136}
137
138
139
140
141
142
143
144
145void amdgpu_irq_disable_all(struct amdgpu_device *adev)
146{
147 unsigned long irqflags;
148 unsigned i, j, k;
149 int r;
150
151 spin_lock_irqsave(&adev->irq.lock, irqflags);
152 for (i = 0; i < AMDGPU_IRQ_CLIENTID_MAX; ++i) {
153 if (!adev->irq.client[i].sources)
154 continue;
155
156 for (j = 0; j < AMDGPU_MAX_IRQ_SRC_ID; ++j) {
157 struct amdgpu_irq_src *src = adev->irq.client[i].sources[j];
158
159 if (!src || !src->funcs->set || !src->num_types)
160 continue;
161
162 for (k = 0; k < src->num_types; ++k) {
163 atomic_set(&src->enabled_types[k], 0);
164 r = src->funcs->set(adev, src, k,
165 AMDGPU_IRQ_STATE_DISABLE);
166 if (r)
167 DRM_ERROR("error disabling interrupt (%d)\n",
168 r);
169 }
170 }
171 }
172 spin_unlock_irqrestore(&adev->irq.lock, irqflags);
173}
174
175
176
177
178
179
180
181
182
183
184
185
186static irqreturn_t amdgpu_irq_handler(int irq, void *arg)
187{
188 struct drm_device *dev = (struct drm_device *) arg;
189 struct amdgpu_device *adev = drm_to_adev(dev);
190 irqreturn_t ret;
191
192 ret = amdgpu_ih_process(adev, &adev->irq.ih);
193 if (ret == IRQ_HANDLED)
194 pm_runtime_mark_last_busy(dev->dev);
195
196
197
198
199
200
201 if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__PCIE_BIF)) {
202 if (adev->nbio.ras_funcs &&
203 adev->nbio.ras_funcs->handle_ras_controller_intr_no_bifring)
204 adev->nbio.ras_funcs->handle_ras_controller_intr_no_bifring(adev);
205
206 if (adev->nbio.ras_funcs &&
207 adev->nbio.ras_funcs->handle_ras_err_event_athub_intr_no_bifring)
208 adev->nbio.ras_funcs->handle_ras_err_event_athub_intr_no_bifring(adev);
209 }
210
211 return ret;
212}
213
214
215
216
217
218
219
220
221static void amdgpu_irq_handle_ih1(struct work_struct *work)
222{
223 struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
224 irq.ih1_work);
225
226 amdgpu_ih_process(adev, &adev->irq.ih1);
227}
228
229
230
231
232
233
234
235
236static void amdgpu_irq_handle_ih2(struct work_struct *work)
237{
238 struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
239 irq.ih2_work);
240
241 amdgpu_ih_process(adev, &adev->irq.ih2);
242}
243
244
245
246
247
248
249
250
251static void amdgpu_irq_handle_ih_soft(struct work_struct *work)
252{
253 struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
254 irq.ih_soft_work);
255
256 amdgpu_ih_process(adev, &adev->irq.ih_soft);
257}
258
259
260
261
262
263
264
265
266
267
268
269
270static bool amdgpu_msi_ok(struct amdgpu_device *adev)
271{
272 if (amdgpu_msi == 1)
273 return true;
274 else if (amdgpu_msi == 0)
275 return false;
276
277 return true;
278}
279
/**
 * amdgpu_restore_msix - re-arm MSI-X after a reset
 * @adev: amdgpu device pointer
 *
 * After a GPU reset under SR-IOV/passthrough the device may lose its MSI-X
 * state.  If MSI-X was enabled, toggle the enable bit off and back on in
 * PCI config space so the function re-latches its vectors.  No-op when
 * MSI-X was not enabled to begin with.
 */
static void amdgpu_restore_msix(struct amdgpu_device *adev)
{
	u16 ctrl;

	pci_read_config_word(adev->pdev, adev->pdev->msix_cap + PCI_MSIX_FLAGS, &ctrl);
	if (!(ctrl & PCI_MSIX_FLAGS_ENABLE))
		return;

	/* disable then re-enable MSI-X; the write ordering is intentional */
	ctrl &= ~PCI_MSIX_FLAGS_ENABLE;
	pci_write_config_word(adev->pdev, adev->pdev->msix_cap + PCI_MSIX_FLAGS, ctrl);
	ctrl |= PCI_MSIX_FLAGS_ENABLE;
	pci_write_config_word(adev->pdev, adev->pdev->msix_cap + PCI_MSIX_FLAGS, ctrl);
}
294
295
296
297
298
299
300
301
302
303
304
305
/**
 * amdgpu_irq_init - initialize driver interrupt handling
 * @adev: amdgpu device pointer
 *
 * Sets up the IRQ spinlock, enables MSI/MSI-X when permitted, initializes
 * vblank/hotplug infrastructure for the non-DC display path, creates the
 * IH work items, and installs the top-level interrupt handler.
 *
 * Returns 0 on success, negative errno on failure.
 */
int amdgpu_irq_init(struct amdgpu_device *adev)
{
	int r = 0;
	unsigned int irq;

	spin_lock_init(&adev->irq.lock);

	/* Assume legacy interrupts until MSI allocation succeeds below. */
	adev->irq.msi_enabled = false;

	if (amdgpu_msi_ok(adev)) {
		int nvec = pci_msix_vec_count(adev->pdev);
		unsigned int flags;

		/* Only request MSI-X when the device actually exposes it. */
		if (nvec <= 0) {
			flags = PCI_IRQ_MSI;
		} else {
			flags = PCI_IRQ_MSI | PCI_IRQ_MSIX;
		}

		/* A single vector is sufficient; all IH rings share it. */
		nvec = pci_alloc_irq_vectors(adev->pdev, 1, 1, flags);
		if (nvec > 0) {
			adev->irq.msi_enabled = true;
			dev_dbg(adev->dev, "using MSI/MSI-X.\n");
		}
	}

	if (!amdgpu_device_has_dc_support(adev)) {
		if (!adev->enable_virtual_display)
			/* Non-DC hardware can disable vblank IRQs without
			 * racing the counter, so allow immediate disable. */
			adev_to_drm(adev)->vblank_disable_immediate = true;

		r = drm_vblank_init(adev_to_drm(adev), adev->mode_info.num_crtc);
		if (r)
			return r;

		/* Hotplug is handled from process context via this work. */
		INIT_WORK(&adev->hotplug_work,
			  amdgpu_hotplug_work_func);
	}

	INIT_WORK(&adev->irq.ih1_work, amdgpu_irq_handle_ih1);
	INIT_WORK(&adev->irq.ih2_work, amdgpu_irq_handle_ih2);
	INIT_WORK(&adev->irq.ih_soft_work, amdgpu_irq_handle_ih_soft);

	/* Look up the Linux IRQ number for vector 0 (MSI or legacy). */
	r = pci_irq_vector(adev->pdev, 0);
	if (r < 0)
		return r;
	irq = r;

	/* Shared: legacy INTx lines may be shared with other devices. */
	r = request_irq(irq, amdgpu_irq_handler, IRQF_SHARED, adev_to_drm(adev)->driver->name,
			adev_to_drm(adev));
	if (r) {
		/* Undo the hotplug work init done above before bailing. */
		if (!amdgpu_device_has_dc_support(adev))
			flush_work(&adev->hotplug_work);
		return r;
	}
	adev->irq.installed = true;
	adev->irq.irq = irq;
	adev_to_drm(adev)->max_vblank_count = 0x00ffffff;

	DRM_DEBUG("amdgpu: irq initialized.\n");
	return 0;
}
373
374
/**
 * amdgpu_irq_fini_hw - tear down hardware interrupt handling
 * @adev: amdgpu device pointer
 *
 * Releases the installed IRQ handler, frees MSI vectors, flushes any
 * pending hotplug work (non-DC path only), and tears down all IH rings.
 * Ordering matters: the handler is freed before the rings so no interrupt
 * can race the ring teardown.
 */
void amdgpu_irq_fini_hw(struct amdgpu_device *adev)
{
	if (adev->irq.installed) {
		free_irq(adev->irq.irq, adev_to_drm(adev));
		adev->irq.installed = false;
		if (adev->irq.msi_enabled)
			pci_free_irq_vectors(adev->pdev);

		/* hotplug_work is only initialized on the non-DC path */
		if (!amdgpu_device_has_dc_support(adev))
			flush_work(&adev->hotplug_work);
	}

	amdgpu_ih_ring_fini(adev, &adev->irq.ih_soft);
	amdgpu_ih_ring_fini(adev, &adev->irq.ih);
	amdgpu_ih_ring_fini(adev, &adev->irq.ih1);
	amdgpu_ih_ring_fini(adev, &adev->irq.ih2);
}
392
393
394
395
396
397
398
399
400
401
402void amdgpu_irq_fini_sw(struct amdgpu_device *adev)
403{
404 unsigned i, j;
405
406 for (i = 0; i < AMDGPU_IRQ_CLIENTID_MAX; ++i) {
407 if (!adev->irq.client[i].sources)
408 continue;
409
410 for (j = 0; j < AMDGPU_MAX_IRQ_SRC_ID; ++j) {
411 struct amdgpu_irq_src *src = adev->irq.client[i].sources[j];
412
413 if (!src)
414 continue;
415
416 kfree(src->enabled_types);
417 src->enabled_types = NULL;
418 }
419 kfree(adev->irq.client[i].sources);
420 adev->irq.client[i].sources = NULL;
421 }
422}
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437int amdgpu_irq_add_id(struct amdgpu_device *adev,
438 unsigned client_id, unsigned src_id,
439 struct amdgpu_irq_src *source)
440{
441 if (client_id >= AMDGPU_IRQ_CLIENTID_MAX)
442 return -EINVAL;
443
444 if (src_id >= AMDGPU_MAX_IRQ_SRC_ID)
445 return -EINVAL;
446
447 if (!source->funcs)
448 return -EINVAL;
449
450 if (!adev->irq.client[client_id].sources) {
451 adev->irq.client[client_id].sources =
452 kcalloc(AMDGPU_MAX_IRQ_SRC_ID,
453 sizeof(struct amdgpu_irq_src *),
454 GFP_KERNEL);
455 if (!adev->irq.client[client_id].sources)
456 return -ENOMEM;
457 }
458
459 if (adev->irq.client[client_id].sources[src_id] != NULL)
460 return -EINVAL;
461
462 if (source->num_types && !source->enabled_types) {
463 atomic_t *types;
464
465 types = kcalloc(source->num_types, sizeof(atomic_t),
466 GFP_KERNEL);
467 if (!types)
468 return -ENOMEM;
469
470 source->enabled_types = types;
471 }
472
473 adev->irq.client[client_id].sources[src_id] = source;
474 return 0;
475}
476
477
478
479
480
481
482
483
484
/**
 * amdgpu_irq_dispatch - decode and dispatch one IV ring entry
 * @adev: amdgpu device pointer
 * @ih: the IH ring whose entry at the current rptr is dispatched
 *
 * Decodes the entry, then routes it in priority order: virtual IRQ domain
 * mappings for legacy client ids first, then the registered per-client
 * source's process callback.  Entries nobody handled fall through to the
 * KFD interrupt handler.
 */
void amdgpu_irq_dispatch(struct amdgpu_device *adev,
			 struct amdgpu_ih_ring *ih)
{
	u32 ring_index = ih->rptr >> 2;
	struct amdgpu_iv_entry entry;
	unsigned client_id, src_id;
	struct amdgpu_irq_src *src;
	bool handled = false;
	int r;

	entry.ih = ih;
	entry.iv_entry = (const uint32_t *)&ih->ring[ring_index];
	amdgpu_ih_decode_iv(adev, &entry);

	/* pointer arithmetic yields the ring index (0 = primary IH) */
	trace_amdgpu_iv(ih - &adev->irq.ih, &entry);

	client_id = entry.client_id;
	src_id = entry.src_id;

	/* NOTE: the order of these tests is significant; each branch
	 * assumes the previous ones rejected their cases. */
	if (client_id >= AMDGPU_IRQ_CLIENTID_MAX) {
		DRM_DEBUG("Invalid client_id in IV: %d\n", client_id);

	} else if (src_id >= AMDGPU_MAX_IRQ_SRC_ID) {
		DRM_DEBUG("Invalid src_id in IV: %d\n", src_id);

	} else if ((client_id == AMDGPU_IRQ_CLIENTID_LEGACY) &&
		   adev->irq.virq[src_id]) {
		/* legacy sources with a virq mapping go through the
		 * generic IRQ domain instead of a driver callback */
		generic_handle_domain_irq(adev->irq.domain, src_id);

	} else if (!adev->irq.client[client_id].sources) {
		DRM_DEBUG("Unregistered interrupt client_id: %d src_id: %d\n",
			  client_id, src_id);

	} else if ((src = adev->irq.client[client_id].sources[src_id])) {
		/* process() returns >0 when it consumed the interrupt */
		r = src->funcs->process(adev, src, &entry);
		if (r < 0)
			DRM_ERROR("error processing interrupt (%d)\n", r);
		else if (r)
			handled = true;

	} else {
		DRM_DEBUG("Unhandled interrupt src_id: %d\n", src_id);
	}

	/* anything the graphics driver did not consume goes to KFD */
	if (!handled)
		amdgpu_amdkfd_interrupt(adev, entry.iv_entry);
}
533
534
535
536
537
538
539
540
541
542
543
544void amdgpu_irq_delegate(struct amdgpu_device *adev,
545 struct amdgpu_iv_entry *entry,
546 unsigned int num_dw)
547{
548 amdgpu_ih_ring_write(&adev->irq.ih_soft, entry->iv_entry, num_dw);
549 schedule_work(&adev->irq.ih_soft_work);
550}
551
552
553
554
555
556
557
558
559
560
561int amdgpu_irq_update(struct amdgpu_device *adev,
562 struct amdgpu_irq_src *src, unsigned type)
563{
564 unsigned long irqflags;
565 enum amdgpu_interrupt_state state;
566 int r;
567
568 spin_lock_irqsave(&adev->irq.lock, irqflags);
569
570
571
572 if (amdgpu_irq_enabled(adev, src, type))
573 state = AMDGPU_IRQ_STATE_ENABLE;
574 else
575 state = AMDGPU_IRQ_STATE_DISABLE;
576
577 r = src->funcs->set(adev, src, type, state);
578 spin_unlock_irqrestore(&adev->irq.lock, irqflags);
579 return r;
580}
581
582
583
584
585
586
587
588
589
590void amdgpu_irq_gpu_reset_resume_helper(struct amdgpu_device *adev)
591{
592 int i, j, k;
593
594 if (amdgpu_sriov_vf(adev) || amdgpu_passthrough(adev))
595 amdgpu_restore_msix(adev);
596
597 for (i = 0; i < AMDGPU_IRQ_CLIENTID_MAX; ++i) {
598 if (!adev->irq.client[i].sources)
599 continue;
600
601 for (j = 0; j < AMDGPU_MAX_IRQ_SRC_ID; ++j) {
602 struct amdgpu_irq_src *src = adev->irq.client[i].sources[j];
603
604 if (!src || !src->funcs || !src->funcs->set)
605 continue;
606 for (k = 0; k < src->num_types; k++)
607 amdgpu_irq_update(adev, src, k);
608 }
609 }
610}
611
612
613
614
615
616
617
618
619
620
621
622
623
624int amdgpu_irq_get(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
625 unsigned type)
626{
627 if (!adev->irq.installed)
628 return -ENOENT;
629
630 if (type >= src->num_types)
631 return -EINVAL;
632
633 if (!src->enabled_types || !src->funcs->set)
634 return -EINVAL;
635
636 if (atomic_inc_return(&src->enabled_types[type]) == 1)
637 return amdgpu_irq_update(adev, src, type);
638
639 return 0;
640}
641
642
643
644
645
646
647
648
649
650
651
652
653
654int amdgpu_irq_put(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
655 unsigned type)
656{
657 if (!adev->irq.installed)
658 return -ENOENT;
659
660 if (type >= src->num_types)
661 return -EINVAL;
662
663 if (!src->enabled_types || !src->funcs->set)
664 return -EINVAL;
665
666 if (atomic_dec_and_test(&src->enabled_types[type]))
667 return amdgpu_irq_update(adev, src, type);
668
669 return 0;
670}
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685bool amdgpu_irq_enabled(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
686 unsigned type)
687{
688 if (!adev->irq.installed)
689 return false;
690
691 if (type >= src->num_types)
692 return false;
693
694 if (!src->enabled_types || !src->funcs->set)
695 return false;
696
697 return !!atomic_read(&src->enabled_types[type]);
698}
699
700
/* Deliberately empty: masking for domain-mapped sources is handled by the
 * per-source amdgpu_irq_get/put refcounting, not the generic irq_chip. */
static void amdgpu_irq_mask(struct irq_data *irqd)
{

}
705
/* Deliberately empty: see amdgpu_irq_mask(). */
static void amdgpu_irq_unmask(struct irq_data *irqd)
{

}
710
711
/* irq_chip installed on every virq mapped through the amdgpu IRQ domain;
 * mask/unmask are no-ops (the driver manages enable state itself). */
static struct irq_chip amdgpu_irq_chip = {
	.name = "amdgpu-ih",
	.irq_mask = amdgpu_irq_mask,
	.irq_unmask = amdgpu_irq_unmask,
};
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731static int amdgpu_irqdomain_map(struct irq_domain *d,
732 unsigned int irq, irq_hw_number_t hwirq)
733{
734 if (hwirq >= AMDGPU_MAX_IRQ_SRC_ID)
735 return -EPERM;
736
737 irq_set_chip_and_handler(irq,
738 &amdgpu_irq_chip, handle_simple_irq);
739 return 0;
740}
741
742
/* Only .map is needed; unmap/xlate use the irq_domain defaults. */
static const struct irq_domain_ops amdgpu_hw_irqdomain_ops = {
	.map = amdgpu_irqdomain_map,
};
746
747
748
749
750
751
752
753
754
755
756
757
758int amdgpu_irq_add_domain(struct amdgpu_device *adev)
759{
760 adev->irq.domain = irq_domain_add_linear(NULL, AMDGPU_MAX_IRQ_SRC_ID,
761 &amdgpu_hw_irqdomain_ops, adev);
762 if (!adev->irq.domain) {
763 DRM_ERROR("GPU irq add domain failed\n");
764 return -ENODEV;
765 }
766
767 return 0;
768}
769
770
771
772
773
774
775
776
777
778void amdgpu_irq_remove_domain(struct amdgpu_device *adev)
779{
780 if (adev->irq.domain) {
781 irq_domain_remove(adev->irq.domain);
782 adev->irq.domain = NULL;
783 }
784}
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799unsigned amdgpu_irq_create_mapping(struct amdgpu_device *adev, unsigned src_id)
800{
801 adev->irq.virq[src_id] = irq_create_mapping(adev->irq.domain, src_id);
802
803 return adev->irq.virq[src_id];
804}
805