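/*
 * amdgpu interrupt handling: processing of the IH (interrupt handler) rings,
 * registration and dispatch of interrupt sources, MSI/MSI-X setup, and an
 * IRQ domain that exposes GPU interrupt sources as regular Linux interrupts.
 */
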
#include <linux/irq.h>
#include <linux/pci.h>

#include <drm/drm_crtc_helper.h>
#include <drm/drm_irq.h>
#include <drm/drm_vblank.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "amdgpu_ih.h"
#include "atom.h"
#include "amdgpu_connectors.h"
#include "amdgpu_trace.h"
#include "amdgpu_amdkfd.h"
#include "amdgpu_ras.h"

#include <linux/pm_runtime.h>

#ifdef CONFIG_DRM_AMD_DC
#include "amdgpu_dm_irq.h"
#endif

#define AMDGPU_WAIT_IDLE_TIMEOUT 200
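
/* Human-readable names of the SOC15 IH client IDs */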
const char *soc15_ih_clientid_name[] = {
	"IH",
	"SDMA2 or ACP",
	"ATHUB",
	"BIF",
	"SDMA3 or DCE",
	"SDMA4 or ISP",
	"VMC1 or PCIE0",
	"RLC",
	"SDMA0",
	"SDMA1",
	"SE0SH",
	"SE1SH",
	"SE2SH",
	"SE3SH",
	"VCN1 or UVD1",
	"THM",
	"VCN or UVD",
	"SDMA5 or VCE0",
	"VMC",
	"SDMA6 or XDMA",
	"GRBM_CP",
	"ATS",
	"ROM_SMUIO",
	"DF",
	"SDMA7 or VCE1",
	"PWR",
	"reserved",
	"UTCL2",
	"EA",
	"UTCL2LOG",
	"MP0",
	"MP1"
};
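
/**
 * amdgpu_hotplug_work_func - work handler for display hotplug events
 *
 * @work: work struct embedded in struct amdgpu_device (hotplug_work)
 *
 * Calls amdgpu_connector_hotplug() for every connector and then notifies
 * userspace through drm_helper_hpd_irq_event().  Only used on the non-DC
 * display path.
 */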
static void amdgpu_hotplug_work_func(struct work_struct *work)
{
	struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
						  hotplug_work);
	struct drm_device *dev = adev_to_drm(adev);
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;

	mutex_lock(&mode_config->mutex);
	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter)
		amdgpu_connector_hotplug(connector);
	drm_connector_list_iter_end(&iter);
	mutex_unlock(&mode_config->mutex);

	drm_helper_hpd_irq_event(dev);
}
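
/**
 * amdgpu_irq_disable_all - disable every registered interrupt source
 *
 * @adev: amdgpu device pointer
 *
 * Walks all clients and sources, clears every enable counter and asks each
 * source to program the disabled state in hardware.
 */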
void amdgpu_irq_disable_all(struct amdgpu_device *adev)
{
	unsigned long irqflags;
	unsigned i, j, k;
	int r;

	spin_lock_irqsave(&adev->irq.lock, irqflags);
	for (i = 0; i < AMDGPU_IRQ_CLIENTID_MAX; ++i) {
		if (!adev->irq.client[i].sources)
			continue;

		for (j = 0; j < AMDGPU_MAX_IRQ_SRC_ID; ++j) {
			struct amdgpu_irq_src *src = adev->irq.client[i].sources[j];

			if (!src || !src->funcs->set || !src->num_types)
				continue;

			for (k = 0; k < src->num_types; ++k) {
				atomic_set(&src->enabled_types[k], 0);
				r = src->funcs->set(adev, src, k,
						    AMDGPU_IRQ_STATE_DISABLE);
				if (r)
					DRM_ERROR("error disabling interrupt (%d)\n",
						  r);
			}
		}
	}
	spin_unlock_irqrestore(&adev->irq.lock, irqflags);
}
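
/**
 * amdgpu_irq_handler - top-level hardware interrupt handler
 *
 * @irq: Linux interrupt number
 * @arg: the struct drm_device the handler was installed for
 *
 * Processes the primary IH ring, marks the device busy for runtime PM when
 * work was done, and checks for RAS error events that are signalled outside
 * the IH ring.
 *
 * Returns the result of amdgpu_ih_process() on the primary IH ring.
 */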
irqreturn_t amdgpu_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	struct amdgpu_device *adev = drm_to_adev(dev);
	irqreturn_t ret;

	ret = amdgpu_ih_process(adev, &adev->irq.ih);
	if (ret == IRQ_HANDLED)
		pm_runtime_mark_last_busy(dev->dev);
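
	/* On some chips the RAS controller and ATHUB error interrupts cannot
	 * be delivered through the IH ring, so give the NBIO RAS callbacks a
	 * chance to detect and handle those events from the top-level handler.
	 */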
	if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__PCIE_BIF)) {
		if (adev->nbio.ras_funcs &&
		    adev->nbio.ras_funcs->handle_ras_controller_intr_no_bifring)
			adev->nbio.ras_funcs->handle_ras_controller_intr_no_bifring(adev);

		if (adev->nbio.ras_funcs &&
		    adev->nbio.ras_funcs->handle_ras_err_event_athub_intr_no_bifring)
			adev->nbio.ras_funcs->handle_ras_err_event_athub_intr_no_bifring(adev);
	}

	return ret;
}
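
/**
 * amdgpu_irq_handle_ih1 - work handler for IH ring 1
 *
 * @work: work struct embedded in struct amdgpu_device (irq.ih1_work)
 *
 * Processes the entries of IH ring 1 in process context.
 */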
static void amdgpu_irq_handle_ih1(struct work_struct *work)
{
	struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
						  irq.ih1_work);

	amdgpu_ih_process(adev, &adev->irq.ih1);
}
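
/**
 * amdgpu_irq_handle_ih2 - work handler for IH ring 2
 *
 * @work: work struct embedded in struct amdgpu_device (irq.ih2_work)
 *
 * Processes the entries of IH ring 2 in process context.
 */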
static void amdgpu_irq_handle_ih2(struct work_struct *work)
{
	struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
						  irq.ih2_work);

	amdgpu_ih_process(adev, &adev->irq.ih2);
}
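
/**
 * amdgpu_irq_handle_ih_soft - work handler for the software IH ring
 *
 * @work: work struct embedded in struct amdgpu_device (irq.ih_soft_work)
 *
 * Processes IV entries that were delegated to the software ring by
 * amdgpu_irq_delegate().
 */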
static void amdgpu_irq_handle_ih_soft(struct work_struct *work)
{
	struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
						  irq.ih_soft_work);

	amdgpu_ih_process(adev, &adev->irq.ih_soft);
}
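
/**
 * amdgpu_msi_ok - check whether MSI/MSI-X should be used
 *
 * @adev: amdgpu device pointer
 *
 * Honours the amdgpu_msi module parameter: 1 forces MSI on, 0 forces it off,
 * and any other value leaves MSI enabled.
 *
 * Returns true if MSI/MSI-X should be enabled, false otherwise.
 */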
static bool amdgpu_msi_ok(struct amdgpu_device *adev)
{
	if (amdgpu_msi == 1)
		return true;
	else if (amdgpu_msi == 0)
		return false;

	return true;
}
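
/**
 * amdgpu_irq_init - initialize interrupt handling
 *
 * @adev: amdgpu device pointer
 *
 * Sets up the interrupt lock, enables MSI/MSI-X when available, initializes
 * vblank and hotplug handling on the non-DC display path, prepares the work
 * items for the auxiliary IH rings and installs the DRM interrupt handler.
 *
 * Returns 0 on success or a negative error code on failure.
 */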
int amdgpu_irq_init(struct amdgpu_device *adev)
{
	int r = 0;

	spin_lock_init(&adev->irq.lock);
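
	/* MSI/MSI-X is used unless it was disabled with the amdgpu_msi module
	 * parameter (see amdgpu_msi_ok()).
	 */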
	adev->irq.msi_enabled = false;

	if (amdgpu_msi_ok(adev)) {
		int nvec = pci_msix_vec_count(adev->pdev);
		unsigned int flags;

		if (nvec <= 0) {
			flags = PCI_IRQ_MSI;
		} else {
			flags = PCI_IRQ_MSI | PCI_IRQ_MSIX;
		}

		nvec = pci_alloc_irq_vectors(adev->pdev, 1, 1, flags);
		if (nvec > 0) {
			adev->irq.msi_enabled = true;
			dev_dbg(adev->dev, "using MSI/MSI-X.\n");
		}
	}

	if (!amdgpu_device_has_dc_support(adev)) {
		if (!adev->enable_virtual_display)
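			/* Let DRM disable vblank interrupts as soon as the
			 * last reference is dropped, to save power.
			 */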
			adev_to_drm(adev)->vblank_disable_immediate = true;

		r = drm_vblank_init(adev_to_drm(adev), adev->mode_info.num_crtc);
		if (r)
			return r;
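
		/* On the non-DC display path, hotplug events are handled from
		 * process context via amdgpu_hotplug_work_func().
		 */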
		INIT_WORK(&adev->hotplug_work,
			  amdgpu_hotplug_work_func);
	}

	INIT_WORK(&adev->irq.ih1_work, amdgpu_irq_handle_ih1);
	INIT_WORK(&adev->irq.ih2_work, amdgpu_irq_handle_ih2);
	INIT_WORK(&adev->irq.ih_soft_work, amdgpu_irq_handle_ih_soft);

	adev->irq.installed = true;
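	/* pci_irq_vector(pdev, 0) resolves to the single MSI/MSI-X vector
	 * allocated above, or to the legacy interrupt if MSI is disabled.
	 */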
	r = drm_irq_install(adev_to_drm(adev), pci_irq_vector(adev->pdev, 0));
	if (r) {
		adev->irq.installed = false;
		if (!amdgpu_device_has_dc_support(adev))
			flush_work(&adev->hotplug_work);
		return r;
	}
	adev_to_drm(adev)->max_vblank_count = 0x00ffffff;

	DRM_DEBUG("amdgpu: irq initialized.\n");
	return 0;
}
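
/**
 * amdgpu_irq_fini - tear down interrupt handling
 *
 * @adev: amdgpu device pointer
 *
 * Uninstalls the DRM interrupt handler, releases the MSI/MSI-X vectors,
 * flushes the hotplug work on the non-DC path and frees all registered
 * interrupt sources.
 */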
void amdgpu_irq_fini(struct amdgpu_device *adev)
{
	unsigned i, j;

	if (adev->irq.installed) {
		drm_irq_uninstall(adev_to_drm(adev));
		adev->irq.installed = false;
		if (adev->irq.msi_enabled)
			pci_free_irq_vectors(adev->pdev);
		if (!amdgpu_device_has_dc_support(adev))
			flush_work(&adev->hotplug_work);
	}

	for (i = 0; i < AMDGPU_IRQ_CLIENTID_MAX; ++i) {
		if (!adev->irq.client[i].sources)
			continue;

		for (j = 0; j < AMDGPU_MAX_IRQ_SRC_ID; ++j) {
			struct amdgpu_irq_src *src = adev->irq.client[i].sources[j];

			if (!src)
				continue;

			kfree(src->enabled_types);
			src->enabled_types = NULL;
		}
		kfree(adev->irq.client[i].sources);
		adev->irq.client[i].sources = NULL;
	}
}
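
/**
 * amdgpu_irq_add_id - register an interrupt source
 *
 * @adev: amdgpu device pointer
 * @client_id: client id of the source
 * @src_id: source id within that client
 * @source: the interrupt source to register
 *
 * Allocates the per-client source table and the per-type enable counters on
 * first use and stores @source so it can be looked up by
 * amdgpu_irq_dispatch().
 *
 * Returns 0 on success, -EINVAL for invalid arguments or an already
 * registered id, -ENOMEM if allocation fails.
 */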
int amdgpu_irq_add_id(struct amdgpu_device *adev,
		      unsigned client_id, unsigned src_id,
		      struct amdgpu_irq_src *source)
{
	if (client_id >= AMDGPU_IRQ_CLIENTID_MAX)
		return -EINVAL;

	if (src_id >= AMDGPU_MAX_IRQ_SRC_ID)
		return -EINVAL;

	if (!source->funcs)
		return -EINVAL;

	if (!adev->irq.client[client_id].sources) {
		adev->irq.client[client_id].sources =
			kcalloc(AMDGPU_MAX_IRQ_SRC_ID,
				sizeof(struct amdgpu_irq_src *),
				GFP_KERNEL);
		if (!adev->irq.client[client_id].sources)
			return -ENOMEM;
	}

	if (adev->irq.client[client_id].sources[src_id] != NULL)
		return -EINVAL;

	if (source->num_types && !source->enabled_types) {
		atomic_t *types;

		types = kcalloc(source->num_types, sizeof(atomic_t),
				GFP_KERNEL);
		if (!types)
			return -ENOMEM;

		source->enabled_types = types;
	}

	adev->irq.client[client_id].sources[src_id] = source;
	return 0;
}
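
/**
 * amdgpu_irq_dispatch - decode an IV entry and dispatch it to its handler
 *
 * @adev: amdgpu device pointer
 * @ih: the IH ring the entry is read from
 *
 * Decodes the interrupt vector at the ring's current read pointer and hands
 * it to the registered source, to a virtual IRQ mapped through the IRQ
 * domain, and finally to amdkfd if nobody else handled it.
 */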
void amdgpu_irq_dispatch(struct amdgpu_device *adev,
			 struct amdgpu_ih_ring *ih)
{
	u32 ring_index = ih->rptr >> 2;
	struct amdgpu_iv_entry entry;
	unsigned client_id, src_id;
	struct amdgpu_irq_src *src;
	bool handled = false;
	int r;

	entry.ih = ih;
	entry.iv_entry = (const uint32_t *)&ih->ring[ring_index];
	amdgpu_ih_decode_iv(adev, &entry);

	trace_amdgpu_iv(ih - &adev->irq.ih, &entry);

	client_id = entry.client_id;
	src_id = entry.src_id;

	if (client_id >= AMDGPU_IRQ_CLIENTID_MAX) {
		DRM_DEBUG("Invalid client_id in IV: %d\n", client_id);

	} else if (src_id >= AMDGPU_MAX_IRQ_SRC_ID) {
		DRM_DEBUG("Invalid src_id in IV: %d\n", src_id);

	} else if ((client_id == AMDGPU_IRQ_CLIENTID_LEGACY) &&
		   adev->irq.virq[src_id]) {
		generic_handle_irq(irq_find_mapping(adev->irq.domain, src_id));

	} else if (!adev->irq.client[client_id].sources) {
		DRM_DEBUG("Unregistered interrupt client_id: %d src_id: %d\n",
			  client_id, src_id);

	} else if ((src = adev->irq.client[client_id].sources[src_id])) {
		r = src->funcs->process(adev, src, &entry);
		if (r < 0)
			DRM_ERROR("error processing interrupt (%d)\n", r);
		else if (r)
			handled = true;

	} else {
		DRM_DEBUG("Unhandled interrupt src_id: %d\n", src_id);
	}
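
	/* Send it to amdkfd as well if it isn't already handled */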
	if (!handled)
		amdgpu_amdkfd_interrupt(adev, entry.iv_entry);
}
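
/**
 * amdgpu_irq_delegate - delegate an IV entry to the software IH ring
 *
 * @adev: amdgpu device pointer
 * @entry: the IV entry to delegate
 * @num_dw: size of the entry in dwords
 *
 * Copies the entry into the software IH ring and schedules the work item
 * that processes it, so handling can continue in process context.
 */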
void amdgpu_irq_delegate(struct amdgpu_device *adev,
			 struct amdgpu_iv_entry *entry,
			 unsigned int num_dw)
{
	amdgpu_ih_ring_write(&adev->irq.ih_soft, entry->iv_entry, num_dw);
	schedule_work(&adev->irq.ih_soft_work);
}
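
/**
 * amdgpu_irq_update - program the hardware state of one interrupt type
 *
 * @adev: amdgpu device pointer
 * @src: interrupt source
 * @type: type of interrupt within the source
 *
 * Re-programs the enable state of @type on @src in hardware according to its
 * current enable counter.
 *
 * Returns the value returned by the source's set callback.
 */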
int amdgpu_irq_update(struct amdgpu_device *adev,
		      struct amdgpu_irq_src *src, unsigned type)
{
	unsigned long irqflags;
	enum amdgpu_interrupt_state state;
	int r;

	spin_lock_irqsave(&adev->irq.lock, irqflags);
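
	/* Sample the enabled state with the lock held, otherwise a concurrent
	 * amdgpu_irq_get()/amdgpu_irq_put() could leave the hardware state out
	 * of sync with the enable counter.
	 */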
	if (amdgpu_irq_enabled(adev, src, type))
		state = AMDGPU_IRQ_STATE_ENABLE;
	else
		state = AMDGPU_IRQ_STATE_DISABLE;

	r = src->funcs->set(adev, src, type, state);
	spin_unlock_irqrestore(&adev->irq.lock, irqflags);
	return r;
}
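
/**
 * amdgpu_irq_gpu_reset_resume_helper - restore interrupt states after reset
 *
 * @adev: amdgpu device pointer
 *
 * Walks every registered interrupt source and re-programs its hardware
 * enable state from the software enable counters.
 */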
void amdgpu_irq_gpu_reset_resume_helper(struct amdgpu_device *adev)
{
	int i, j, k;

	for (i = 0; i < AMDGPU_IRQ_CLIENTID_MAX; ++i) {
		if (!adev->irq.client[i].sources)
			continue;

		for (j = 0; j < AMDGPU_MAX_IRQ_SRC_ID; ++j) {
			struct amdgpu_irq_src *src = adev->irq.client[i].sources[j];

			if (!src || !src->funcs || !src->funcs->set)
				continue;
			for (k = 0; k < src->num_types; k++)
				amdgpu_irq_update(adev, src, k);
		}
	}
}
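
/**
 * amdgpu_irq_get - take a reference on an interrupt type
 *
 * @adev: amdgpu device pointer
 * @src: interrupt source
 * @type: type of interrupt within the source
 *
 * Increments the enable counter for the given type and enables the interrupt
 * in hardware on the 0 -> 1 transition.
 *
 * Returns 0 on success or a negative error code on failure.
 */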
int amdgpu_irq_get(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
		   unsigned type)
{
	if (!adev_to_drm(adev)->irq_enabled)
		return -ENOENT;

	if (type >= src->num_types)
		return -EINVAL;

	if (!src->enabled_types || !src->funcs->set)
		return -EINVAL;

	if (atomic_inc_return(&src->enabled_types[type]) == 1)
		return amdgpu_irq_update(adev, src, type);

	return 0;
}
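
/**
 * amdgpu_irq_put - drop a reference on an interrupt type
 *
 * @adev: amdgpu device pointer
 * @src: interrupt source
 * @type: type of interrupt within the source
 *
 * Decrements the enable counter for the given type and disables the
 * interrupt in hardware on the 1 -> 0 transition.
 *
 * Returns 0 on success or a negative error code on failure.
 */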
int amdgpu_irq_put(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
		   unsigned type)
{
	if (!adev_to_drm(adev)->irq_enabled)
		return -ENOENT;

	if (type >= src->num_types)
		return -EINVAL;

	if (!src->enabled_types || !src->funcs->set)
		return -EINVAL;

	if (atomic_dec_and_test(&src->enabled_types[type]))
		return amdgpu_irq_update(adev, src, type);

	return 0;
}
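
/**
 * amdgpu_irq_enabled - check whether an interrupt type is enabled
 *
 * @adev: amdgpu device pointer
 * @src: interrupt source
 * @type: type of interrupt within the source
 *
 * Returns true if the enable counter of @type is non-zero, false if it is
 * zero or the arguments are invalid.
 */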
bool amdgpu_irq_enabled(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
			unsigned type)
{
	if (!adev_to_drm(adev)->irq_enabled)
		return false;

	if (type >= src->num_types)
		return false;

	if (!src->enabled_types || !src->funcs->set)
		return false;

	return !!atomic_read(&src->enabled_types[type]);
}
static void amdgpu_irq_mask(struct irq_data *irqd)
{
}

static void amdgpu_irq_unmask(struct irq_data *irqd)
{
}
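
/* Minimal irq_chip for GPU interrupt sources exposed through the IRQ domain;
 * the mask and unmask callbacks above are currently no-ops.
 */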
static struct irq_chip amdgpu_irq_chip = {
	.name = "amdgpu-ih",
	.irq_mask = amdgpu_irq_mask,
	.irq_unmask = amdgpu_irq_unmask,
};
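
/**
 * amdgpu_irqdomain_map - map a hardware irq number to a virtual irq
 *
 * @d: the irq domain
 * @irq: the Linux (virtual) irq number
 * @hwirq: the hardware irq number (GPU src_id)
 *
 * Associates the virtual irq with the amdgpu irq_chip and the simple flow
 * handler.
 *
 * Returns 0 on success, -EPERM if the hardware irq number is out of range.
 */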
static int amdgpu_irqdomain_map(struct irq_domain *d,
				unsigned int irq, irq_hw_number_t hwirq)
{
	if (hwirq >= AMDGPU_MAX_IRQ_SRC_ID)
		return -EPERM;

	irq_set_chip_and_handler(irq,
				 &amdgpu_irq_chip, handle_simple_irq);
	return 0;
}
static const struct irq_domain_ops amdgpu_hw_irqdomain_ops = {
	.map = amdgpu_irqdomain_map,
};
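
/**
 * amdgpu_irq_add_domain - create the IRQ domain for GPU interrupt sources
 *
 * @adev: amdgpu device pointer
 *
 * Creates a linear IRQ domain so that selected GPU interrupt sources can be
 * consumed through regular Linux irq numbers.
 *
 * Returns 0 on success, -ENODEV if the domain could not be created.
 */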
int amdgpu_irq_add_domain(struct amdgpu_device *adev)
{
	adev->irq.domain = irq_domain_add_linear(NULL, AMDGPU_MAX_IRQ_SRC_ID,
						 &amdgpu_hw_irqdomain_ops, adev);
	if (!adev->irq.domain) {
		DRM_ERROR("GPU irq add domain failed\n");
		return -ENODEV;
	}

	return 0;
}
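
/**
 * amdgpu_irq_remove_domain - remove the IRQ domain
 *
 * @adev: amdgpu device pointer
 *
 * Removes the IRQ domain created by amdgpu_irq_add_domain(), if any.
 */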
void amdgpu_irq_remove_domain(struct amdgpu_device *adev)
{
	if (adev->irq.domain) {
		irq_domain_remove(adev->irq.domain);
		adev->irq.domain = NULL;
	}
}
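
/**
 * amdgpu_irq_create_mapping - map a GPU src_id to a virtual irq
 *
 * @adev: amdgpu device pointer
 * @src_id: the GPU interrupt source id to map
 *
 * Creates (or looks up) the Linux virtual irq for @src_id in the amdgpu IRQ
 * domain and caches it in adev->irq.virq[].
 *
 * Returns the virtual irq number, or 0 if no mapping could be created.
 */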
unsigned amdgpu_irq_create_mapping(struct amdgpu_device *adev, unsigned src_id)
{
	adev->irq.virq[src_id] = irq_create_mapping(adev->irq.domain, src_id);

	return adev->irq.virq[src_id];
}