#include <linux/kthread.h>
#include <linux/console.h>
#include <linux/slab.h>
#include <linux/debugfs.h>
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/amdgpu_drm.h>
#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
#include <linux/efi.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_i2c.h"
#include "atom.h"
#include "amdgpu_atombios.h"
#include "amd_pcie.h"
#ifdef CONFIG_DRM_AMDGPU_SI
#include "si.h"
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
#include "cik.h"
#endif
#include "vi.h"
#include "bif/bif_4_1_d.h"
#include <linux/pci.h>
#include <linux/firmware.h>

static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev);
static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev);
57
58static const char *amdgpu_asic_name[] = {
59 "TAHITI",
60 "PITCAIRN",
61 "VERDE",
62 "OLAND",
63 "HAINAN",
64 "BONAIRE",
65 "KAVERI",
66 "KABINI",
67 "HAWAII",
68 "MULLINS",
69 "TOPAZ",
70 "TONGA",
71 "FIJI",
72 "CARRIZO",
73 "STONEY",
74 "POLARIS10",
75 "POLARIS11",
76 "POLARIS12",
77 "LAST",
78};
79
80bool amdgpu_device_is_px(struct drm_device *dev)
81{
82 struct amdgpu_device *adev = dev->dev_private;
83
84 if (adev->flags & AMD_IS_PX)
85 return true;
86 return false;
87}

/*
 * MMIO register access helper functions.
 */

/**
 * amdgpu_mm_rreg - read a memory mapped IO register
 *
 * @adev: amdgpu_device pointer
 * @reg: dword aligned register offset
 * @always_indirect: force the access through MM_INDEX/MM_DATA
 *
 * Returns the 32 bit value from the offset specified.
 */
92uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
93 bool always_indirect)
94{
95 uint32_t ret;
96
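	/* registers that fall inside the MMIO BAR are read directly; anything
	 * beyond it (or a forced indirect access) goes through the
	 * MM_INDEX/MM_DATA pair under mmio_idx_lock */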
97 if ((reg * 4) < adev->rmmio_size && !always_indirect)
98 ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
99 else {
100 unsigned long flags;
101
102 spin_lock_irqsave(&adev->mmio_idx_lock, flags);
103 writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
104 ret = readl(((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
105 spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
106 }
107 trace_amdgpu_mm_rreg(adev->pdev->device, reg, ret);
108 return ret;
109}
110
111void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
112 bool always_indirect)
113{
114 trace_amdgpu_mm_wreg(adev->pdev->device, reg, v);
115
116 if ((reg * 4) < adev->rmmio_size && !always_indirect)
117 writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
118 else {
119 unsigned long flags;
120
121 spin_lock_irqsave(&adev->mmio_idx_lock, flags);
122 writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
123 writel(v, ((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
124 spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
125 }
126}
127
128u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg)
129{
130 if ((reg * 4) < adev->rio_mem_size)
131 return ioread32(adev->rio_mem + (reg * 4));
132 else {
133 iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
134 return ioread32(adev->rio_mem + (mmMM_DATA * 4));
135 }
136}
137
138void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
139{
140
141 if ((reg * 4) < adev->rio_mem_size)
142 iowrite32(v, adev->rio_mem + (reg * 4));
143 else {
144 iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
145 iowrite32(v, adev->rio_mem + (mmMM_DATA * 4));
146 }
147}

/**
 * amdgpu_mm_rdoorbell - read a doorbell dword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 *
 * Returns the value in the doorbell aperture at the
 * requested doorbell index (CIK).
 */
158u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index)
159{
160 if (index < adev->doorbell.num_doorbells) {
161 return readl(adev->doorbell.ptr + index);
162 } else {
163 DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
164 return 0;
165 }
166}

/**
 * amdgpu_mm_wdoorbell - write a doorbell dword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 * @v: value to write
 *
 * Writes @v to the doorbell aperture at the
 * requested doorbell index (CIK).
 */
178void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v)
179{
180 if (index < adev->doorbell.num_doorbells) {
181 writel(v, adev->doorbell.ptr + index);
182 } else {
183 DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
184 }
185}

/**
 * amdgpu_invalid_rreg - dummy reg read function
 *
 * @adev: amdgpu_device pointer
 * @reg: offset of register
 *
 * Dummy register read function.  Used for register blocks
 * that certain asics don't have (all asics).
 * Returns the value in the register.
 */
197static uint32_t amdgpu_invalid_rreg(struct amdgpu_device *adev, uint32_t reg)
198{
199 DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
200 BUG();
201 return 0;
202}

/**
 * amdgpu_invalid_wreg - dummy reg write function
 *
 * @adev: amdgpu_device pointer
 * @reg: offset of register
 * @v: value to write to the register
 *
 * Dummy register write function.  Used for register blocks
 * that certain asics don't have (all asics).
 */
214static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
215{
216 DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
217 reg, v);
218 BUG();
219}

/**
 * amdgpu_block_invalid_rreg - dummy reg read function
 *
 * @adev: amdgpu_device pointer
 * @block: offset of instance
 * @reg: offset of register
 *
 * Dummy register read function.  Used for register blocks
 * that certain asics don't have (all asics).
 * Returns the value in the register.
 */
232static uint32_t amdgpu_block_invalid_rreg(struct amdgpu_device *adev,
233 uint32_t block, uint32_t reg)
234{
235 DRM_ERROR("Invalid callback to read register 0x%04X in block 0x%04X\n",
236 reg, block);
237 BUG();
238 return 0;
239}

/**
 * amdgpu_block_invalid_wreg - dummy reg write function
 *
 * @adev: amdgpu_device pointer
 * @block: offset of instance
 * @reg: offset of register
 * @v: value to write to the register
 *
 * Dummy register write function.  Used for register blocks
 * that certain asics don't have (all asics).
 */
252static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
253 uint32_t block,
254 uint32_t reg, uint32_t v)
255{
256 DRM_ERROR("Invalid block callback to write register 0x%04X in block 0x%04X with 0x%08X\n",
257 reg, block, v);
258 BUG();
259}
260
261static int amdgpu_vram_scratch_init(struct amdgpu_device *adev)
262{
263 int r;
264
265 if (adev->vram_scratch.robj == NULL) {
266 r = amdgpu_bo_create(adev, AMDGPU_GPU_PAGE_SIZE,
267 PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM,
268 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
269 AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
270 NULL, NULL, &adev->vram_scratch.robj);
271 if (r) {
272 return r;
273 }
274 }
275
276 r = amdgpu_bo_reserve(adev->vram_scratch.robj, false);
277 if (unlikely(r != 0))
278 return r;
279 r = amdgpu_bo_pin(adev->vram_scratch.robj,
280 AMDGPU_GEM_DOMAIN_VRAM, &adev->vram_scratch.gpu_addr);
281 if (r) {
282 amdgpu_bo_unreserve(adev->vram_scratch.robj);
283 return r;
284 }
285 r = amdgpu_bo_kmap(adev->vram_scratch.robj,
286 (void **)&adev->vram_scratch.ptr);
287 if (r)
288 amdgpu_bo_unpin(adev->vram_scratch.robj);
289 amdgpu_bo_unreserve(adev->vram_scratch.robj);
290
291 return r;
292}
293
294static void amdgpu_vram_scratch_fini(struct amdgpu_device *adev)
295{
296 int r;
297
298 if (adev->vram_scratch.robj == NULL) {
299 return;
300 }
301 r = amdgpu_bo_reserve(adev->vram_scratch.robj, false);
302 if (likely(r == 0)) {
303 amdgpu_bo_kunmap(adev->vram_scratch.robj);
304 amdgpu_bo_unpin(adev->vram_scratch.robj);
305 amdgpu_bo_unreserve(adev->vram_scratch.robj);
306 }
307 amdgpu_bo_unref(&adev->vram_scratch.robj);
308}

/**
 * amdgpu_program_register_sequence - program an array of registers
 *
 * @adev: amdgpu_device pointer
 * @registers: pointer to the register array
 * @array_size: size of the register array
 *
 * Programs an array of registers with corresponding and/or masks.
 * This is a helper for setting golden registers.
 */
320void amdgpu_program_register_sequence(struct amdgpu_device *adev,
321 const u32 *registers,
322 const u32 array_size)
323{
324 u32 tmp, reg, and_mask, or_mask;
325 int i;
326
327 if (array_size % 3)
328 return;
329
	for (i = 0; i < array_size; i += 3) {
331 reg = registers[i + 0];
332 and_mask = registers[i + 1];
333 or_mask = registers[i + 2];
334
335 if (and_mask == 0xffffffff) {
336 tmp = or_mask;
337 } else {
338 tmp = RREG32(reg);
339 tmp &= ~and_mask;
340 tmp |= or_mask;
341 }
342 WREG32(reg, tmp);
343 }
344}
345
346void amdgpu_pci_config_reset(struct amdgpu_device *adev)
347{
348 pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA);
349}

/*
 * GPU doorbell aperture helper functions.
 */
/**
 * amdgpu_doorbell_init - Init doorbell driver information.
 *
 * @adev: amdgpu_device pointer
 *
 * Init doorbell driver information (CIK).
 * Returns 0 on success, error on failure.
 */
362static int amdgpu_doorbell_init(struct amdgpu_device *adev)
363{
364
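	/* the doorbell aperture is exposed through PCI BAR 2 on CIK and newer parts */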
365 adev->doorbell.base = pci_resource_start(adev->pdev, 2);
366 adev->doorbell.size = pci_resource_len(adev->pdev, 2);
367
368 adev->doorbell.num_doorbells = min_t(u32, adev->doorbell.size / sizeof(u32),
369 AMDGPU_DOORBELL_MAX_ASSIGNMENT+1);
370 if (adev->doorbell.num_doorbells == 0)
371 return -EINVAL;
372
373 adev->doorbell.ptr = ioremap(adev->doorbell.base, adev->doorbell.num_doorbells * sizeof(u32));
374 if (adev->doorbell.ptr == NULL) {
375 return -ENOMEM;
376 }
377 DRM_INFO("doorbell mmio base: 0x%08X\n", (uint32_t)adev->doorbell.base);
378 DRM_INFO("doorbell mmio size: %u\n", (unsigned)adev->doorbell.size);
379
380 return 0;
381}

/**
 * amdgpu_doorbell_fini - Tear down doorbell driver information.
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down doorbell driver information (CIK).
 */
390static void amdgpu_doorbell_fini(struct amdgpu_device *adev)
391{
392 iounmap(adev->doorbell.ptr);
393 adev->doorbell.ptr = NULL;
394}

/**
 * amdgpu_doorbell_get_kfd_info - Report doorbell configuration required to
 *                                set up amdkfd
 *
 * @adev: amdgpu_device pointer
 * @aperture_base: output returning doorbell aperture base physical address
 * @aperture_size: output returning doorbell aperture size in bytes
 * @start_offset: output returning # of doorbell bytes reserved for amdgpu
 *
 * amdgpu and amdkfd share the doorbell aperture.  amdgpu sets it up,
 * takes the doorbells required for its own rings and reports the setup
 * to amdkfd.  The doorbells reserved for amdgpu are at the start of the
 * aperture.
 */
409void amdgpu_doorbell_get_kfd_info(struct amdgpu_device *adev,
410 phys_addr_t *aperture_base,
411 size_t *aperture_size,
412 size_t *start_offset)
413{
	/*
	 * The first num_doorbells are used by amdgpu.
	 * amdkfd takes whatever is left in the aperture.
	 */
418 if (adev->doorbell.size > adev->doorbell.num_doorbells * sizeof(u32)) {
419 *aperture_base = adev->doorbell.base;
420 *aperture_size = adev->doorbell.size;
421 *start_offset = adev->doorbell.num_doorbells * sizeof(u32);
422 } else {
423 *aperture_base = 0;
424 *aperture_size = 0;
425 *start_offset = 0;
426 }
427}

/*
 * amdgpu_wb_*()
 * Writeback is the method by which the GPU updates special pages in memory
 * with the status of certain GPU events (fences, ring pointers, etc.).
 */

/**
 * amdgpu_wb_fini - Disable Writeback and free memory
 *
 * @adev: amdgpu_device pointer
 *
 * Disables Writeback and frees the Writeback memory (all asics).
 * Used at driver shutdown.
 */
444static void amdgpu_wb_fini(struct amdgpu_device *adev)
445{
446 if (adev->wb.wb_obj) {
447 amdgpu_bo_free_kernel(&adev->wb.wb_obj,
448 &adev->wb.gpu_addr,
449 (void **)&adev->wb.wb);
450 adev->wb.wb_obj = NULL;
451 }
452}

/**
 * amdgpu_wb_init - Init Writeback driver info and allocate memory
 *
 * @adev: amdgpu_device pointer
 *
 * Initializes writeback and allocates writeback memory (all asics).
 * Used at driver startup.
 * Returns 0 on success or an error on failure.
 */
463static int amdgpu_wb_init(struct amdgpu_device *adev)
464{
465 int r;
466
467 if (adev->wb.wb_obj == NULL) {
468 r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * 4,
469 PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
470 &adev->wb.wb_obj, &adev->wb.gpu_addr,
471 (void **)&adev->wb.wb);
472 if (r) {
473 dev_warn(adev->dev, "(%d) create WB bo failed\n", r);
474 return r;
475 }
476
477 adev->wb.num_wb = AMDGPU_MAX_WB;
478 memset(&adev->wb.used, 0, sizeof(adev->wb.used));
479
480
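		/* clear wb memory */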
481 memset((char *)adev->wb.wb, 0, AMDGPU_GPU_PAGE_SIZE);
482 }
483
484 return 0;
485}

/**
 * amdgpu_wb_get - Allocate a wb entry
 *
 * @adev: amdgpu_device pointer
 * @wb: wb index
 *
 * Allocate a wb slot for use by the driver (all asics).
 * Returns 0 on success or -EINVAL on failure.
 */
496int amdgpu_wb_get(struct amdgpu_device *adev, u32 *wb)
497{
498 unsigned long offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb);
499 if (offset < adev->wb.num_wb) {
500 __set_bit(offset, adev->wb.used);
501 *wb = offset;
502 return 0;
503 } else {
504 return -EINVAL;
505 }
506}

/**
 * amdgpu_wb_free - Free a wb entry
 *
 * @adev: amdgpu_device pointer
 * @wb: wb index
 *
 * Free a wb slot allocated for use by the driver (all asics).
 */
516void amdgpu_wb_free(struct amdgpu_device *adev, u32 wb)
517{
518 if (wb < adev->wb.num_wb)
519 __clear_bit(wb, adev->wb.used);
520}

/*
 * GPU MC structures, functions & helpers
 */

/**
 * amdgpu_vram_location - try to find VRAM location
 *
 * @adev: amdgpu_device pointer
 * @mc: memory controller structure holding memory informations
 * @base: base address at which to put VRAM
 *
 * Places VRAM at @base in the GPU's internal (MC) address space.
 * If the requested VRAM size does not fit above @base within the MC
 * address space, the reported size is limited to the PCI aperture
 * size; the optional amdgpu_vram_limit module parameter can shrink
 * the usable size further.
 */
554void amdgpu_vram_location(struct amdgpu_device *adev, struct amdgpu_mc *mc, u64 base)
555{
556 uint64_t limit = (uint64_t)amdgpu_vram_limit << 20;
557
558 mc->vram_start = base;
559 if (mc->mc_vram_size > (adev->mc.mc_mask - base + 1)) {
560 dev_warn(adev->dev, "limiting VRAM to PCI aperture size\n");
561 mc->real_vram_size = mc->aper_size;
562 mc->mc_vram_size = mc->aper_size;
563 }
564 mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
565 if (limit && limit < mc->real_vram_size)
566 mc->real_vram_size = limit;
567 dev_info(adev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
568 mc->mc_vram_size >> 20, mc->vram_start,
569 mc->vram_end, mc->real_vram_size >> 20);
570}

/**
 * amdgpu_gtt_location - try to find GTT location
 *
 * @adev: amdgpu_device pointer
 * @mc: memory controller structure holding memory informations
 *
 * Places the GTT aperture in the MC address space, preferring the
 * larger hole either below or above the VRAM range, and shrinking
 * the GTT size if it does not fit.
 */
584void amdgpu_gtt_location(struct amdgpu_device *adev, struct amdgpu_mc *mc)
585{
586 u64 size_af, size_bf;
587
588 size_af = ((adev->mc.mc_mask - mc->vram_end) + mc->gtt_base_align) & ~mc->gtt_base_align;
589 size_bf = mc->vram_start & ~mc->gtt_base_align;
590 if (size_bf > size_af) {
591 if (mc->gtt_size > size_bf) {
592 dev_warn(adev->dev, "limiting GTT\n");
593 mc->gtt_size = size_bf;
594 }
595 mc->gtt_start = (mc->vram_start & ~mc->gtt_base_align) - mc->gtt_size;
596 } else {
597 if (mc->gtt_size > size_af) {
598 dev_warn(adev->dev, "limiting GTT\n");
599 mc->gtt_size = size_af;
600 }
601 mc->gtt_start = (mc->vram_end + 1 + mc->gtt_base_align) & ~mc->gtt_base_align;
602 }
603 mc->gtt_end = mc->gtt_start + mc->gtt_size - 1;
604 dev_info(adev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n",
605 mc->gtt_size >> 20, mc->gtt_start, mc->gtt_end);
606}

/*
 * GPU helper functions.
 */
/**
 * amdgpu_card_posted - check if the hw has already been initialized
 *
 * @adev: amdgpu_device pointer
 *
 * Check if the asic has been initialized (all asics).
 * Used at driver startup.
 * Returns true if initialized or false if not.
 */
620bool amdgpu_card_posted(struct amdgpu_device *adev)
621{
622 uint32_t reg;
623
624
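	/* the vbios post programs CONFIG_MEMSIZE, so a non-zero value here
	 * means the asic has already been posted */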
625 reg = RREG32(mmCONFIG_MEMSIZE);
626
627 if (reg)
628 return true;
629
630 return false;
631
632}
633
634static bool amdgpu_vpost_needed(struct amdgpu_device *adev)
635{
636 if (amdgpu_sriov_vf(adev))
637 return false;
638
639 if (amdgpu_passthrough(adev)) {
		/* for FIJI in pass-through: old SMC firmware may leave
		 * registers such as CONFIG_MEMSIZE uninitialized, which makes
		 * the card look un-posted, so force a post unless the SMC
		 * firmware is new enough. */
645 if (adev->asic_type == CHIP_FIJI) {
646 int err;
647 uint32_t fw_ver;
648 err = request_firmware(&adev->pm.fw, "amdgpu/fiji_smc.bin", adev->dev);
649
650 if (err)
651 return true;
652
653 fw_ver = *((uint32_t *)adev->pm.fw->data + 69);
654 if (fw_ver < 0x00160e00)
655 return true;
656 }
657 }
658 return !amdgpu_card_posted(adev);
659}

/**
 * amdgpu_dummy_page_init - init dummy page used by the driver
 *
 * @adev: amdgpu_device pointer
 *
 * Allocate the dummy page used by the driver (all asics).
 * This dummy page is used by the driver as a filler for GART entries
 * when pages are taken out of the GART.
 * Returns 0 on success, -ENOMEM on failure.
 */
671int amdgpu_dummy_page_init(struct amdgpu_device *adev)
672{
673 if (adev->dummy_page.page)
674 return 0;
675 adev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO);
676 if (adev->dummy_page.page == NULL)
677 return -ENOMEM;
678 adev->dummy_page.addr = pci_map_page(adev->pdev, adev->dummy_page.page,
679 0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
680 if (pci_dma_mapping_error(adev->pdev, adev->dummy_page.addr)) {
681 dev_err(&adev->pdev->dev, "Failed to DMA MAP the dummy page\n");
682 __free_page(adev->dummy_page.page);
683 adev->dummy_page.page = NULL;
684 return -ENOMEM;
685 }
686 return 0;
687}

/**
 * amdgpu_dummy_page_fini - free dummy page used by the driver
 *
 * @adev: amdgpu_device pointer
 *
 * Frees the dummy page used by the driver (all asics).
 */
696void amdgpu_dummy_page_fini(struct amdgpu_device *adev)
697{
698 if (adev->dummy_page.page == NULL)
699 return;
700 pci_unmap_page(adev->pdev, adev->dummy_page.addr,
701 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
702 __free_page(adev->dummy_page.page);
703 adev->dummy_page.page = NULL;
704}

/* ATOM accessor methods */
/*
 * The atom interpreter needs register accessors for the PLL, MC and
 * plain MMIO/IO register spaces; the cail_* callbacks below provide them.
 */

/**
 * cail_pll_read - read PLL register
 *
 * @info: atom card_info pointer
 * @reg: PLL register offset
 *
 * Provides a PLL register accessor for the atom interpreter
 * (stub on amdgpu; always returns 0).
 */
725static uint32_t cail_pll_read(struct card_info *info, uint32_t reg)
726{
727 return 0;
728}

/**
 * cail_pll_write - write PLL register
 *
 * @info: atom card_info pointer
 * @reg: PLL register offset
 * @val: value to write to the PLL register
 *
 * Provides a PLL register accessor for the atom interpreter (stub on amdgpu).
 */
739static void cail_pll_write(struct card_info *info, uint32_t reg, uint32_t val)
740{
741
742}

/**
 * cail_mc_read - read MC (Memory Controller) register
 *
 * @info: atom card_info pointer
 * @reg: MC register offset
 *
 * Provides an MC register accessor for the atom interpreter
 * (stub on amdgpu; always returns 0).
 */
753static uint32_t cail_mc_read(struct card_info *info, uint32_t reg)
754{
755 return 0;
756}

/**
 * cail_mc_write - write MC (Memory Controller) register
 *
 * @info: atom card_info pointer
 * @reg: MC register offset
 * @val: value to write to the MC register
 *
 * Provides an MC register accessor for the atom interpreter (stub on amdgpu).
 */
767static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val)
768{
769
770}

/**
 * cail_reg_write - write MMIO register
 *
 * @info: atom card_info pointer
 * @reg: MMIO register offset
 * @val: value to write to the register
 *
 * Provides an MMIO register accessor for the atom interpreter.
 */
781static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val)
782{
783 struct amdgpu_device *adev = info->dev->dev_private;
784
785 WREG32(reg, val);
786}

/**
 * cail_reg_read - read MMIO register
 *
 * @info: atom card_info pointer
 * @reg: MMIO register offset
 *
 * Provides an MMIO register accessor for the atom interpreter.
 * Returns the value of the MMIO register.
 */
797static uint32_t cail_reg_read(struct card_info *info, uint32_t reg)
798{
799 struct amdgpu_device *adev = info->dev->dev_private;
800 uint32_t r;
801
802 r = RREG32(reg);
803 return r;
804}

/**
 * cail_ioreg_write - write IO register
 *
 * @info: atom card_info pointer
 * @reg: IO register offset
 * @val: value to write to the register
 *
 * Provides an IO register accessor for the atom interpreter.
 */
815static void cail_ioreg_write(struct card_info *info, uint32_t reg, uint32_t val)
816{
817 struct amdgpu_device *adev = info->dev->dev_private;
818
819 WREG32_IO(reg, val);
820}

/**
 * cail_ioreg_read - read IO register
 *
 * @info: atom card_info pointer
 * @reg: IO register offset
 *
 * Provides an IO register accessor for the atom interpreter.
 * Returns the value of the IO register.
 */
831static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg)
832{
833 struct amdgpu_device *adev = info->dev->dev_private;
834 uint32_t r;
835
836 r = RREG32_IO(reg);
837 return r;
838}

/**
 * amdgpu_atombios_fini - free the driver info and callbacks for atombios
 *
 * @adev: amdgpu_device pointer
 *
 * Frees the driver info and register access callbacks for the ATOM
 * interpreter (r4xx+).
 * Called at driver shutdown.
 */
849static void amdgpu_atombios_fini(struct amdgpu_device *adev)
850{
851 if (adev->mode_info.atom_context) {
852 kfree(adev->mode_info.atom_context->scratch);
853 kfree(adev->mode_info.atom_context->iio);
854 }
855 kfree(adev->mode_info.atom_context);
856 adev->mode_info.atom_context = NULL;
857 kfree(adev->mode_info.atom_card_info);
858 adev->mode_info.atom_card_info = NULL;
859}

/**
 * amdgpu_atombios_init - init the driver info and callbacks for atombios
 *
 * @adev: amdgpu_device pointer
 *
 * Initializes the driver info and register access callbacks for the
 * ATOM interpreter (r4xx+).
 * Returns 0 on success, -ENOMEM on failure.
 * Called at driver startup.
 */
871static int amdgpu_atombios_init(struct amdgpu_device *adev)
872{
873 struct card_info *atom_card_info =
874 kzalloc(sizeof(struct card_info), GFP_KERNEL);
875
876 if (!atom_card_info)
877 return -ENOMEM;
878
879 adev->mode_info.atom_card_info = atom_card_info;
880 atom_card_info->dev = adev->ddev;
881 atom_card_info->reg_read = cail_reg_read;
882 atom_card_info->reg_write = cail_reg_write;
883
884 if (adev->rio_mem) {
885 atom_card_info->ioreg_read = cail_ioreg_read;
886 atom_card_info->ioreg_write = cail_ioreg_write;
887 } else {
888 DRM_ERROR("Unable to find PCI I/O BAR; using MMIO for ATOM IIO\n");
889 atom_card_info->ioreg_read = cail_reg_read;
890 atom_card_info->ioreg_write = cail_reg_write;
891 }
892 atom_card_info->mc_read = cail_mc_read;
893 atom_card_info->mc_write = cail_mc_write;
894 atom_card_info->pll_read = cail_pll_read;
895 atom_card_info->pll_write = cail_pll_write;
896
897 adev->mode_info.atom_context = amdgpu_atom_parse(atom_card_info, adev->bios);
898 if (!adev->mode_info.atom_context) {
899 amdgpu_atombios_fini(adev);
900 return -ENOMEM;
901 }
902
903 mutex_init(&adev->mode_info.atom_context->mutex);
904 amdgpu_atombios_scratch_regs_init(adev);
905 amdgpu_atom_allocate_fb_scratch(adev->mode_info.atom_context);
906 return 0;
907}

/* if we get transitioned to only one device, take VGA back */
/**
 * amdgpu_vga_set_decode - enable/disable vga decode
 *
 * @cookie: amdgpu_device pointer
 * @state: enable/disable vga decode
 *
 * Enable/disable vga decode (all asics).
 * Returns VGA resource flags.
 */
919static unsigned int amdgpu_vga_set_decode(void *cookie, bool state)
920{
921 struct amdgpu_device *adev = cookie;
922 amdgpu_asic_set_vga_state(adev, state);
923 if (state)
924 return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
925 VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
926 else
927 return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
928}

/**
 * amdgpu_check_pot_argument - check that an argument is a power of two
 *
 * @arg: value to check
 *
 * Validates that a certain argument is a power of two (all asics).
 * Returns true if the argument is valid.
 */
938static bool amdgpu_check_pot_argument(int arg)
939{
940 return (arg & (arg - 1)) == 0;
941}

/**
 * amdgpu_check_arguments - validate module params
 *
 * @adev: amdgpu_device pointer
 *
 * Validates certain module parameters and updates
 * the associated values used by the driver (all asics).
 */
951static void amdgpu_check_arguments(struct amdgpu_device *adev)
952{
953 if (amdgpu_sched_jobs < 4) {
954 dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
955 amdgpu_sched_jobs);
956 amdgpu_sched_jobs = 4;
	} else if (!amdgpu_check_pot_argument(amdgpu_sched_jobs)) {
958 dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n",
959 amdgpu_sched_jobs);
960 amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs);
961 }
962
963 if (amdgpu_gart_size != -1) {
964
965 if (amdgpu_gart_size < 32) {
966 dev_warn(adev->dev, "gart size (%d) too small\n",
967 amdgpu_gart_size);
968 amdgpu_gart_size = -1;
969 }
970 }
971
972 if (!amdgpu_check_pot_argument(amdgpu_vm_size)) {
973 dev_warn(adev->dev, "VM size (%d) must be a power of 2\n",
974 amdgpu_vm_size);
975 amdgpu_vm_size = 8;
976 }
977
978 if (amdgpu_vm_size < 1) {
979 dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n",
980 amdgpu_vm_size);
981 amdgpu_vm_size = 8;
982 }
983
984
985
986
987 if (amdgpu_vm_size > 1024) {
988 dev_warn(adev->dev, "VM size (%d) too large, max is 1TB\n",
989 amdgpu_vm_size);
990 amdgpu_vm_size = 8;
991 }
992
	/* defines number of bits in page table versus page directory,
	 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
	 * page table and the remaining bits are in the page directory */
	if (amdgpu_vm_block_size == -1) {

		/* Total bits covered by PD + PTs */
		unsigned bits = ilog2(amdgpu_vm_size) + 18;

		/* Make sure the PD is 4K in size up to 8GB address space.
		 * Above that, split equally between PD and PTs. */
1003 if (amdgpu_vm_size <= 8)
1004 amdgpu_vm_block_size = bits - 9;
1005 else
1006 amdgpu_vm_block_size = (bits + 3) / 2;
1007
1008 } else if (amdgpu_vm_block_size < 9) {
1009 dev_warn(adev->dev, "VM page table size (%d) too small\n",
1010 amdgpu_vm_block_size);
1011 amdgpu_vm_block_size = 9;
1012 }
1013
1014 if (amdgpu_vm_block_size > 24 ||
1015 (amdgpu_vm_size * 1024) < (1ull << amdgpu_vm_block_size)) {
1016 dev_warn(adev->dev, "VM page table size (%d) too large\n",
1017 amdgpu_vm_block_size);
1018 amdgpu_vm_block_size = 9;
1019 }
1020
1021 if (amdgpu_vram_page_split != -1 && (amdgpu_vram_page_split < 16 ||
1022 !amdgpu_check_pot_argument(amdgpu_vram_page_split))) {
1023 dev_warn(adev->dev, "invalid VRAM page split (%d)\n",
1024 amdgpu_vram_page_split);
1025 amdgpu_vram_page_split = 1024;
1026 }
1027}

/**
 * amdgpu_switcheroo_set_state - set switcheroo state
 *
 * @pdev: pci dev pointer
 * @state: vga_switcheroo state
 *
 * Callback for the switcheroo driver.  Suspends or resumes the
 * asic before or after it is powered up using ACPI methods.
 */
1038static void amdgpu_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
1039{
1040 struct drm_device *dev = pci_get_drvdata(pdev);
1041
1042 if (amdgpu_device_is_px(dev) && state == VGA_SWITCHEROO_OFF)
1043 return;
1044
1045 if (state == VGA_SWITCHEROO_ON) {
1046 unsigned d3_delay = dev->pdev->d3_delay;
1047
1048 printk(KERN_INFO "amdgpu: switched on\n");
1049
1050 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
1051
1052 amdgpu_device_resume(dev, true, true);
1053
1054 dev->pdev->d3_delay = d3_delay;
1055
1056 dev->switch_power_state = DRM_SWITCH_POWER_ON;
1057 drm_kms_helper_poll_enable(dev);
1058 } else {
1059 printk(KERN_INFO "amdgpu: switched off\n");
1060 drm_kms_helper_poll_disable(dev);
1061 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
1062 amdgpu_device_suspend(dev, true, true);
1063 dev->switch_power_state = DRM_SWITCH_POWER_OFF;
1064 }
1065}

/**
 * amdgpu_switcheroo_can_switch - see if switcheroo state can change
 *
 * @pdev: pci dev pointer
 *
 * Callback for the switcheroo driver.  Checks if the switcheroo
 * state can be changed.
 * Returns true if the state can be changed, false if not.
 */
1076static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev)
1077{
1078 struct drm_device *dev = pci_get_drvdata(pdev);
1079
	/*
	 * FIXME: open_count is protected by drm_global_mutex but that would lead
	 * to locking inversion with the driver load path.  And the access here
	 * is completely racy anyway.  So don't bother with locking for now.
	 */
1085 return dev->open_count == 0;
1086}
1087
1088static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
1089 .set_gpu_state = amdgpu_switcheroo_set_state,
1090 .reprobe = NULL,
1091 .can_switch = amdgpu_switcheroo_can_switch,
1092};
1093
1094int amdgpu_set_clockgating_state(struct amdgpu_device *adev,
1095 enum amd_ip_block_type block_type,
1096 enum amd_clockgating_state state)
1097{
1098 int i, r = 0;
1099
1100 for (i = 0; i < adev->num_ip_blocks; i++) {
1101 if (!adev->ip_blocks[i].status.valid)
1102 continue;
1103 if (adev->ip_blocks[i].version->type == block_type) {
1104 r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
1105 state);
1106 if (r)
1107 return r;
1108 break;
1109 }
1110 }
1111 return r;
1112}
1113
1114int amdgpu_set_powergating_state(struct amdgpu_device *adev,
1115 enum amd_ip_block_type block_type,
1116 enum amd_powergating_state state)
1117{
1118 int i, r = 0;
1119
1120 for (i = 0; i < adev->num_ip_blocks; i++) {
1121 if (!adev->ip_blocks[i].status.valid)
1122 continue;
1123 if (adev->ip_blocks[i].version->type == block_type) {
1124 r = adev->ip_blocks[i].version->funcs->set_powergating_state((void *)adev,
1125 state);
1126 if (r)
1127 return r;
1128 break;
1129 }
1130 }
1131 return r;
1132}
1133
1134int amdgpu_wait_for_idle(struct amdgpu_device *adev,
1135 enum amd_ip_block_type block_type)
1136{
1137 int i, r;
1138
1139 for (i = 0; i < adev->num_ip_blocks; i++) {
1140 if (!adev->ip_blocks[i].status.valid)
1141 continue;
1142 if (adev->ip_blocks[i].version->type == block_type) {
1143 r = adev->ip_blocks[i].version->funcs->wait_for_idle((void *)adev);
1144 if (r)
1145 return r;
1146 break;
1147 }
1148 }
1149 return 0;
1150
1151}
1152
1153bool amdgpu_is_idle(struct amdgpu_device *adev,
1154 enum amd_ip_block_type block_type)
1155{
1156 int i;
1157
1158 for (i = 0; i < adev->num_ip_blocks; i++) {
1159 if (!adev->ip_blocks[i].status.valid)
1160 continue;
1161 if (adev->ip_blocks[i].version->type == block_type)
1162 return adev->ip_blocks[i].version->funcs->is_idle((void *)adev);
1163 }
1164 return true;
1165
1166}
1167
1168struct amdgpu_ip_block * amdgpu_get_ip_block(struct amdgpu_device *adev,
1169 enum amd_ip_block_type type)
1170{
1171 int i;
1172
1173 for (i = 0; i < adev->num_ip_blocks; i++)
1174 if (adev->ip_blocks[i].version->type == type)
1175 return &adev->ip_blocks[i];
1176
1177 return NULL;
1178}

/**
 * amdgpu_ip_block_version_cmp - compare an IP block's version
 *
 * @adev: amdgpu_device pointer
 * @type: enum amd_ip_block_type
 * @major: major version
 * @minor: minor version
 *
 * Returns 0 if the block exists and its version is equal or greater,
 * 1 if it is smaller or the ip_block doesn't exist.
 */
1191int amdgpu_ip_block_version_cmp(struct amdgpu_device *adev,
1192 enum amd_ip_block_type type,
1193 u32 major, u32 minor)
1194{
1195 struct amdgpu_ip_block *ip_block = amdgpu_get_ip_block(adev, type);
1196
1197 if (ip_block && ((ip_block->version->major > major) ||
1198 ((ip_block->version->major == major) &&
1199 (ip_block->version->minor >= minor))))
1200 return 0;
1201
1202 return 1;
1203}

/**
 * amdgpu_ip_block_add - add an IP block to the device
 *
 * @adev: amdgpu_device pointer
 * @ip_block_version: pointer to the IP to add
 *
 * Adds the IP block driver information to the collection of IPs
 * on the asic.
 */
1214int amdgpu_ip_block_add(struct amdgpu_device *adev,
1215 const struct amdgpu_ip_block_version *ip_block_version)
1216{
1217 if (!ip_block_version)
1218 return -EINVAL;
1219
1220 adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version;
1221
1222 return 0;
1223}
1224
1225static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
1226{
1227 adev->enable_virtual_display = false;
1228
1229 if (amdgpu_virtual_display) {
1230 struct drm_device *ddev = adev->ddev;
1231 const char *pci_address_name = pci_name(ddev->pdev);
1232 char *pciaddstr, *pciaddstr_tmp, *pciaddname_tmp, *pciaddname;
1233
1234 pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL);
1235 pciaddstr_tmp = pciaddstr;
1236 while ((pciaddname_tmp = strsep(&pciaddstr_tmp, ";"))) {
1237 pciaddname = strsep(&pciaddname_tmp, ",");
1238 if (!strcmp(pci_address_name, pciaddname)) {
1239 long num_crtc;
1240 int res = -1;
1241
1242 adev->enable_virtual_display = true;
1243
1244 if (pciaddname_tmp)
1245 res = kstrtol(pciaddname_tmp, 10,
1246 &num_crtc);
1247
1248 if (!res) {
1249 if (num_crtc < 1)
1250 num_crtc = 1;
1251 if (num_crtc > 6)
1252 num_crtc = 6;
1253 adev->mode_info.num_crtc = num_crtc;
1254 } else {
1255 adev->mode_info.num_crtc = 1;
1256 }
1257 break;
1258 }
1259 }
1260
1261 DRM_INFO("virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n",
1262 amdgpu_virtual_display, pci_address_name,
1263 adev->enable_virtual_display, adev->mode_info.num_crtc);
1264
1265 kfree(pciaddstr);
1266 }
1267}
1268
1269static int amdgpu_early_init(struct amdgpu_device *adev)
1270{
1271 int i, r;
1272
1273 amdgpu_device_enable_virtual_display(adev);
1274
1275 switch (adev->asic_type) {
1276 case CHIP_TOPAZ:
1277 case CHIP_TONGA:
1278 case CHIP_FIJI:
1279 case CHIP_POLARIS11:
1280 case CHIP_POLARIS10:
1281 case CHIP_POLARIS12:
1282 case CHIP_CARRIZO:
1283 case CHIP_STONEY:
1284 if (adev->asic_type == CHIP_CARRIZO || adev->asic_type == CHIP_STONEY)
1285 adev->family = AMDGPU_FAMILY_CZ;
1286 else
1287 adev->family = AMDGPU_FAMILY_VI;
1288
1289 r = vi_set_ip_blocks(adev);
1290 if (r)
1291 return r;
1292 break;
1293#ifdef CONFIG_DRM_AMDGPU_SI
1294 case CHIP_VERDE:
1295 case CHIP_TAHITI:
1296 case CHIP_PITCAIRN:
1297 case CHIP_OLAND:
1298 case CHIP_HAINAN:
1299 adev->family = AMDGPU_FAMILY_SI;
1300 r = si_set_ip_blocks(adev);
1301 if (r)
1302 return r;
1303 break;
1304#endif
1305#ifdef CONFIG_DRM_AMDGPU_CIK
1306 case CHIP_BONAIRE:
1307 case CHIP_HAWAII:
1308 case CHIP_KAVERI:
1309 case CHIP_KABINI:
1310 case CHIP_MULLINS:
1311 if ((adev->asic_type == CHIP_BONAIRE) || (adev->asic_type == CHIP_HAWAII))
1312 adev->family = AMDGPU_FAMILY_CI;
1313 else
1314 adev->family = AMDGPU_FAMILY_KV;
1315
1316 r = cik_set_ip_blocks(adev);
1317 if (r)
1318 return r;
1319 break;
1320#endif
1321 default:
1322
1323 return -EINVAL;
1324 }
1325
1326 for (i = 0; i < adev->num_ip_blocks; i++) {
1327 if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
1328 DRM_ERROR("disabled ip block: %d\n", i);
1329 adev->ip_blocks[i].status.valid = false;
1330 } else {
1331 if (adev->ip_blocks[i].version->funcs->early_init) {
1332 r = adev->ip_blocks[i].version->funcs->early_init((void *)adev);
1333 if (r == -ENOENT) {
1334 adev->ip_blocks[i].status.valid = false;
1335 } else if (r) {
1336 DRM_ERROR("early_init of IP block <%s> failed %d\n",
1337 adev->ip_blocks[i].version->funcs->name, r);
1338 return r;
1339 } else {
1340 adev->ip_blocks[i].status.valid = true;
1341 }
1342 } else {
1343 adev->ip_blocks[i].status.valid = true;
1344 }
1345 }
1346 }
1347
1348 adev->cg_flags &= amdgpu_cg_mask;
1349 adev->pg_flags &= amdgpu_pg_mask;
1350
1351 return 0;
1352}
1353
1354static int amdgpu_init(struct amdgpu_device *adev)
1355{
1356 int i, r;
1357
1358 for (i = 0; i < adev->num_ip_blocks; i++) {
1359 if (!adev->ip_blocks[i].status.valid)
1360 continue;
1361 r = adev->ip_blocks[i].version->funcs->sw_init((void *)adev);
1362 if (r) {
1363 DRM_ERROR("sw_init of IP block <%s> failed %d\n",
1364 adev->ip_blocks[i].version->funcs->name, r);
1365 return r;
1366 }
1367 adev->ip_blocks[i].status.sw = true;
1368
1369 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
1370 r = amdgpu_vram_scratch_init(adev);
1371 if (r) {
1372 DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r);
1373 return r;
1374 }
1375 r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
1376 if (r) {
1377 DRM_ERROR("hw_init %d failed %d\n", i, r);
1378 return r;
1379 }
1380 r = amdgpu_wb_init(adev);
1381 if (r) {
1382 DRM_ERROR("amdgpu_wb_init failed %d\n", r);
1383 return r;
1384 }
1385 adev->ip_blocks[i].status.hw = true;
1386 }
1387 }
1388
1389 for (i = 0; i < adev->num_ip_blocks; i++) {
1390 if (!adev->ip_blocks[i].status.sw)
1391 continue;
1392
1393 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC)
1394 continue;
1395 r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
1396 if (r) {
1397 DRM_ERROR("hw_init of IP block <%s> failed %d\n",
1398 adev->ip_blocks[i].version->funcs->name, r);
1399 return r;
1400 }
1401 adev->ip_blocks[i].status.hw = true;
1402 }
1403
1404 return 0;
1405}
1406
1407static int amdgpu_late_init(struct amdgpu_device *adev)
1408{
1409 int i = 0, r;
1410
1411 for (i = 0; i < adev->num_ip_blocks; i++) {
1412 if (!adev->ip_blocks[i].status.valid)
1413 continue;
1414 if (adev->ip_blocks[i].version->funcs->late_init) {
1415 r = adev->ip_blocks[i].version->funcs->late_init((void *)adev);
1416 if (r) {
1417 DRM_ERROR("late_init of IP block <%s> failed %d\n",
1418 adev->ip_blocks[i].version->funcs->name, r);
1419 return r;
1420 }
1421 adev->ip_blocks[i].status.late_initialized = true;
1422 }
1423
1424 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
1425 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE) {
1426
1427 r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
1428 AMD_CG_STATE_GATE);
1429 if (r) {
1430 DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n",
1431 adev->ip_blocks[i].version->funcs->name, r);
1432 return r;
1433 }
1434 }
1435 }
1436
1437 return 0;
1438}
1439
1440static int amdgpu_fini(struct amdgpu_device *adev)
1441{
1442 int i, r;
1443
1444
1445 for (i = 0; i < adev->num_ip_blocks; i++) {
1446 if (!adev->ip_blocks[i].status.hw)
1447 continue;
1448 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
1449
1450 r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
1451 AMD_CG_STATE_UNGATE);
1452 if (r) {
1453 DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
1454 adev->ip_blocks[i].version->funcs->name, r);
1455 return r;
1456 }
1457 r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
1458
1459 if (r) {
1460 DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
1461 adev->ip_blocks[i].version->funcs->name, r);
1462 }
1463 adev->ip_blocks[i].status.hw = false;
1464 break;
1465 }
1466 }
1467
1468 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
1469 if (!adev->ip_blocks[i].status.hw)
1470 continue;
1471 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
1472 amdgpu_wb_fini(adev);
1473 amdgpu_vram_scratch_fini(adev);
1474 }
1475
1476 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
1477 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE) {
1478
1479 r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
1480 AMD_CG_STATE_UNGATE);
1481 if (r) {
1482 DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
1483 adev->ip_blocks[i].version->funcs->name, r);
1484 return r;
1485 }
1486 }
1487
1488 r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
1489
1490 if (r) {
1491 DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
1492 adev->ip_blocks[i].version->funcs->name, r);
1493 }
1494
1495 adev->ip_blocks[i].status.hw = false;
1496 }
1497
1498 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
1499 if (!adev->ip_blocks[i].status.sw)
1500 continue;
1501 r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev);
1502
1503 if (r) {
1504 DRM_DEBUG("sw_fini of IP block <%s> failed %d\n",
1505 adev->ip_blocks[i].version->funcs->name, r);
1506 }
1507 adev->ip_blocks[i].status.sw = false;
1508 adev->ip_blocks[i].status.valid = false;
1509 }
1510
1511 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
1512 if (!adev->ip_blocks[i].status.late_initialized)
1513 continue;
1514 if (adev->ip_blocks[i].version->funcs->late_fini)
1515 adev->ip_blocks[i].version->funcs->late_fini((void *)adev);
1516 adev->ip_blocks[i].status.late_initialized = false;
1517 }
1518
1519 return 0;
1520}
1521
1522int amdgpu_suspend(struct amdgpu_device *adev)
1523{
1524 int i, r;
1525
1526
1527 r = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_SMC,
1528 AMD_CG_STATE_UNGATE);
1529 if (r) {
1530 DRM_ERROR("set_clockgating_state(ungate) SMC failed %d\n",r);
1531 }
1532
1533 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
1534 if (!adev->ip_blocks[i].status.valid)
1535 continue;
1536
1537 if (i != AMD_IP_BLOCK_TYPE_SMC) {
1538 r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
1539 AMD_CG_STATE_UNGATE);
1540 if (r) {
1541 DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
1542 adev->ip_blocks[i].version->funcs->name, r);
1543 }
1544 }
1545
1546 r = adev->ip_blocks[i].version->funcs->suspend(adev);
1547
1548 if (r) {
1549 DRM_ERROR("suspend of IP block <%s> failed %d\n",
1550 adev->ip_blocks[i].version->funcs->name, r);
1551 }
1552 }
1553
1554 return 0;
1555}
1556
1557static int amdgpu_resume(struct amdgpu_device *adev)
1558{
1559 int i, r;
1560
1561 for (i = 0; i < adev->num_ip_blocks; i++) {
1562 if (!adev->ip_blocks[i].status.valid)
1563 continue;
1564 r = adev->ip_blocks[i].version->funcs->resume(adev);
1565 if (r) {
1566 DRM_ERROR("resume of IP block <%s> failed %d\n",
1567 adev->ip_blocks[i].version->funcs->name, r);
1568 return r;
1569 }
1570 }
1571
1572 return 0;
1573}
1574
1575static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
1576{
1577 if (amdgpu_atombios_has_gpu_virtualization_table(adev))
1578 adev->virtualization.virtual_caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
1579}

/**
 * amdgpu_device_init - initialize the driver
 *
 * @adev: amdgpu_device pointer
 * @ddev: drm dev pointer
 * @pdev: pci dev pointer
 * @flags: driver flags
 *
 * Initializes the driver info and hw (all asics).
 * Returns 0 for success or an error on failure.
 * Called at driver startup.
 */
1593int amdgpu_device_init(struct amdgpu_device *adev,
1594 struct drm_device *ddev,
1595 struct pci_dev *pdev,
1596 uint32_t flags)
1597{
1598 int r, i;
1599 bool runtime = false;
1600 u32 max_MBps;
1601
1602 adev->shutdown = false;
1603 adev->dev = &pdev->dev;
1604 adev->ddev = ddev;
1605 adev->pdev = pdev;
1606 adev->flags = flags;
1607 adev->asic_type = flags & AMD_ASIC_MASK;
1608 adev->is_atom_bios = false;
1609 adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
1610 adev->mc.gtt_size = 512 * 1024 * 1024;
1611 adev->accel_working = false;
1612 adev->num_rings = 0;
1613 adev->mman.buffer_funcs = NULL;
1614 adev->mman.buffer_funcs_ring = NULL;
1615 adev->vm_manager.vm_pte_funcs = NULL;
1616 adev->vm_manager.vm_pte_num_rings = 0;
1617 adev->gart.gart_funcs = NULL;
1618 adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
1619
1620 adev->smc_rreg = &amdgpu_invalid_rreg;
1621 adev->smc_wreg = &amdgpu_invalid_wreg;
1622 adev->pcie_rreg = &amdgpu_invalid_rreg;
1623 adev->pcie_wreg = &amdgpu_invalid_wreg;
1624 adev->pciep_rreg = &amdgpu_invalid_rreg;
1625 adev->pciep_wreg = &amdgpu_invalid_wreg;
1626 adev->uvd_ctx_rreg = &amdgpu_invalid_rreg;
1627 adev->uvd_ctx_wreg = &amdgpu_invalid_wreg;
1628 adev->didt_rreg = &amdgpu_invalid_rreg;
1629 adev->didt_wreg = &amdgpu_invalid_wreg;
1630 adev->gc_cac_rreg = &amdgpu_invalid_rreg;
1631 adev->gc_cac_wreg = &amdgpu_invalid_wreg;
1632 adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg;
1633 adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg;
1634
1635
1636 DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
1637 amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
1638 pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
1639
1640
1641
1642 mutex_init(&adev->vm_manager.lock);
1643 atomic_set(&adev->irq.ih.lock, 0);
1644 mutex_init(&adev->pm.mutex);
1645 mutex_init(&adev->gfx.gpu_clock_mutex);
1646 mutex_init(&adev->srbm_mutex);
1647 mutex_init(&adev->grbm_idx_mutex);
1648 mutex_init(&adev->mn_lock);
1649 hash_init(adev->mn_hash);
1650
1651 amdgpu_check_arguments(adev);
1652
1653
1654
1655 spin_lock_init(&adev->mmio_idx_lock);
1656 spin_lock_init(&adev->smc_idx_lock);
1657 spin_lock_init(&adev->pcie_idx_lock);
1658 spin_lock_init(&adev->uvd_ctx_idx_lock);
1659 spin_lock_init(&adev->didt_idx_lock);
1660 spin_lock_init(&adev->gc_cac_idx_lock);
1661 spin_lock_init(&adev->audio_endpt_idx_lock);
1662 spin_lock_init(&adev->mm_stats.lock);
1663
1664 INIT_LIST_HEAD(&adev->shadow_list);
1665 mutex_init(&adev->shadow_list_lock);
1666
1667 INIT_LIST_HEAD(&adev->gtt_list);
1668 spin_lock_init(&adev->gtt_list_lock);
1669
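	/* on CIK and newer parts the register MMIO space is BAR 5,
	 * older parts use BAR 2 */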
1670 if (adev->asic_type >= CHIP_BONAIRE) {
1671 adev->rmmio_base = pci_resource_start(adev->pdev, 5);
1672 adev->rmmio_size = pci_resource_len(adev->pdev, 5);
1673 } else {
1674 adev->rmmio_base = pci_resource_start(adev->pdev, 2);
1675 adev->rmmio_size = pci_resource_len(adev->pdev, 2);
1676 }
1677
1678 adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size);
1679 if (adev->rmmio == NULL) {
1680 return -ENOMEM;
1681 }
1682 DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
1683 DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size);
1684
1685 if (adev->asic_type >= CHIP_BONAIRE)
1686
1687 amdgpu_doorbell_init(adev);
1688
1689
1690 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
1691 if (pci_resource_flags(adev->pdev, i) & IORESOURCE_IO) {
1692 adev->rio_mem_size = pci_resource_len(adev->pdev, i);
1693 adev->rio_mem = pci_iomap(adev->pdev, i, adev->rio_mem_size);
1694 break;
1695 }
1696 }
1697 if (adev->rio_mem == NULL)
1698 DRM_ERROR("Unable to find PCI I/O BAR\n");
1699
1700
1701 r = amdgpu_early_init(adev);
1702 if (r)
1703 return r;
1704
1705
1706
1707
1708 vga_client_register(adev->pdev, adev, NULL, amdgpu_vga_set_decode);
1709
1710 if (amdgpu_runtime_pm == 1)
1711 runtime = true;
1712 if (amdgpu_device_is_px(ddev))
1713 runtime = true;
1714 vga_switcheroo_register_client(adev->pdev, &amdgpu_switcheroo_ops, runtime);
1715 if (runtime)
1716 vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
1717
1718
1719 if (!amdgpu_get_bios(adev)) {
1720 r = -EINVAL;
1721 goto failed;
1722 }
1723
1724 if (!adev->is_atom_bios) {
1725 dev_err(adev->dev, "Expecting atombios for GPU\n");
1726 r = -EINVAL;
1727 goto failed;
1728 }
1729 r = amdgpu_atombios_init(adev);
1730 if (r) {
1731 dev_err(adev->dev, "amdgpu_atombios_init failed\n");
1732 goto failed;
1733 }
1734
1735
1736 amdgpu_device_detect_sriov_bios(adev);
1737
1738
1739 if (amdgpu_vpost_needed(adev)) {
1740 if (!adev->bios) {
1741 dev_err(adev->dev, "no vBIOS found\n");
1742 r = -EINVAL;
1743 goto failed;
1744 }
1745 DRM_INFO("GPU posting now...\n");
1746 r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
1747 if (r) {
1748 dev_err(adev->dev, "gpu post error!\n");
1749 goto failed;
1750 }
1751 } else {
1752 DRM_INFO("GPU post is not needed\n");
1753 }
1754
1755
1756 r = amdgpu_atombios_get_clock_info(adev);
1757 if (r) {
1758 dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n");
1759 goto failed;
1760 }
1761
1762 amdgpu_atombios_i2c_init(adev);
1763
1764
1765 r = amdgpu_fence_driver_init(adev);
1766 if (r) {
1767 dev_err(adev->dev, "amdgpu_fence_driver_init failed\n");
1768 goto failed;
1769 }
1770
1771
1772 drm_mode_config_init(adev->ddev);
1773
1774 r = amdgpu_init(adev);
1775 if (r) {
1776 dev_err(adev->dev, "amdgpu_init failed\n");
1777 amdgpu_fini(adev);
1778 goto failed;
1779 }
1780
1781 adev->accel_working = true;
1782
1783
1784 if (amdgpu_moverate >= 0)
1785 max_MBps = amdgpu_moverate;
1786 else
1787 max_MBps = 8;
1788
1789 adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps));
1790
1791 amdgpu_fbdev_init(adev);
1792
1793 r = amdgpu_ib_pool_init(adev);
1794 if (r) {
1795 dev_err(adev->dev, "IB initialization failed (%d).\n", r);
1796 goto failed;
1797 }
1798
1799 r = amdgpu_ib_ring_tests(adev);
1800 if (r)
1801 DRM_ERROR("ib ring test failed (%d).\n", r);
1802
1803 r = amdgpu_gem_debugfs_init(adev);
1804 if (r) {
1805 DRM_ERROR("registering gem debugfs failed (%d).\n", r);
1806 }
1807
1808 r = amdgpu_debugfs_regs_init(adev);
1809 if (r) {
1810 DRM_ERROR("registering register debugfs failed (%d).\n", r);
1811 }
1812
1813 r = amdgpu_debugfs_firmware_init(adev);
1814 if (r) {
1815 DRM_ERROR("registering firmware debugfs failed (%d).\n", r);
1816 return r;
1817 }
1818
1819 if ((amdgpu_testing & 1)) {
1820 if (adev->accel_working)
1821 amdgpu_test_moves(adev);
1822 else
1823 DRM_INFO("amdgpu: acceleration disabled, skipping move tests\n");
1824 }
1825 if ((amdgpu_testing & 2)) {
1826 if (adev->accel_working)
1827 amdgpu_test_syncing(adev);
1828 else
1829 DRM_INFO("amdgpu: acceleration disabled, skipping sync tests\n");
1830 }
1831 if (amdgpu_benchmarking) {
1832 if (adev->accel_working)
1833 amdgpu_benchmark(adev, amdgpu_benchmarking);
1834 else
1835 DRM_INFO("amdgpu: acceleration disabled, skipping benchmarks\n");
1836 }
1837
1838
1839
1840
1841 r = amdgpu_late_init(adev);
1842 if (r) {
1843 dev_err(adev->dev, "amdgpu_late_init failed\n");
1844 goto failed;
1845 }
1846
1847 return 0;
1848
1849failed:
1850 if (runtime)
1851 vga_switcheroo_fini_domain_pm_ops(adev->dev);
1852 return r;
1853}
1854
1855static void amdgpu_debugfs_remove_files(struct amdgpu_device *adev);

/**
 * amdgpu_device_fini - tear down the driver
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down the driver info (all asics).
 * Called at driver shutdown.
 */
1865void amdgpu_device_fini(struct amdgpu_device *adev)
1866{
1867 int r;
1868
1869 DRM_INFO("amdgpu: finishing device.\n");
1870 adev->shutdown = true;
1871 drm_crtc_force_disable_all(adev->ddev);
1872
1873 amdgpu_bo_evict_vram(adev);
1874 amdgpu_ib_pool_fini(adev);
1875 amdgpu_fence_driver_fini(adev);
1876 amdgpu_fbdev_fini(adev);
1877 r = amdgpu_fini(adev);
1878 adev->accel_working = false;
1879
1880 amdgpu_i2c_fini(adev);
1881 amdgpu_atombios_fini(adev);
1882 kfree(adev->bios);
1883 adev->bios = NULL;
1884 vga_switcheroo_unregister_client(adev->pdev);
1885 if (adev->flags & AMD_IS_PX)
1886 vga_switcheroo_fini_domain_pm_ops(adev->dev);
1887 vga_client_register(adev->pdev, NULL, NULL, NULL);
1888 if (adev->rio_mem)
1889 pci_iounmap(adev->pdev, adev->rio_mem);
1890 adev->rio_mem = NULL;
1891 iounmap(adev->rmmio);
1892 adev->rmmio = NULL;
1893 if (adev->asic_type >= CHIP_BONAIRE)
1894 amdgpu_doorbell_fini(adev);
1895 amdgpu_debugfs_regs_cleanup(adev);
1896 amdgpu_debugfs_remove_files(adev);
1897}

/*
 * Suspend & resume.
 */
/**
 * amdgpu_device_suspend - initiate device suspend
 *
 * @dev: drm dev pointer
 * @suspend: suspend state
 * @fbcon: notify the fbdev of suspend
 *
 * Puts the hw in the suspend state (all asics).
 * Returns 0 for success or an error on failure.
 * Called at driver suspend.
 */
1913int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
1914{
1915 struct amdgpu_device *adev;
1916 struct drm_crtc *crtc;
1917 struct drm_connector *connector;
1918 int r;
1919
1920 if (dev == NULL || dev->dev_private == NULL) {
1921 return -ENODEV;
1922 }
1923
1924 adev = dev->dev_private;
1925
1926 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
1927 return 0;
1928
1929 drm_kms_helper_poll_disable(dev);
1930
1931
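	/* turn off display hw */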
1932 drm_modeset_lock_all(dev);
1933 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
1934 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
1935 }
1936 drm_modeset_unlock_all(dev);
1937
1938
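	/* unpin the front buffers and cursors */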
1939 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
1940 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1941 struct amdgpu_framebuffer *rfb = to_amdgpu_framebuffer(crtc->primary->fb);
1942 struct amdgpu_bo *robj;
1943
1944 if (amdgpu_crtc->cursor_bo) {
1945 struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
1946 r = amdgpu_bo_reserve(aobj, false);
1947 if (r == 0) {
1948 amdgpu_bo_unpin(aobj);
1949 amdgpu_bo_unreserve(aobj);
1950 }
1951 }
1952
1953 if (rfb == NULL || rfb->obj == NULL) {
1954 continue;
1955 }
1956 robj = gem_to_amdgpu_bo(rfb->obj);
1957
1958 if (!amdgpu_fbdev_robj_is_fb(adev, robj)) {
1959 r = amdgpu_bo_reserve(robj, false);
1960 if (r == 0) {
1961 amdgpu_bo_unpin(robj);
1962 amdgpu_bo_unreserve(robj);
1963 }
1964 }
1965 }
1966
1967 amdgpu_bo_evict_vram(adev);
1968
1969 amdgpu_fence_driver_suspend(adev);
1970
1971 r = amdgpu_suspend(adev);
1972
1973
1974
1975
1976
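	/* evict the remaining vram memory; this second eviction also evicts
	 * the GART page table using the CPU */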
1977 amdgpu_bo_evict_vram(adev);
1978
1979 amdgpu_atombios_scratch_regs_save(adev);
1980 pci_save_state(dev->pdev);
1981 if (suspend) {
1982
1983 pci_disable_device(dev->pdev);
1984 pci_set_power_state(dev->pdev, PCI_D3hot);
1985 } else {
1986 r = amdgpu_asic_reset(adev);
1987 if (r)
1988 DRM_ERROR("amdgpu asic reset failed\n");
1989 }
1990
1991 if (fbcon) {
1992 console_lock();
1993 amdgpu_fbdev_set_suspend(adev, 1);
1994 console_unlock();
1995 }
1996 return 0;
1997}

/**
 * amdgpu_device_resume - initiate device resume
 *
 * @dev: drm dev pointer
 * @resume: resume state
 * @fbcon: notify the fbdev of resume
 *
 * Bring the hw back to operating state (all asics).
 * Returns 0 for success or an error on failure.
 * Called at driver resume.
 */
2008int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
2009{
2010 struct drm_connector *connector;
2011 struct amdgpu_device *adev = dev->dev_private;
2012 struct drm_crtc *crtc;
2013 int r;
2014
2015 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
2016 return 0;
2017
2018 if (fbcon)
2019 console_lock();
2020
2021 if (resume) {
2022 pci_set_power_state(dev->pdev, PCI_D0);
2023 pci_restore_state(dev->pdev);
2024 r = pci_enable_device(dev->pdev);
2025 if (r) {
2026 if (fbcon)
2027 console_unlock();
2028 return r;
2029 }
2030 }
2031 amdgpu_atombios_scratch_regs_restore(adev);
2032
2033
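	/* post the card if it has not been posted, e.g. after a full power off */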
2034 if (!amdgpu_card_posted(adev) || !resume) {
2035 r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
2036 if (r)
2037 DRM_ERROR("amdgpu asic init failed\n");
2038 }
2039
2040 r = amdgpu_resume(adev);
2041 if (r)
2042 DRM_ERROR("amdgpu_resume failed (%d).\n", r);
2043
2044 amdgpu_fence_driver_resume(adev);
2045
2046 if (resume) {
2047 r = amdgpu_ib_ring_tests(adev);
2048 if (r)
2049 DRM_ERROR("ib ring test failed (%d).\n", r);
2050 }
2051
2052 r = amdgpu_late_init(adev);
2053 if (r)
2054 return r;
2055
2056
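	/* pin the cursors again; they were unpinned on suspend */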
2057 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
2058 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2059
2060 if (amdgpu_crtc->cursor_bo) {
2061 struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
2062 r = amdgpu_bo_reserve(aobj, false);
2063 if (r == 0) {
2064 r = amdgpu_bo_pin(aobj,
2065 AMDGPU_GEM_DOMAIN_VRAM,
2066 &amdgpu_crtc->cursor_addr);
2067 if (r != 0)
2068 DRM_ERROR("Failed to pin cursor BO (%d)\n", r);
2069 amdgpu_bo_unreserve(aobj);
2070 }
2071 }
2072 }
2073
2074
2075 if (fbcon) {
2076 drm_helper_resume_force_mode(dev);
2077
2078 drm_modeset_lock_all(dev);
2079 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
2080 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
2081 }
2082 drm_modeset_unlock_all(dev);
2083 }
2084
2085 drm_kms_helper_poll_enable(dev);
2086
	/*
	 * Most of the connector probing functions try to acquire runtime pm
	 * refs to ensure that the GPU is powered on when connector polling is
	 * performed. Since we're calling this from a runtime PM callback,
	 * trying to acquire rpm refs will cause us to deadlock.
	 *
	 * Since we're guaranteed to be holding the rpm lock, it's safe to
	 * temporarily disable the rpm helpers so this doesn't deadlock us.
	 */
2096#ifdef CONFIG_PM
2097 dev->dev->power.disable_depth++;
2098#endif
2099 drm_helper_hpd_irq_event(dev);
2100#ifdef CONFIG_PM
2101 dev->dev->power.disable_depth--;
2102#endif
2103
2104 if (fbcon) {
2105 amdgpu_fbdev_set_suspend(adev, 0);
2106 console_unlock();
2107 }
2108
2109 return 0;
2110}
2111
2112static bool amdgpu_check_soft_reset(struct amdgpu_device *adev)
2113{
2114 int i;
2115 bool asic_hang = false;
2116
2117 for (i = 0; i < adev->num_ip_blocks; i++) {
2118 if (!adev->ip_blocks[i].status.valid)
2119 continue;
2120 if (adev->ip_blocks[i].version->funcs->check_soft_reset)
2121 adev->ip_blocks[i].status.hang =
2122 adev->ip_blocks[i].version->funcs->check_soft_reset(adev);
2123 if (adev->ip_blocks[i].status.hang) {
2124 DRM_INFO("IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name);
2125 asic_hang = true;
2126 }
2127 }
2128 return asic_hang;
2129}
2130
2131static int amdgpu_pre_soft_reset(struct amdgpu_device *adev)
2132{
2133 int i, r = 0;
2134
2135 for (i = 0; i < adev->num_ip_blocks; i++) {
2136 if (!adev->ip_blocks[i].status.valid)
2137 continue;
2138 if (adev->ip_blocks[i].status.hang &&
2139 adev->ip_blocks[i].version->funcs->pre_soft_reset) {
2140 r = adev->ip_blocks[i].version->funcs->pre_soft_reset(adev);
2141 if (r)
2142 return r;
2143 }
2144 }
2145
2146 return 0;
2147}
2148
2149static bool amdgpu_need_full_reset(struct amdgpu_device *adev)
2150{
2151 int i;
2152
2153 for (i = 0; i < adev->num_ip_blocks; i++) {
2154 if (!adev->ip_blocks[i].status.valid)
2155 continue;
2156 if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) ||
2157 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) ||
2158 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_ACP) ||
2159 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE)) {
2160 if (adev->ip_blocks[i].status.hang) {
				DRM_INFO("Some blocks need a full reset!\n");
2162 return true;
2163 }
2164 }
2165 }
2166 return false;
2167}
2168
2169static int amdgpu_soft_reset(struct amdgpu_device *adev)
2170{
2171 int i, r = 0;
2172
2173 for (i = 0; i < adev->num_ip_blocks; i++) {
2174 if (!adev->ip_blocks[i].status.valid)
2175 continue;
2176 if (adev->ip_blocks[i].status.hang &&
2177 adev->ip_blocks[i].version->funcs->soft_reset) {
2178 r = adev->ip_blocks[i].version->funcs->soft_reset(adev);
2179 if (r)
2180 return r;
2181 }
2182 }
2183
2184 return 0;
2185}
2186
2187static int amdgpu_post_soft_reset(struct amdgpu_device *adev)
2188{
2189 int i, r = 0;
2190
2191 for (i = 0; i < adev->num_ip_blocks; i++) {
2192 if (!adev->ip_blocks[i].status.valid)
2193 continue;
2194 if (adev->ip_blocks[i].status.hang &&
2195 adev->ip_blocks[i].version->funcs->post_soft_reset)
2196 r = adev->ip_blocks[i].version->funcs->post_soft_reset(adev);
2197 if (r)
2198 return r;
2199 }
2200
2201 return 0;
2202}
2203
2204bool amdgpu_need_backup(struct amdgpu_device *adev)
2205{
2206 if (adev->flags & AMD_IS_APU)
2207 return false;
2208
2209 return amdgpu_lockup_timeout > 0 ? true : false;
2210}
2211
2212static int amdgpu_recover_vram_from_shadow(struct amdgpu_device *adev,
2213 struct amdgpu_ring *ring,
2214 struct amdgpu_bo *bo,
2215 struct dma_fence **fence)
2216{
2217 uint32_t domain;
2218 int r;
2219
2220 if (!bo->shadow)
2221 return 0;
2222
2223 r = amdgpu_bo_reserve(bo, false);
2224 if (r)
2225 return r;
2226 domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
2227
2228 if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
2229 r = amdgpu_bo_restore_from_shadow(adev, ring, bo,
2230 NULL, fence, true);
2231 if (r) {
2232 DRM_ERROR("recover page table failed!\n");
2233 goto err;
2234 }
2235 }
2236err:
2237 amdgpu_bo_unreserve(bo);
2238 return r;
2239}

/**
 * amdgpu_gpu_reset - reset the asic
 *
 * @adev: amdgpu_device pointer
 *
 * Attempt to reset the GPU if it has hung (all asics).
 * Returns 0 for success or an error on failure.
 */
2249int amdgpu_gpu_reset(struct amdgpu_device *adev)
2250{
2251 int i, r;
2252 int resched;
2253 bool need_full_reset;
2254
2255 if (!amdgpu_check_soft_reset(adev)) {
2256 DRM_INFO("No hardware hang detected. Did some blocks stall?\n");
2257 return 0;
2258 }
2259
2260 atomic_inc(&adev->gpu_reset_counter);
2261
2262
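	/* block TTM's delayed work so buffers are not touched during the reset */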
2263 resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
2264
2265
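	/* block the scheduler and mark in-flight jobs for re-submission */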
2266 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
2267 struct amdgpu_ring *ring = adev->rings[i];
2268
2269 if (!ring)
2270 continue;
2271 kthread_park(ring->sched.thread);
2272 amd_sched_hw_job_reset(&ring->sched);
2273 }
2274
2275 amdgpu_fence_driver_force_completion(adev);
2276
2277 need_full_reset = amdgpu_need_full_reset(adev);
2278
2279 if (!need_full_reset) {
2280 amdgpu_pre_soft_reset(adev);
2281 r = amdgpu_soft_reset(adev);
2282 amdgpu_post_soft_reset(adev);
2283 if (r || amdgpu_check_soft_reset(adev)) {
2284 DRM_INFO("soft reset failed, will fallback to full reset!\n");
2285 need_full_reset = true;
2286 }
2287 }
2288
2289 if (need_full_reset) {
2290 r = amdgpu_suspend(adev);
2291
2292retry:
2293
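		/* disable display access to VRAM while the MC is being reset */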
2294 if (adev->mode_info.num_crtc) {
2295 struct amdgpu_mode_mc_save save;
2296 amdgpu_display_stop_mc_access(adev, &save);
2297 amdgpu_wait_for_idle(adev, AMD_IP_BLOCK_TYPE_GMC);
2298 }
2299 amdgpu_atombios_scratch_regs_save(adev);
2300 r = amdgpu_asic_reset(adev);
2301 amdgpu_atombios_scratch_regs_restore(adev);
2302
2303 amdgpu_atom_asic_init(adev->mode_info.atom_context);
2304
2305 if (!r) {
2306 dev_info(adev->dev, "GPU reset succeeded, trying to resume\n");
2307 r = amdgpu_resume(adev);
2308 }
2309 }
2310 if (!r) {
2311 amdgpu_irq_gpu_reset_resume_helper(adev);
2312 if (need_full_reset && amdgpu_need_backup(adev)) {
2313 r = amdgpu_ttm_recover_gart(adev);
2314 if (r)
2315 DRM_ERROR("gart recovery failed!!!\n");
2316 }
2317 r = amdgpu_ib_ring_tests(adev);
2318 if (r) {
2319 dev_err(adev->dev, "ib ring test failed (%d).\n", r);
2320 r = amdgpu_suspend(adev);
2321 need_full_reset = true;
2322 goto retry;
2323 }
2324
2325
2326
2327
2328 if (need_full_reset && amdgpu_need_backup(adev)) {
2329 struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
2330 struct amdgpu_bo *bo, *tmp;
2331 struct dma_fence *fence = NULL, *next = NULL;
2332
2333 DRM_INFO("recover vram bo from shadow\n");
2334 mutex_lock(&adev->shadow_list_lock);
2335 list_for_each_entry_safe(bo, tmp, &adev->shadow_list, shadow_list) {
2336 amdgpu_recover_vram_from_shadow(adev, ring, bo, &next);
2337 if (fence) {
2338 r = dma_fence_wait(fence, false);
2339 if (r) {
						WARN(r, "recovery from shadow isn't completed\n");
2341 break;
2342 }
2343 }
2344
2345 dma_fence_put(fence);
2346 fence = next;
2347 }
2348 mutex_unlock(&adev->shadow_list_lock);
2349 if (fence) {
2350 r = dma_fence_wait(fence, false);
2351 if (r)
					WARN(r, "recovery from shadow isn't completed\n");
2353 }
2354 dma_fence_put(fence);
2355 }
2356 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
2357 struct amdgpu_ring *ring = adev->rings[i];
2358 if (!ring)
2359 continue;
2360
2361 amd_sched_job_recovery(&ring->sched);
2362 kthread_unpark(ring->sched.thread);
2363 }
2364 } else {
2365 dev_err(adev->dev, "asic resume failed (%d).\n", r);
2366 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
2367 if (adev->rings[i]) {
2368 kthread_unpark(adev->rings[i]->sched.thread);
2369 }
2370 }
2371 }
2372
2373 drm_helper_resume_force_mode(adev->ddev);
2374
2375 ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched);
2376 if (r) {
2377
2378 dev_info(adev->dev, "GPU reset failed\n");
2379 }
2380
2381 return r;
2382}
2383
2384void amdgpu_get_pcie_info(struct amdgpu_device *adev)
2385{
2386 u32 mask;
2387 int ret;
2388
2389 if (amdgpu_pcie_gen_cap)
2390 adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap;
2391
2392 if (amdgpu_pcie_lane_cap)
2393 adev->pm.pcie_mlw_mask = amdgpu_pcie_lane_cap;
2394
2395
2396 if (pci_is_root_bus(adev->pdev->bus)) {
2397 if (adev->pm.pcie_gen_mask == 0)
2398 adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
2399 if (adev->pm.pcie_mlw_mask == 0)
2400 adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
2401 return;
2402 }
2403
2404 if (adev->pm.pcie_gen_mask == 0) {
2405 ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask);
2406 if (!ret) {
2407 adev->pm.pcie_gen_mask = (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
2408 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
2409 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
2410
2411 if (mask & DRM_PCIE_SPEED_25)
2412 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
2413 if (mask & DRM_PCIE_SPEED_50)
2414 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2;
2415 if (mask & DRM_PCIE_SPEED_80)
2416 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3;
2417 } else {
2418 adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
2419 }
2420 }
2421 if (adev->pm.pcie_mlw_mask == 0) {
2422 ret = drm_pcie_get_max_link_width(adev->ddev, &mask);
2423 if (!ret) {
2424 switch (mask) {
2425 case 32:
2426 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
2427 CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
2428 CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
2429 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
2430 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
2431 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
2432 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
2433 break;
2434 case 16:
2435 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
2436 CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
2437 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
2438 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
2439 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
2440 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
2441 break;
2442 case 12:
2443 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
2444 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
2445 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
2446 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
2447 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
2448 break;
2449 case 8:
2450 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
2451 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
2452 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
2453 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
2454 break;
2455 case 4:
2456 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
2457 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
2458 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
2459 break;
2460 case 2:
2461 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
2462 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
2463 break;
2464 case 1:
2465 adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
2466 break;
2467 default:
2468 break;
2469 }
2470 } else {
2471 adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
2472 }
2473 }
2474}

/*
 * Debugfs
 */
int amdgpu_debugfs_add_files(struct amdgpu_device *adev,
			     const struct drm_info_list *files,
			     unsigned nfiles)
{
	unsigned i;

	for (i = 0; i < adev->debugfs_count; i++) {
		if (adev->debugfs[i].files == files) {
			/* this set of files has already been registered */
			return 0;
		}
	}

	i = adev->debugfs_count + 1;
	if (i > AMDGPU_DEBUGFS_MAX_COMPONENTS) {
		DRM_ERROR("Reached maximum number of debugfs components.\n");
		DRM_ERROR("Report so we increase "
			  "AMDGPU_DEBUGFS_MAX_COMPONENTS.\n");
		return -EINVAL;
	}
	adev->debugfs[adev->debugfs_count].files = files;
	adev->debugfs[adev->debugfs_count].num_files = nfiles;
	adev->debugfs_count = i;
#if defined(CONFIG_DEBUG_FS)
	drm_debugfs_create_files(files, nfiles,
				 adev->ddev->primary->debugfs_root,
				 adev->ddev->primary);
#endif
	return 0;
}

static void amdgpu_debugfs_remove_files(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	unsigned i;

	for (i = 0; i < adev->debugfs_count; i++) {
		drm_debugfs_remove_files(adev->debugfs[i].files,
					 adev->debugfs[i].num_files,
					 adev->ddev->primary);
	}
#endif
}

#if defined(CONFIG_DEBUG_FS)

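/*
 * The amdgpu_regs read/write handlers below decode extra addressing
 * information out of the file offset (a sketch of the layout, taken from
 * the bit manipulation in the code itself):
 *   bits  0..17 - byte offset of the MMIO register to access
 *   bit     23  - take the power-management mutex around the access
 *   bits 24..33 - shader engine (SE) index, 0x3FF means broadcast
 *   bits 34..43 - shader array (SH) index, 0x3FF means broadcast
 *   bits 44..53 - instance index, 0x3FF means broadcast
 *   bit     62  - apply the SE/SH/instance GRBM bank selection above
 */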
static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf,
					size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;
	bool pm_pg_lock, use_bank;
	unsigned instance_bank, sh_bank, se_bank;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	/* are we reading registers for which a PG lock is necessary? */
	pm_pg_lock = (*pos >> 23) & 1;

	if (*pos & (1ULL << 62)) {
		se_bank = (*pos >> 24) & 0x3FF;
		sh_bank = (*pos >> 34) & 0x3FF;
		instance_bank = (*pos >> 44) & 0x3FF;

		if (se_bank == 0x3FF)
			se_bank = 0xFFFFFFFF;
		if (sh_bank == 0x3FF)
			sh_bank = 0xFFFFFFFF;
		if (instance_bank == 0x3FF)
			instance_bank = 0xFFFFFFFF;
		use_bank = 1;
	} else {
		use_bank = 0;
	}

	*pos &= 0x3FFFF;

	if (use_bank) {
		if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||
		    (se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines))
			return -EINVAL;
		mutex_lock(&adev->grbm_idx_mutex);
		amdgpu_gfx_select_se_sh(adev, se_bank,
					sh_bank, instance_bank);
	}

	if (pm_pg_lock)
		mutex_lock(&adev->pm.mutex);

	while (size) {
		uint32_t value;

		if (*pos > adev->rmmio_size)
			goto end;

		value = RREG32(*pos >> 2);
		r = put_user(value, (uint32_t *)buf);
		if (r) {
			result = r;
			goto end;
		}

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

end:
	if (use_bank) {
		amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
		mutex_unlock(&adev->grbm_idx_mutex);
	}

	if (pm_pg_lock)
		mutex_unlock(&adev->pm.mutex);

	return result;
}

static ssize_t amdgpu_debugfs_regs_write(struct file *f, const char __user *buf,
					 size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;
	bool pm_pg_lock, use_bank;
	unsigned instance_bank, sh_bank, se_bank;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	/* are we writing registers for which a PG lock is necessary? */
	pm_pg_lock = (*pos >> 23) & 1;

	if (*pos & (1ULL << 62)) {
		se_bank = (*pos >> 24) & 0x3FF;
		sh_bank = (*pos >> 34) & 0x3FF;
		instance_bank = (*pos >> 44) & 0x3FF;

		if (se_bank == 0x3FF)
			se_bank = 0xFFFFFFFF;
		if (sh_bank == 0x3FF)
			sh_bank = 0xFFFFFFFF;
		if (instance_bank == 0x3FF)
			instance_bank = 0xFFFFFFFF;
		use_bank = 1;
	} else {
		use_bank = 0;
	}

	*pos &= 0x3FFFF;

	if (use_bank) {
		if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||
		    (se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines))
			return -EINVAL;
		mutex_lock(&adev->grbm_idx_mutex);
		amdgpu_gfx_select_se_sh(adev, se_bank,
					sh_bank, instance_bank);
	}

	if (pm_pg_lock)
		mutex_lock(&adev->pm.mutex);

	while (size) {
		uint32_t value;

		/* don't return with the GRBM selection or PM mutex still held */
		if (*pos > adev->rmmio_size)
			goto end;

		r = get_user(value, (uint32_t *)buf);
		if (r) {
			result = r;
			goto end;
		}

		WREG32(*pos >> 2, value);

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

end:
	if (use_bank) {
		amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
		mutex_unlock(&adev->grbm_idx_mutex);
	}

	if (pm_pg_lock)
		mutex_unlock(&adev->pm.mutex);

	return result;
}

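/*
 * The PCIE, DIDT and SMC debugfs files below carry no bank or lock bits in
 * the file offset; the offset selects the register directly (*pos >> 2 as a
 * dword index for PCIE and DIDT, *pos as a byte address for SMC).
 */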
static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf,
					size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	while (size) {
		uint32_t value;

		value = RREG32_PCIE(*pos >> 2);
		r = put_user(value, (uint32_t *)buf);
		if (r)
			return r;

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	return result;
}

static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user *buf,
					size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	while (size) {
		uint32_t value;

		r = get_user(value, (uint32_t *)buf);
		if (r)
			return r;

		WREG32_PCIE(*pos >> 2, value);

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	return result;
}

static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf,
					size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	while (size) {
		uint32_t value;

		value = RREG32_DIDT(*pos >> 2);
		r = put_user(value, (uint32_t *)buf);
		if (r)
			return r;

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	return result;
}

static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user *buf,
					size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	while (size) {
		uint32_t value;

		r = get_user(value, (uint32_t *)buf);
		if (r)
			return r;

		WREG32_DIDT(*pos >> 2, value);

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	return result;
}

static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf,
					size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	while (size) {
		uint32_t value;

		value = RREG32_SMC(*pos);
		r = put_user(value, (uint32_t *)buf);
		if (r)
			return r;

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	return result;
}

static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *buf,
					size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	while (size) {
		uint32_t value;

		r = get_user(value, (uint32_t *)buf);
		if (r)
			return r;

		WREG32_SMC(*pos, value);

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	return result;
}

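/*
 * amdgpu_gca_config exposes a versioned blob of GFX configuration values:
 * the first dword is a format version, followed by the gfx.config fields,
 * the revision/flag words, and the family/external revision id in the order
 * they are packed below.
 */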
static ssize_t amdgpu_debugfs_gca_config_read(struct file *f, char __user *buf,
					size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;
	uint32_t *config, no_regs = 0;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	config = kmalloc_array(256, sizeof(*config), GFP_KERNEL);
	if (!config)
		return -ENOMEM;

	/* version, increment each time something is added */
	config[no_regs++] = 2;
	config[no_regs++] = adev->gfx.config.max_shader_engines;
	config[no_regs++] = adev->gfx.config.max_tile_pipes;
	config[no_regs++] = adev->gfx.config.max_cu_per_sh;
	config[no_regs++] = adev->gfx.config.max_sh_per_se;
	config[no_regs++] = adev->gfx.config.max_backends_per_se;
	config[no_regs++] = adev->gfx.config.max_texture_channel_caches;
	config[no_regs++] = adev->gfx.config.max_gprs;
	config[no_regs++] = adev->gfx.config.max_gs_threads;
	config[no_regs++] = adev->gfx.config.max_hw_contexts;
	config[no_regs++] = adev->gfx.config.sc_prim_fifo_size_frontend;
	config[no_regs++] = adev->gfx.config.sc_prim_fifo_size_backend;
	config[no_regs++] = adev->gfx.config.sc_hiz_tile_fifo_size;
	config[no_regs++] = adev->gfx.config.sc_earlyz_tile_fifo_size;
	config[no_regs++] = adev->gfx.config.num_tile_pipes;
	config[no_regs++] = adev->gfx.config.backend_enable_mask;
	config[no_regs++] = adev->gfx.config.mem_max_burst_length_bytes;
	config[no_regs++] = adev->gfx.config.mem_row_size_in_kb;
	config[no_regs++] = adev->gfx.config.shader_engine_tile_size;
	config[no_regs++] = adev->gfx.config.num_gpus;
	config[no_regs++] = adev->gfx.config.multi_gpu_tile_size;
	config[no_regs++] = adev->gfx.config.mc_arb_ramcfg;
	config[no_regs++] = adev->gfx.config.gb_addr_config;
	config[no_regs++] = adev->gfx.config.num_rbs;

	/* rev==1 */
	config[no_regs++] = adev->rev_id;
	config[no_regs++] = adev->pg_flags;
	config[no_regs++] = adev->cg_flags;

	/* rev==2 */
	config[no_regs++] = adev->family;
	config[no_regs++] = adev->external_rev_id;

	while (size && (*pos < no_regs * 4)) {
		uint32_t value;

		value = config[*pos >> 2];
		r = put_user(value, (uint32_t *)buf);
		if (r) {
			kfree(config);
			return r;
		}

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	kfree(config);
	return result;
}

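/*
 * amdgpu_sensors: each read must be exactly 4 bytes; the file offset divided
 * by four selects the powerplay sensor index handed to read_sensor().
 */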
static ssize_t amdgpu_debugfs_sensor_read(struct file *f, char __user *buf,
					size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	int idx, r;
	int32_t value;

	if (size != 4 || *pos & 0x3)
		return -EINVAL;

	/* convert offset into sensor type */
	idx = *pos >> 2;

	if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->read_sensor)
		r = adev->powerplay.pp_funcs->read_sensor(adev->powerplay.pp_handle, idx, &value);
	else
		return -EINVAL;

	if (!r)
		r = put_user(value, (int32_t *)buf);

	return !r ? 4 : r;
}

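/*
 * amdgpu_wave decodes its target from the file offset (layout mirrors the
 * shifts in the code): byte offset into the wave data in bits 0..6, SE from
 * bit 7, SH from bit 15, CU from bit 23, wave from bit 31 and SIMD from
 * bit 37.
 */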
static ssize_t amdgpu_debugfs_wave_read(struct file *f, char __user *buf,
					size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = f->f_inode->i_private;
	int r, x;
	ssize_t result = 0;
	uint32_t offset, se, sh, cu, wave, simd, data[32];

	if (size & 3 || *pos & 3)
		return -EINVAL;

	/* decode offset */
	offset = (*pos & 0x7F);
	se = ((*pos >> 7) & 0xFF);
	sh = ((*pos >> 15) & 0xFF);
	cu = ((*pos >> 23) & 0xFF);
	wave = ((*pos >> 31) & 0xFF);
	simd = ((*pos >> 37) & 0xFF);

	/* switch to the specific se/sh/cu */
	mutex_lock(&adev->grbm_idx_mutex);
	amdgpu_gfx_select_se_sh(adev, se, sh, cu);

	x = 0;
	if (adev->gfx.funcs->read_wave_data)
		adev->gfx.funcs->read_wave_data(adev, simd, wave, data, &x);

	amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF);
	mutex_unlock(&adev->grbm_idx_mutex);

	if (!x)
		return -EINVAL;

	while (size && (offset < x * 4)) {
		uint32_t value;

		value = data[offset >> 2];
		r = put_user(value, (uint32_t *)buf);
		if (r)
			return r;

		result += 4;
		buf += 4;
		offset += 4;
		size -= 4;
	}

	return result;
}

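/*
 * amdgpu_gpr decodes its target from the file offset as well: register
 * offset in bits 0..11, SE from bit 12, SH from bit 20, CU from bit 28,
 * wave from bit 36, SIMD from bit 44, thread from bit 52, and bit 60
 * selects the bank (0 = VGPRs, 1 = SGPRs).
 */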
static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf,
					size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = f->f_inode->i_private;
	int r;
	ssize_t result = 0;
	uint32_t offset, se, sh, cu, wave, simd, thread, bank, *data;

	if (size & 3 || *pos & 3)
		return -EINVAL;

	/* decode offset */
	offset = (*pos & 0xFFF);
	se = ((*pos >> 12) & 0xFF);
	sh = ((*pos >> 20) & 0xFF);
	cu = ((*pos >> 28) & 0xFF);
	wave = ((*pos >> 36) & 0xFF);
	simd = ((*pos >> 44) & 0xFF);
	thread = ((*pos >> 52) & 0xFF);
	bank = ((*pos >> 60) & 1);

	data = kmalloc_array(1024, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	/* switch to the specific se/sh/cu */
	mutex_lock(&adev->grbm_idx_mutex);
	amdgpu_gfx_select_se_sh(adev, se, sh, cu);

	if (bank == 0) {
		if (adev->gfx.funcs->read_wave_vgprs)
			adev->gfx.funcs->read_wave_vgprs(adev, simd, wave, thread, offset, size >> 2, data);
	} else {
		if (adev->gfx.funcs->read_wave_sgprs)
			adev->gfx.funcs->read_wave_sgprs(adev, simd, wave, offset, size >> 2, data);
	}

	amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF);
	mutex_unlock(&adev->grbm_idx_mutex);

	while (size) {
		uint32_t value;

		value = data[offset++];
		r = put_user(value, (uint32_t *)buf);
		if (r) {
			result = r;
			goto err;
		}

		result += 4;
		buf += 4;
		size -= 4;
	}

err:
	kfree(data);
	return result;
}

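/*
 * One file_operations per register space; default_llseek lets userspace
 * seek to the encoded offsets described above before reading or writing.
 */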
static const struct file_operations amdgpu_debugfs_regs_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_regs_read,
	.write = amdgpu_debugfs_regs_write,
	.llseek = default_llseek
};
static const struct file_operations amdgpu_debugfs_regs_didt_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_regs_didt_read,
	.write = amdgpu_debugfs_regs_didt_write,
	.llseek = default_llseek
};
static const struct file_operations amdgpu_debugfs_regs_pcie_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_regs_pcie_read,
	.write = amdgpu_debugfs_regs_pcie_write,
	.llseek = default_llseek
};
static const struct file_operations amdgpu_debugfs_regs_smc_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_regs_smc_read,
	.write = amdgpu_debugfs_regs_smc_write,
	.llseek = default_llseek
};

static const struct file_operations amdgpu_debugfs_gca_config_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_gca_config_read,
	.llseek = default_llseek
};

static const struct file_operations amdgpu_debugfs_sensors_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_sensor_read,
	.llseek = default_llseek
};

static const struct file_operations amdgpu_debugfs_wave_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_wave_read,
	.llseek = default_llseek
};
static const struct file_operations amdgpu_debugfs_gpr_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_gpr_read,
	.llseek = default_llseek
};

static const struct file_operations *debugfs_regs[] = {
	&amdgpu_debugfs_regs_fops,
	&amdgpu_debugfs_regs_didt_fops,
	&amdgpu_debugfs_regs_pcie_fops,
	&amdgpu_debugfs_regs_smc_fops,
	&amdgpu_debugfs_gca_config_fops,
	&amdgpu_debugfs_sensors_fops,
	&amdgpu_debugfs_wave_fops,
	&amdgpu_debugfs_gpr_fops,
};

static const char *debugfs_regs_names[] = {
	"amdgpu_regs",
	"amdgpu_regs_didt",
	"amdgpu_regs_pcie",
	"amdgpu_regs_smc",
	"amdgpu_gca_config",
	"amdgpu_sensors",
	"amdgpu_wave",
	"amdgpu_gpr",
};

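/**
 * amdgpu_debugfs_regs_init - create the per-device register debugfs files
 *
 * Creates one debugfs file per entry of debugfs_regs_names[] under the
 * primary DRM minor's debugfs root and remembers the dentries so
 * amdgpu_debugfs_regs_cleanup() can remove them again.
 */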
static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
{
	struct drm_minor *minor = adev->ddev->primary;
	struct dentry *ent, *root = minor->debugfs_root;
	unsigned i, j;

	for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) {
		ent = debugfs_create_file(debugfs_regs_names[i],
					  S_IFREG | S_IRUGO, root,
					  adev, debugfs_regs[i]);
		if (IS_ERR(ent)) {
			/* unwind the files created so far */
			for (j = 0; j < i; j++) {
				debugfs_remove(adev->debugfs_regs[j]);
				adev->debugfs_regs[j] = NULL;
			}
			return PTR_ERR(ent);
		}

		if (!i)
			i_size_write(ent->d_inode, adev->rmmio_size);
		adev->debugfs_regs[i] = ent;
	}

	return 0;
}

static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev)
{
	unsigned i;

	for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) {
		if (adev->debugfs_regs[i]) {
			debugfs_remove(adev->debugfs_regs[i]);
			adev->debugfs_regs[i] = NULL;
		}
	}
}

int amdgpu_debugfs_init(struct drm_minor *minor)
{
	return 0;
}

void amdgpu_debugfs_cleanup(struct drm_minor *minor)
{
}
#else
static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
{
	return 0;
}
static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev) { }
#endif