#include <linux/console.h>
#include <linux/slab.h>
#include <linux/debugfs.h>
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/amdgpu_drm.h>
#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
#include <linux/efi.h>
#include "amdgpu.h"
#include "amdgpu_i2c.h"
#include "atom.h"
#include "amdgpu_atombios.h"
#ifdef CONFIG_DRM_AMDGPU_CIK
#include "cik.h"
#endif
#include "vi.h"
#include "bif/bif_4_1_d.h"

static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev);
static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev);

static const char *amdgpu_asic_name[] = {
	"BONAIRE",
	"KAVERI",
	"KABINI",
	"HAWAII",
	"MULLINS",
	"TOPAZ",
	"TONGA",
	"CARRIZO",
	"LAST",
};
61
62bool amdgpu_device_is_px(struct drm_device *dev)
63{
64 struct amdgpu_device *adev = dev->dev_private;
65
66 if (adev->flags & AMDGPU_IS_PX)
67 return true;
68 return false;
69}
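/**
 * amdgpu_mm_rreg - read a memory mapped IO register
 * @adev: amdgpu_device pointer
 * @reg: dword aligned register offset
 * @always_indirect: force use of the MM_INDEX/MM_DATA indirect pair
 *
 * Returns the 32 bit value from the offset specified.  Registers that
 * fall outside the directly mapped MMIO range (or any register when
 * @always_indirect is set) are read through MM_INDEX/MM_DATA.
 */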
74uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
75 bool always_indirect)
76{
77 if ((reg * 4) < adev->rmmio_size && !always_indirect)
78 return readl(((void __iomem *)adev->rmmio) + (reg * 4));
79 else {
80 unsigned long flags;
81 uint32_t ret;
82
83 spin_lock_irqsave(&adev->mmio_idx_lock, flags);
84 writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
85 ret = readl(((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
86 spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
87
88 return ret;
89 }
90}
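/**
 * amdgpu_mm_wreg - write to a memory mapped IO register
 * @adev: amdgpu_device pointer
 * @reg: dword aligned register offset
 * @v: 32 bit value to write to the register
 * @always_indirect: force use of the MM_INDEX/MM_DATA indirect pair
 *
 * Writes the value to the offset specified, using the MM_INDEX/MM_DATA
 * indirect pair when required.
 */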
92void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
93 bool always_indirect)
94{
95 if ((reg * 4) < adev->rmmio_size && !always_indirect)
96 writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
97 else {
98 unsigned long flags;
99
100 spin_lock_irqsave(&adev->mmio_idx_lock, flags);
101 writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
102 writel(v, ((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
103 spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
104 }
105}
106
107u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg)
108{
109 if ((reg * 4) < adev->rio_mem_size)
110 return ioread32(adev->rio_mem + (reg * 4));
111 else {
112 iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
113 return ioread32(adev->rio_mem + (mmMM_DATA * 4));
114 }
115}
116
117void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
118{
119
120 if ((reg * 4) < adev->rio_mem_size)
121 iowrite32(v, adev->rio_mem + (reg * 4));
122 else {
123 iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
124 iowrite32(v, adev->rio_mem + (mmMM_DATA * 4));
125 }
126}
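/**
 * amdgpu_mm_rdoorbell - read a doorbell dword
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 *
 * Returns the value in the doorbell aperture at the requested doorbell
 * index, or 0 if the index is out of range.
 */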
137u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index)
138{
139 if (index < adev->doorbell.num_doorbells) {
140 return readl(adev->doorbell.ptr + index);
141 } else {
142 DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
143 return 0;
144 }
145}
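/**
 * amdgpu_mm_wdoorbell - write a doorbell dword
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 * @v: value to write
 *
 * Writes @v to the doorbell aperture at the requested doorbell index.
 */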
157void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v)
158{
159 if (index < adev->doorbell.num_doorbells) {
160 writel(v, adev->doorbell.ptr + index);
161 } else {
162 DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
163 }
164}
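/*
 * Invalid register accessors.
 *
 * These are installed as placeholder callbacks in amdgpu_device_init();
 * they are only reached if a register family is accessed before the real
 * ASIC specific callbacks have been registered, so they log the access
 * and BUG().
 */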
176static uint32_t amdgpu_invalid_rreg(struct amdgpu_device *adev, uint32_t reg)
177{
178 DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
179 BUG();
180 return 0;
181}
193static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
194{
195 DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
196 reg, v);
197 BUG();
198}
211static uint32_t amdgpu_block_invalid_rreg(struct amdgpu_device *adev,
212 uint32_t block, uint32_t reg)
213{
214 DRM_ERROR("Invalid callback to read register 0x%04X in block 0x%04X\n",
215 reg, block);
216 BUG();
217 return 0;
218}
231static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
232 uint32_t block,
233 uint32_t reg, uint32_t v)
234{
235 DRM_ERROR("Invalid block callback to write register 0x%04X in block 0x%04X with 0x%08X\n",
236 reg, block, v);
237 BUG();
238}
239
240static int amdgpu_vram_scratch_init(struct amdgpu_device *adev)
241{
242 int r;
243
244 if (adev->vram_scratch.robj == NULL) {
245 r = amdgpu_bo_create(adev, AMDGPU_GPU_PAGE_SIZE,
246 PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM, 0,
247 NULL, &adev->vram_scratch.robj);
248 if (r) {
249 return r;
250 }
251 }
252
253 r = amdgpu_bo_reserve(adev->vram_scratch.robj, false);
254 if (unlikely(r != 0))
255 return r;
256 r = amdgpu_bo_pin(adev->vram_scratch.robj,
257 AMDGPU_GEM_DOMAIN_VRAM, &adev->vram_scratch.gpu_addr);
258 if (r) {
259 amdgpu_bo_unreserve(adev->vram_scratch.robj);
260 return r;
261 }
262 r = amdgpu_bo_kmap(adev->vram_scratch.robj,
263 (void **)&adev->vram_scratch.ptr);
264 if (r)
265 amdgpu_bo_unpin(adev->vram_scratch.robj);
266 amdgpu_bo_unreserve(adev->vram_scratch.robj);
267
268 return r;
269}
270
271static void amdgpu_vram_scratch_fini(struct amdgpu_device *adev)
272{
273 int r;
274
275 if (adev->vram_scratch.robj == NULL) {
276 return;
277 }
278 r = amdgpu_bo_reserve(adev->vram_scratch.robj, false);
279 if (likely(r == 0)) {
280 amdgpu_bo_kunmap(adev->vram_scratch.robj);
281 amdgpu_bo_unpin(adev->vram_scratch.robj);
282 amdgpu_bo_unreserve(adev->vram_scratch.robj);
283 }
284 amdgpu_bo_unref(&adev->vram_scratch.robj);
285}
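/**
 * amdgpu_program_register_sequence - program an array of registers
 * @adev: amdgpu_device pointer
 * @registers: pointer to the register array
 * @array_size: number of dwords in the array
 *
 * Programs an array of (offset, and_mask, or_mask) register triplets:
 * a plain write when and_mask is 0xffffffff, a read-modify-write
 * otherwise.
 */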
297void amdgpu_program_register_sequence(struct amdgpu_device *adev,
298 const u32 *registers,
299 const u32 array_size)
300{
301 u32 tmp, reg, and_mask, or_mask;
302 int i;
303
304 if (array_size % 3)
305 return;
306
	for (i = 0; i < array_size; i += 3) {
308 reg = registers[i + 0];
309 and_mask = registers[i + 1];
310 or_mask = registers[i + 2];
311
312 if (and_mask == 0xffffffff) {
313 tmp = or_mask;
314 } else {
315 tmp = RREG32(reg);
316 tmp &= ~and_mask;
317 tmp |= or_mask;
318 }
319 WREG32(reg, tmp);
320 }
321}
322
323void amdgpu_pci_config_reset(struct amdgpu_device *adev)
324{
325 pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA);
326}
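/**
 * amdgpu_doorbell_init - Init doorbell driver information
 * @adev: amdgpu_device pointer
 *
 * Maps the doorbell aperture (PCI BAR 2) and records its base, size and
 * the number of usable doorbells.
 * Returns 0 on success, negative error code on failure.
 */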
339static int amdgpu_doorbell_init(struct amdgpu_device *adev)
340{
341
342 adev->doorbell.base = pci_resource_start(adev->pdev, 2);
343 adev->doorbell.size = pci_resource_len(adev->pdev, 2);
344
345 adev->doorbell.num_doorbells = min_t(u32, adev->doorbell.size / sizeof(u32),
346 AMDGPU_DOORBELL_MAX_ASSIGNMENT+1);
347 if (adev->doorbell.num_doorbells == 0)
348 return -EINVAL;
349
350 adev->doorbell.ptr = ioremap(adev->doorbell.base, adev->doorbell.num_doorbells * sizeof(u32));
351 if (adev->doorbell.ptr == NULL) {
352 return -ENOMEM;
353 }
354 DRM_INFO("doorbell mmio base: 0x%08X\n", (uint32_t)adev->doorbell.base);
355 DRM_INFO("doorbell mmio size: %u\n", (unsigned)adev->doorbell.size);
356
357 return 0;
358}
367static void amdgpu_doorbell_fini(struct amdgpu_device *adev)
368{
369 iounmap(adev->doorbell.ptr);
370 adev->doorbell.ptr = NULL;
371}
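/**
 * amdgpu_doorbell_get_kfd_info - Report doorbell configuration required to
 *                                setup amdkfd
 * @adev: amdgpu_device pointer
 * @aperture_base: output returning doorbell aperture base physical address
 * @aperture_size: output returning doorbell aperture size in bytes
 * @start_offset: output returning # of doorbell bytes reserved for amdgpu
 */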
386void amdgpu_doorbell_get_kfd_info(struct amdgpu_device *adev,
387 phys_addr_t *aperture_base,
388 size_t *aperture_size,
389 size_t *start_offset)
390{
	/*
	 * The first num_doorbells are used by amdgpu.
	 * amdkfd takes whatever is left in the aperture.
	 */
395 if (adev->doorbell.size > adev->doorbell.num_doorbells * sizeof(u32)) {
396 *aperture_base = adev->doorbell.base;
397 *aperture_size = adev->doorbell.size;
398 *start_offset = adev->doorbell.num_doorbells * sizeof(u32);
399 } else {
400 *aperture_base = 0;
401 *aperture_size = 0;
402 *start_offset = 0;
403 }
404}
421static void amdgpu_wb_fini(struct amdgpu_device *adev)
422{
423 if (adev->wb.wb_obj) {
424 if (!amdgpu_bo_reserve(adev->wb.wb_obj, false)) {
425 amdgpu_bo_kunmap(adev->wb.wb_obj);
426 amdgpu_bo_unpin(adev->wb.wb_obj);
427 amdgpu_bo_unreserve(adev->wb.wb_obj);
428 }
429 amdgpu_bo_unref(&adev->wb.wb_obj);
430 adev->wb.wb = NULL;
431 adev->wb.wb_obj = NULL;
432 }
433}
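/**
 * amdgpu_wb_init - Init Writeback driver info and allocate memory
 * @adev: amdgpu_device pointer
 *
 * Allocates, pins and maps a GTT buffer object used for GPU writeback
 * and clears the slot tracking state.
 * Returns 0 on success or a negative error code on failure.
 */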
444static int amdgpu_wb_init(struct amdgpu_device *adev)
445{
446 int r;
447
448 if (adev->wb.wb_obj == NULL) {
449 r = amdgpu_bo_create(adev, AMDGPU_MAX_WB * 4, PAGE_SIZE, true,
450 AMDGPU_GEM_DOMAIN_GTT, 0, NULL, &adev->wb.wb_obj);
451 if (r) {
452 dev_warn(adev->dev, "(%d) create WB bo failed\n", r);
453 return r;
454 }
455 r = amdgpu_bo_reserve(adev->wb.wb_obj, false);
456 if (unlikely(r != 0)) {
457 amdgpu_wb_fini(adev);
458 return r;
459 }
460 r = amdgpu_bo_pin(adev->wb.wb_obj, AMDGPU_GEM_DOMAIN_GTT,
461 &adev->wb.gpu_addr);
462 if (r) {
463 amdgpu_bo_unreserve(adev->wb.wb_obj);
464 dev_warn(adev->dev, "(%d) pin WB bo failed\n", r);
465 amdgpu_wb_fini(adev);
466 return r;
467 }
468 r = amdgpu_bo_kmap(adev->wb.wb_obj, (void **)&adev->wb.wb);
469 amdgpu_bo_unreserve(adev->wb.wb_obj);
470 if (r) {
471 dev_warn(adev->dev, "(%d) map WB bo failed\n", r);
472 amdgpu_wb_fini(adev);
473 return r;
474 }
475
476 adev->wb.num_wb = AMDGPU_MAX_WB;
477 memset(&adev->wb.used, 0, sizeof(adev->wb.used));
		/* clear wb memory */
480 memset((char *)adev->wb.wb, 0, AMDGPU_GPU_PAGE_SIZE);
481 }
482
483 return 0;
484}
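/**
 * amdgpu_wb_get - Allocate a wb entry
 * @adev: amdgpu_device pointer
 * @wb: wb index
 *
 * Allocates a free writeback slot and returns its index in @wb.
 * Returns 0 on success or -EINVAL if no slot is available.
 */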
495int amdgpu_wb_get(struct amdgpu_device *adev, u32 *wb)
496{
497 unsigned long offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb);
498 if (offset < adev->wb.num_wb) {
499 __set_bit(offset, adev->wb.used);
500 *wb = offset;
501 return 0;
502 } else {
503 return -EINVAL;
504 }
505}
515void amdgpu_wb_free(struct amdgpu_device *adev, u32 wb)
516{
517 if (wb < adev->wb.num_wb)
518 __clear_bit(wb, adev->wb.used);
519}
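/**
 * amdgpu_vram_location - try to find VRAM location
 * @adev: amdgpu_device pointer
 * @mc: memory controller structure holding memory information
 * @base: base address at which to put VRAM
 *
 * Places VRAM at @base in the GPU address space, limits the usable size
 * to the PCI aperture when the full VRAM size does not fit in the
 * address space, and applies the optional amdgpu_vram_limit module
 * parameter.
 */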
553void amdgpu_vram_location(struct amdgpu_device *adev, struct amdgpu_mc *mc, u64 base)
554{
555 uint64_t limit = (uint64_t)amdgpu_vram_limit << 20;
556
557 mc->vram_start = base;
558 if (mc->mc_vram_size > (adev->mc.mc_mask - base + 1)) {
559 dev_warn(adev->dev, "limiting VRAM to PCI aperture size\n");
560 mc->real_vram_size = mc->aper_size;
561 mc->mc_vram_size = mc->aper_size;
562 }
563 mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
564 if (limit && limit < mc->real_vram_size)
565 mc->real_vram_size = limit;
566 dev_info(adev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
567 mc->mc_vram_size >> 20, mc->vram_start,
568 mc->vram_end, mc->real_vram_size >> 20);
569}
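/**
 * amdgpu_gtt_location - try to find GTT location
 * @adev: amdgpu_device pointer
 * @mc: memory controller structure holding memory information
 *
 * Places the GTT aperture in whichever gap (before or after VRAM) in the
 * GPU address space is larger, shrinking the requested GTT size if
 * neither gap can hold it.
 */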
583void amdgpu_gtt_location(struct amdgpu_device *adev, struct amdgpu_mc *mc)
584{
585 u64 size_af, size_bf;
586
587 size_af = ((adev->mc.mc_mask - mc->vram_end) + mc->gtt_base_align) & ~mc->gtt_base_align;
588 size_bf = mc->vram_start & ~mc->gtt_base_align;
589 if (size_bf > size_af) {
590 if (mc->gtt_size > size_bf) {
591 dev_warn(adev->dev, "limiting GTT\n");
592 mc->gtt_size = size_bf;
593 }
594 mc->gtt_start = (mc->vram_start & ~mc->gtt_base_align) - mc->gtt_size;
595 } else {
596 if (mc->gtt_size > size_af) {
597 dev_warn(adev->dev, "limiting GTT\n");
598 mc->gtt_size = size_af;
599 }
600 mc->gtt_start = (mc->vram_end + 1 + mc->gtt_base_align) & ~mc->gtt_base_align;
601 }
602 mc->gtt_end = mc->gtt_start + mc->gtt_size - 1;
603 dev_info(adev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n",
604 mc->gtt_size >> 20, mc->gtt_start, mc->gtt_end);
605}
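/**
 * amdgpu_card_posted - check if the hw has already been initialized
 * @adev: amdgpu_device pointer
 *
 * Returns true if the asic has been posted, determined by checking
 * whether the vBIOS has programmed the memory size register.
 */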
619bool amdgpu_card_posted(struct amdgpu_device *adev)
620{
621 uint32_t reg;
	/* if the asic has been posted, CONFIG_MEMSIZE will have been programmed */
624 reg = RREG32(mmCONFIG_MEMSIZE);
625
626 if (reg)
627 return true;
628
629 return false;
630
631}
642bool amdgpu_boot_test_post_card(struct amdgpu_device *adev)
643{
644 if (amdgpu_card_posted(adev))
645 return true;
646
647 if (adev->bios) {
648 DRM_INFO("GPU not posted. posting now...\n");
649 if (adev->is_atom_bios)
650 amdgpu_atom_asic_init(adev->mode_info.atom_context);
651 return true;
652 } else {
653 dev_err(adev->dev, "Card not posted and no BIOS - ignoring\n");
654 return false;
655 }
656}
668int amdgpu_dummy_page_init(struct amdgpu_device *adev)
669{
670 if (adev->dummy_page.page)
671 return 0;
672 adev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO);
673 if (adev->dummy_page.page == NULL)
674 return -ENOMEM;
675 adev->dummy_page.addr = pci_map_page(adev->pdev, adev->dummy_page.page,
676 0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
677 if (pci_dma_mapping_error(adev->pdev, adev->dummy_page.addr)) {
678 dev_err(&adev->pdev->dev, "Failed to DMA MAP the dummy page\n");
679 __free_page(adev->dummy_page.page);
680 adev->dummy_page.page = NULL;
681 return -ENOMEM;
682 }
683 return 0;
684}
693void amdgpu_dummy_page_fini(struct amdgpu_device *adev)
694{
695 if (adev->dummy_page.page == NULL)
696 return;
697 pci_unmap_page(adev->pdev, adev->dummy_page.addr,
698 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
699 __free_page(adev->dummy_page.page);
700 adev->dummy_page.page = NULL;
701}
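/* ATOM accessor methods: CAIL callbacks used by the AtomBIOS interpreter */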
722static uint32_t cail_pll_read(struct card_info *info, uint32_t reg)
723{
724 return 0;
725}
736static void cail_pll_write(struct card_info *info, uint32_t reg, uint32_t val)
737{
738
739}
750static uint32_t cail_mc_read(struct card_info *info, uint32_t reg)
751{
752 return 0;
753}
764static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val)
765{
766
767}
778static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val)
779{
780 struct amdgpu_device *adev = info->dev->dev_private;
781
782 WREG32(reg, val);
783}
794static uint32_t cail_reg_read(struct card_info *info, uint32_t reg)
795{
796 struct amdgpu_device *adev = info->dev->dev_private;
797 uint32_t r;
798
799 r = RREG32(reg);
800 return r;
801}
812static void cail_ioreg_write(struct card_info *info, uint32_t reg, uint32_t val)
813{
814 struct amdgpu_device *adev = info->dev->dev_private;
815
816 WREG32_IO(reg, val);
817}
828static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg)
829{
830 struct amdgpu_device *adev = info->dev->dev_private;
831 uint32_t r;
832
833 r = RREG32_IO(reg);
834 return r;
835}
846static void amdgpu_atombios_fini(struct amdgpu_device *adev)
847{
848 if (adev->mode_info.atom_context)
849 kfree(adev->mode_info.atom_context->scratch);
850 kfree(adev->mode_info.atom_context);
851 adev->mode_info.atom_context = NULL;
852 kfree(adev->mode_info.atom_card_info);
853 adev->mode_info.atom_card_info = NULL;
854}
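/**
 * amdgpu_atombios_init - init the driver info and callbacks for atombios
 * @adev: amdgpu_device pointer
 *
 * Allocates the card_info structure, registers the register access
 * callbacks and parses the vBIOS with the ATOM interpreter.
 * Returns 0 on success, -ENOMEM on failure.
 */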
866static int amdgpu_atombios_init(struct amdgpu_device *adev)
867{
868 struct card_info *atom_card_info =
869 kzalloc(sizeof(struct card_info), GFP_KERNEL);
870
871 if (!atom_card_info)
872 return -ENOMEM;
873
874 adev->mode_info.atom_card_info = atom_card_info;
875 atom_card_info->dev = adev->ddev;
876 atom_card_info->reg_read = cail_reg_read;
877 atom_card_info->reg_write = cail_reg_write;
878
879 if (adev->rio_mem) {
880 atom_card_info->ioreg_read = cail_ioreg_read;
881 atom_card_info->ioreg_write = cail_ioreg_write;
882 } else {
883 DRM_ERROR("Unable to find PCI I/O BAR; using MMIO for ATOM IIO\n");
884 atom_card_info->ioreg_read = cail_reg_read;
885 atom_card_info->ioreg_write = cail_reg_write;
886 }
887 atom_card_info->mc_read = cail_mc_read;
888 atom_card_info->mc_write = cail_mc_write;
889 atom_card_info->pll_read = cail_pll_read;
890 atom_card_info->pll_write = cail_pll_write;
891
892 adev->mode_info.atom_context = amdgpu_atom_parse(atom_card_info, adev->bios);
893 if (!adev->mode_info.atom_context) {
894 amdgpu_atombios_fini(adev);
895 return -ENOMEM;
896 }
897
898 mutex_init(&adev->mode_info.atom_context->mutex);
899 amdgpu_atombios_scratch_regs_init(adev);
900 amdgpu_atom_allocate_fb_scratch(adev->mode_info.atom_context);
901 return 0;
902}
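/**
 * amdgpu_vga_set_decode - enable/disable vga decode
 * @cookie: amdgpu_device pointer
 * @state: enable/disable vga decode
 *
 * vgaarb callback to enable/disable vga decode; returns the VGA resource
 * flags that remain decoded by the device.
 */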
914static unsigned int amdgpu_vga_set_decode(void *cookie, bool state)
915{
916 struct amdgpu_device *adev = cookie;
917 amdgpu_asic_set_vga_state(adev, state);
918 if (state)
919 return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
920 VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
921 else
922 return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
923}
933static bool amdgpu_check_pot_argument(int arg)
934{
935 return (arg & (arg - 1)) == 0;
936}
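/**
 * amdgpu_check_arguments - validate module parameters
 * @adev: amdgpu_device pointer
 *
 * Validates the vram limit, gart size, vm size and vm block size module
 * parameters and clamps them to sane values when they are out of range.
 */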
946static void amdgpu_check_arguments(struct amdgpu_device *adev)
947{
948
949 if (!amdgpu_check_pot_argument(amdgpu_vram_limit)) {
950 dev_warn(adev->dev, "vram limit (%d) must be a power of 2\n",
951 amdgpu_vram_limit);
952 amdgpu_vram_limit = 0;
953 }
954
955 if (amdgpu_gart_size != -1) {
956
957 if (amdgpu_gart_size < 32) {
958 dev_warn(adev->dev, "gart size (%d) too small\n",
959 amdgpu_gart_size);
960 amdgpu_gart_size = -1;
961 } else if (!amdgpu_check_pot_argument(amdgpu_gart_size)) {
962 dev_warn(adev->dev, "gart size (%d) must be a power of 2\n",
963 amdgpu_gart_size);
964 amdgpu_gart_size = -1;
965 }
966 }
967
968 if (!amdgpu_check_pot_argument(amdgpu_vm_size)) {
969 dev_warn(adev->dev, "VM size (%d) must be a power of 2\n",
970 amdgpu_vm_size);
971 amdgpu_vm_size = 8;
972 }
973
974 if (amdgpu_vm_size < 1) {
975 dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n",
976 amdgpu_vm_size);
977 amdgpu_vm_size = 8;
978 }
979
980
981
982
983 if (amdgpu_vm_size > 1024) {
984 dev_warn(adev->dev, "VM size (%d) too large, max is 1TB\n",
985 amdgpu_vm_size);
986 amdgpu_vm_size = 8;
987 }
988
989
990
991
992 if (amdgpu_vm_block_size == -1) {
993
994
995 unsigned bits = ilog2(amdgpu_vm_size) + 18;
996
997
998
999 if (amdgpu_vm_size <= 8)
1000 amdgpu_vm_block_size = bits - 9;
1001 else
1002 amdgpu_vm_block_size = (bits + 3) / 2;
1003
1004 } else if (amdgpu_vm_block_size < 9) {
1005 dev_warn(adev->dev, "VM page table size (%d) too small\n",
1006 amdgpu_vm_block_size);
1007 amdgpu_vm_block_size = 9;
1008 }
1009
1010 if (amdgpu_vm_block_size > 24 ||
1011 (amdgpu_vm_size * 1024) < (1ull << amdgpu_vm_block_size)) {
1012 dev_warn(adev->dev, "VM page table size (%d) too large\n",
1013 amdgpu_vm_block_size);
1014 amdgpu_vm_block_size = 9;
1015 }
1016}
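/**
 * amdgpu_switcheroo_set_state - set switcheroo state
 * @pdev: pci dev pointer
 * @state: vga_switcheroo state
 *
 * vga_switcheroo callback that suspends or resumes the device when the
 * GPU is switched off or on.
 */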
1027static void amdgpu_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
1028{
1029 struct drm_device *dev = pci_get_drvdata(pdev);
1030
1031 if (amdgpu_device_is_px(dev) && state == VGA_SWITCHEROO_OFF)
1032 return;
1033
1034 if (state == VGA_SWITCHEROO_ON) {
1035 unsigned d3_delay = dev->pdev->d3_delay;
1036
1037 printk(KERN_INFO "amdgpu: switched on\n");
1038
1039 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
1040
1041 amdgpu_resume_kms(dev, true, true);
1042
1043 dev->pdev->d3_delay = d3_delay;
1044
1045 dev->switch_power_state = DRM_SWITCH_POWER_ON;
1046 drm_kms_helper_poll_enable(dev);
1047 } else {
1048 printk(KERN_INFO "amdgpu: switched off\n");
1049 drm_kms_helper_poll_disable(dev);
1050 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
1051 amdgpu_suspend_kms(dev, true, true);
1052 dev->switch_power_state = DRM_SWITCH_POWER_OFF;
1053 }
1054}
1065static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev)
1066{
1067 struct drm_device *dev = pci_get_drvdata(pdev);
1068
	/*
	 * FIXME: open_count is protected by drm_global_mutex but that would lead to
	 * locking inversion with the driver load path. And the access here is
	 * completely racy anyway. So don't bother with locking for now.
	 */
1074 return dev->open_count == 0;
1075}
1076
1077static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
1078 .set_gpu_state = amdgpu_switcheroo_set_state,
1079 .reprobe = NULL,
1080 .can_switch = amdgpu_switcheroo_can_switch,
1081};
1082
1083int amdgpu_set_clockgating_state(struct amdgpu_device *adev,
1084 enum amd_ip_block_type block_type,
1085 enum amd_clockgating_state state)
1086{
1087 int i, r = 0;
1088
1089 for (i = 0; i < adev->num_ip_blocks; i++) {
1090 if (adev->ip_blocks[i].type == block_type) {
1091 r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
1092 state);
1093 if (r)
1094 return r;
1095 }
1096 }
1097 return r;
1098}
1099
1100int amdgpu_set_powergating_state(struct amdgpu_device *adev,
1101 enum amd_ip_block_type block_type,
1102 enum amd_powergating_state state)
1103{
1104 int i, r = 0;
1105
1106 for (i = 0; i < adev->num_ip_blocks; i++) {
1107 if (adev->ip_blocks[i].type == block_type) {
1108 r = adev->ip_blocks[i].funcs->set_powergating_state((void *)adev,
1109 state);
1110 if (r)
1111 return r;
1112 }
1113 }
1114 return r;
1115}
1116
1117const struct amdgpu_ip_block_version * amdgpu_get_ip_block(
1118 struct amdgpu_device *adev,
1119 enum amd_ip_block_type type)
1120{
1121 int i;
1122
1123 for (i = 0; i < adev->num_ip_blocks; i++)
1124 if (adev->ip_blocks[i].type == type)
1125 return &adev->ip_blocks[i];
1126
1127 return NULL;
1128}
1129
1141int amdgpu_ip_block_version_cmp(struct amdgpu_device *adev,
1142 enum amd_ip_block_type type,
1143 u32 major, u32 minor)
1144{
1145 const struct amdgpu_ip_block_version *ip_block;
1146 ip_block = amdgpu_get_ip_block(adev, type);
1147
1148 if (ip_block && ((ip_block->major > major) ||
1149 ((ip_block->major == major) &&
1150 (ip_block->minor >= minor))))
1151 return 0;
1152
1153 return 1;
1154}
1155
1156static int amdgpu_early_init(struct amdgpu_device *adev)
1157{
1158 int i, r;
1159
1160 switch (adev->asic_type) {
1161 case CHIP_TOPAZ:
1162 case CHIP_TONGA:
1163 case CHIP_CARRIZO:
1164 if (adev->asic_type == CHIP_CARRIZO)
1165 adev->family = AMDGPU_FAMILY_CZ;
1166 else
1167 adev->family = AMDGPU_FAMILY_VI;
1168
1169 r = vi_set_ip_blocks(adev);
1170 if (r)
1171 return r;
1172 break;
1173#ifdef CONFIG_DRM_AMDGPU_CIK
1174 case CHIP_BONAIRE:
1175 case CHIP_HAWAII:
1176 case CHIP_KAVERI:
1177 case CHIP_KABINI:
1178 case CHIP_MULLINS:
1179 if ((adev->asic_type == CHIP_BONAIRE) || (adev->asic_type == CHIP_HAWAII))
1180 adev->family = AMDGPU_FAMILY_CI;
1181 else
1182 adev->family = AMDGPU_FAMILY_KV;
1183
1184 r = cik_set_ip_blocks(adev);
1185 if (r)
1186 return r;
1187 break;
1188#endif
1189 default:
1190
1191 return -EINVAL;
1192 }
1193
1194 adev->ip_block_status = kcalloc(adev->num_ip_blocks,
1195 sizeof(struct amdgpu_ip_block_status), GFP_KERNEL);
1196 if (adev->ip_block_status == NULL)
1197 return -ENOMEM;
1198
1199 if (adev->ip_blocks == NULL) {
1200 DRM_ERROR("No IP blocks found!\n");
1201 return r;
1202 }
1203
1204 for (i = 0; i < adev->num_ip_blocks; i++) {
1205 if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
1206 DRM_ERROR("disabled ip block: %d\n", i);
1207 adev->ip_block_status[i].valid = false;
1208 } else {
1209 if (adev->ip_blocks[i].funcs->early_init) {
1210 r = adev->ip_blocks[i].funcs->early_init((void *)adev);
1211 if (r == -ENOENT)
1212 adev->ip_block_status[i].valid = false;
1213 else if (r)
1214 return r;
1215 else
1216 adev->ip_block_status[i].valid = true;
1217 } else {
1218 adev->ip_block_status[i].valid = true;
1219 }
1220 }
1221 }
1222
1223 return 0;
1224}
1225
1226static int amdgpu_init(struct amdgpu_device *adev)
1227{
1228 int i, r;
1229
1230 for (i = 0; i < adev->num_ip_blocks; i++) {
1231 if (!adev->ip_block_status[i].valid)
1232 continue;
1233 r = adev->ip_blocks[i].funcs->sw_init((void *)adev);
1234 if (r)
1235 return r;
1236 adev->ip_block_status[i].sw = true;
1237
1238 if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_GMC) {
1239 r = amdgpu_vram_scratch_init(adev);
1240 if (r)
1241 return r;
1242 r = adev->ip_blocks[i].funcs->hw_init((void *)adev);
1243 if (r)
1244 return r;
1245 r = amdgpu_wb_init(adev);
1246 if (r)
1247 return r;
1248 adev->ip_block_status[i].hw = true;
1249 }
1250 }
1251
1252 for (i = 0; i < adev->num_ip_blocks; i++) {
1253 if (!adev->ip_block_status[i].sw)
1254 continue;
1255
1256 if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_GMC)
1257 continue;
1258 r = adev->ip_blocks[i].funcs->hw_init((void *)adev);
1259 if (r)
1260 return r;
1261 adev->ip_block_status[i].hw = true;
1262 }
1263
1264 return 0;
1265}
1266
1267static int amdgpu_late_init(struct amdgpu_device *adev)
1268{
1269 int i = 0, r;
1270
1271 for (i = 0; i < adev->num_ip_blocks; i++) {
1272 if (!adev->ip_block_status[i].valid)
1273 continue;
1274
1275 r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
1276 AMD_CG_STATE_GATE);
1277 if (r)
1278 return r;
1279 if (adev->ip_blocks[i].funcs->late_init) {
1280 r = adev->ip_blocks[i].funcs->late_init((void *)adev);
1281 if (r)
1282 return r;
1283 }
1284 }
1285
1286 return 0;
1287}
1288
1289static int amdgpu_fini(struct amdgpu_device *adev)
1290{
1291 int i, r;
1292
1293 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
1294 if (!adev->ip_block_status[i].hw)
1295 continue;
1296 if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_GMC) {
1297 amdgpu_wb_fini(adev);
1298 amdgpu_vram_scratch_fini(adev);
1299 }
1300
1301 r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
1302 AMD_CG_STATE_UNGATE);
1303 if (r)
1304 return r;
1305 r = adev->ip_blocks[i].funcs->hw_fini((void *)adev);
1306
1307 adev->ip_block_status[i].hw = false;
1308 }
1309
1310 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
1311 if (!adev->ip_block_status[i].sw)
1312 continue;
1313 r = adev->ip_blocks[i].funcs->sw_fini((void *)adev);
1314
1315 adev->ip_block_status[i].sw = false;
1316 adev->ip_block_status[i].valid = false;
1317 }
1318
1319 return 0;
1320}
1321
1322static int amdgpu_suspend(struct amdgpu_device *adev)
1323{
1324 int i, r;
1325
1326 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
1327 if (!adev->ip_block_status[i].valid)
1328 continue;
1329
1330 r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
1331 AMD_CG_STATE_UNGATE);
1332
1333 r = adev->ip_blocks[i].funcs->suspend(adev);
1334
1335 }
1336
1337 return 0;
1338}
1339
1340static int amdgpu_resume(struct amdgpu_device *adev)
1341{
1342 int i, r;
1343
1344 for (i = 0; i < adev->num_ip_blocks; i++) {
1345 if (!adev->ip_block_status[i].valid)
1346 continue;
1347 r = adev->ip_blocks[i].funcs->resume(adev);
1348 if (r)
1349 return r;
1350 }
1351
1352 return 0;
1353}
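/**
 * amdgpu_device_init - initialize the driver
 * @adev: amdgpu_device pointer
 * @ddev: drm dev pointer
 * @pdev: pci dev pointer
 * @flags: driver flags
 *
 * Initializes the driver info and hw (all asics).
 * Returns 0 for success or an error on failure.
 * Called at driver startup.
 */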
1367int amdgpu_device_init(struct amdgpu_device *adev,
1368 struct drm_device *ddev,
1369 struct pci_dev *pdev,
1370 uint32_t flags)
1371{
1372 int r, i;
1373 bool runtime = false;
1374
1375 adev->shutdown = false;
1376 adev->dev = &pdev->dev;
1377 adev->ddev = ddev;
1378 adev->pdev = pdev;
1379 adev->flags = flags;
1380 adev->asic_type = flags & AMDGPU_ASIC_MASK;
1381 adev->is_atom_bios = false;
1382 adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
1383 adev->mc.gtt_size = 512 * 1024 * 1024;
1384 adev->accel_working = false;
1385 adev->num_rings = 0;
1386 adev->mman.buffer_funcs = NULL;
1387 adev->mman.buffer_funcs_ring = NULL;
1388 adev->vm_manager.vm_pte_funcs = NULL;
1389 adev->vm_manager.vm_pte_funcs_ring = NULL;
1390 adev->gart.gart_funcs = NULL;
1391 adev->fence_context = fence_context_alloc(AMDGPU_MAX_RINGS);
1392
1393 adev->smc_rreg = &amdgpu_invalid_rreg;
1394 adev->smc_wreg = &amdgpu_invalid_wreg;
1395 adev->pcie_rreg = &amdgpu_invalid_rreg;
1396 adev->pcie_wreg = &amdgpu_invalid_wreg;
1397 adev->uvd_ctx_rreg = &amdgpu_invalid_rreg;
1398 adev->uvd_ctx_wreg = &amdgpu_invalid_wreg;
1399 adev->didt_rreg = &amdgpu_invalid_rreg;
1400 adev->didt_wreg = &amdgpu_invalid_wreg;
1401 adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg;
1402 adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg;
1403
1404 DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
1405 amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
1406 pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
1407
1408
1409
1410 mutex_init(&adev->ring_lock);
1411 atomic_set(&adev->irq.ih.lock, 0);
1412 mutex_init(&adev->gem.mutex);
1413 mutex_init(&adev->pm.mutex);
1414 mutex_init(&adev->gfx.gpu_clock_mutex);
1415 mutex_init(&adev->srbm_mutex);
1416 mutex_init(&adev->grbm_idx_mutex);
1417 init_rwsem(&adev->exclusive_lock);
1418 mutex_init(&adev->mn_lock);
1419 hash_init(adev->mn_hash);
1420
1421 amdgpu_check_arguments(adev);
1422
1423
1424
1425 spin_lock_init(&adev->mmio_idx_lock);
1426 spin_lock_init(&adev->smc_idx_lock);
1427 spin_lock_init(&adev->pcie_idx_lock);
1428 spin_lock_init(&adev->uvd_ctx_idx_lock);
1429 spin_lock_init(&adev->didt_idx_lock);
1430 spin_lock_init(&adev->audio_endpt_idx_lock);
1431
1432 adev->rmmio_base = pci_resource_start(adev->pdev, 5);
1433 adev->rmmio_size = pci_resource_len(adev->pdev, 5);
1434 adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size);
1435 if (adev->rmmio == NULL) {
1436 return -ENOMEM;
1437 }
1438 DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
1439 DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size);
1440
1441
1442 amdgpu_doorbell_init(adev);
1443
1444
1445 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
1446 if (pci_resource_flags(adev->pdev, i) & IORESOURCE_IO) {
1447 adev->rio_mem_size = pci_resource_len(adev->pdev, i);
1448 adev->rio_mem = pci_iomap(adev->pdev, i, adev->rio_mem_size);
1449 break;
1450 }
1451 }
1452 if (adev->rio_mem == NULL)
1453 DRM_ERROR("Unable to find PCI I/O BAR\n");
1454
1455
1456 r = amdgpu_early_init(adev);
1457 if (r)
1458 return r;
1459
1460
1461
1462
1463 vga_client_register(adev->pdev, adev, NULL, amdgpu_vga_set_decode);
1464
1465 if (amdgpu_runtime_pm == 1)
1466 runtime = true;
1467 if (amdgpu_device_is_px(ddev))
1468 runtime = true;
1469 vga_switcheroo_register_client(adev->pdev, &amdgpu_switcheroo_ops, runtime);
1470 if (runtime)
1471 vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
1472
1473
1474 if (!amdgpu_get_bios(adev))
1475 return -EINVAL;
1476
1477 if (!adev->is_atom_bios) {
1478 dev_err(adev->dev, "Expecting atombios for GPU\n");
1479 return -EINVAL;
1480 }
1481 r = amdgpu_atombios_init(adev);
1482 if (r)
1483 return r;
1484
1485
1486 if (!amdgpu_card_posted(adev)) {
1487 if (!adev->bios) {
1488 dev_err(adev->dev, "Card not posted and no BIOS - ignoring\n");
1489 return -EINVAL;
1490 }
1491 DRM_INFO("GPU not posted. posting now...\n");
1492 amdgpu_atom_asic_init(adev->mode_info.atom_context);
1493 }
1494
1495
1496 r = amdgpu_atombios_get_clock_info(adev);
1497 if (r)
1498 return r;
1499
1500 amdgpu_atombios_i2c_init(adev);
1501
1502
1503 r = amdgpu_fence_driver_init(adev);
1504 if (r)
1505 return r;
1506
1507
1508 drm_mode_config_init(adev->ddev);
1509
1510 r = amdgpu_init(adev);
1511 if (r) {
1512 amdgpu_fini(adev);
1513 return r;
1514 }
1515
1516 adev->accel_working = true;
1517
1518 amdgpu_fbdev_init(adev);
1519
1520 r = amdgpu_ib_pool_init(adev);
1521 if (r) {
1522 dev_err(adev->dev, "IB initialization failed (%d).\n", r);
1523 return r;
1524 }
1525
1526 r = amdgpu_ib_ring_tests(adev);
1527 if (r)
1528 DRM_ERROR("ib ring test failed (%d).\n", r);
1529
1530 r = amdgpu_gem_debugfs_init(adev);
1531 if (r) {
1532 DRM_ERROR("registering gem debugfs failed (%d).\n", r);
1533 }
1534
1535 r = amdgpu_debugfs_regs_init(adev);
1536 if (r) {
1537 DRM_ERROR("registering register debugfs failed (%d).\n", r);
1538 }
1539
1540 if ((amdgpu_testing & 1)) {
1541 if (adev->accel_working)
1542 amdgpu_test_moves(adev);
1543 else
1544 DRM_INFO("amdgpu: acceleration disabled, skipping move tests\n");
1545 }
1546 if ((amdgpu_testing & 2)) {
1547 if (adev->accel_working)
1548 amdgpu_test_syncing(adev);
1549 else
1550 DRM_INFO("amdgpu: acceleration disabled, skipping sync tests\n");
1551 }
1552 if (amdgpu_benchmarking) {
1553 if (adev->accel_working)
1554 amdgpu_benchmark(adev, amdgpu_benchmarking);
1555 else
1556 DRM_INFO("amdgpu: acceleration disabled, skipping benchmarks\n");
1557 }
1558
1559
1560
1561
1562 r = amdgpu_late_init(adev);
1563 if (r)
1564 return r;
1565
1566 return 0;
1567}
1568
1569static void amdgpu_debugfs_remove_files(struct amdgpu_device *adev);
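/**
 * amdgpu_device_fini - tear down the driver
 * @adev: amdgpu_device pointer
 *
 * Tear down the driver info (all asics).
 * Called at driver shutdown.
 */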
1579void amdgpu_device_fini(struct amdgpu_device *adev)
1580{
1581 int r;
1582
1583 DRM_INFO("amdgpu: finishing device.\n");
1584 adev->shutdown = true;
1585
1586 amdgpu_bo_evict_vram(adev);
1587 amdgpu_ib_pool_fini(adev);
1588 amdgpu_fence_driver_fini(adev);
1589 amdgpu_fbdev_fini(adev);
1590 r = amdgpu_fini(adev);
1591 kfree(adev->ip_block_status);
1592 adev->ip_block_status = NULL;
1593 adev->accel_working = false;
1594
1595 amdgpu_i2c_fini(adev);
1596 amdgpu_atombios_fini(adev);
1597 kfree(adev->bios);
1598 adev->bios = NULL;
1599 vga_switcheroo_unregister_client(adev->pdev);
1600 vga_client_register(adev->pdev, NULL, NULL, NULL);
1601 if (adev->rio_mem)
1602 pci_iounmap(adev->pdev, adev->rio_mem);
1603 adev->rio_mem = NULL;
1604 iounmap(adev->rmmio);
1605 adev->rmmio = NULL;
1606 amdgpu_doorbell_fini(adev);
1607 amdgpu_debugfs_regs_cleanup(adev);
1608 amdgpu_debugfs_remove_files(adev);
1609}
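/**
 * amdgpu_suspend_kms - initiate device suspend
 * @dev: drm dev pointer
 * @suspend: also put the PCI device into D3hot
 * @fbcon: suspend the fbdev console as well
 *
 * Puts the hw in a suspended state: turns off the displays, unpins the
 * scanout buffers, evicts VRAM, waits for the rings to idle and suspends
 * all IP blocks.
 * Returns 0 for success or an error on failure.
 */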
1625int amdgpu_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon)
1626{
1627 struct amdgpu_device *adev;
1628 struct drm_crtc *crtc;
1629 struct drm_connector *connector;
1630 int i, r;
1631 bool force_completion = false;
1632
1633 if (dev == NULL || dev->dev_private == NULL) {
1634 return -ENODEV;
1635 }
1636
1637 adev = dev->dev_private;
1638
1639 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
1640 return 0;
1641
1642 drm_kms_helper_poll_disable(dev);
1643
1644
1645 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
1646 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
1647 }
1648
1649
1650 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
1651 struct amdgpu_framebuffer *rfb = to_amdgpu_framebuffer(crtc->primary->fb);
1652 struct amdgpu_bo *robj;
1653
1654 if (rfb == NULL || rfb->obj == NULL) {
1655 continue;
1656 }
1657 robj = gem_to_amdgpu_bo(rfb->obj);
1658
1659 if (!amdgpu_fbdev_robj_is_fb(adev, robj)) {
1660 r = amdgpu_bo_reserve(robj, false);
1661 if (r == 0) {
1662 amdgpu_bo_unpin(robj);
1663 amdgpu_bo_unreserve(robj);
1664 }
1665 }
1666 }
1667
1668 amdgpu_bo_evict_vram(adev);
1669
1670
1671 for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
1672 struct amdgpu_ring *ring = adev->rings[i];
1673 if (!ring)
1674 continue;
1675
1676 r = amdgpu_fence_wait_empty(ring);
1677 if (r) {
1678
1679 force_completion = true;
1680 }
1681 }
1682 if (force_completion) {
1683 amdgpu_fence_driver_force_completion(adev);
1684 }
1685
1686 r = amdgpu_suspend(adev);
1687
1688
1689 amdgpu_bo_evict_vram(adev);
1690
1691 pci_save_state(dev->pdev);
1692 if (suspend) {
1693
1694 pci_disable_device(dev->pdev);
1695 pci_set_power_state(dev->pdev, PCI_D3hot);
1696 }
1697
1698 if (fbcon) {
1699 console_lock();
1700 amdgpu_fbdev_set_suspend(adev, 1);
1701 console_unlock();
1702 }
1703 return 0;
1704}
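/**
 * amdgpu_resume_kms - initiate device resume
 * @dev: drm dev pointer
 * @resume: re-enable the PCI device as well
 * @fbcon: resume the fbdev console as well
 *
 * Brings the hw back to a functional state: re-posts the asic, resumes
 * all IP blocks and restores the display state.
 * Returns 0 for success or an error on failure.
 */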
1715int amdgpu_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
1716{
1717 struct drm_connector *connector;
1718 struct amdgpu_device *adev = dev->dev_private;
1719 int r;
1720
1721 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
1722 return 0;
1723
1724 if (fbcon) {
1725 console_lock();
1726 }
1727 if (resume) {
1728 pci_set_power_state(dev->pdev, PCI_D0);
1729 pci_restore_state(dev->pdev);
1730 if (pci_enable_device(dev->pdev)) {
1731 if (fbcon)
1732 console_unlock();
1733 return -1;
1734 }
1735 }
1736
1737
1738 amdgpu_atom_asic_init(adev->mode_info.atom_context);
1739
1740 r = amdgpu_resume(adev);
1741
1742 r = amdgpu_ib_ring_tests(adev);
1743 if (r)
1744 DRM_ERROR("ib ring test failed (%d).\n", r);
1745
1746 r = amdgpu_late_init(adev);
1747 if (r)
1748 return r;
1749
1750
1751 if (fbcon) {
1752 drm_helper_resume_force_mode(dev);
1753
1754 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
1755 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
1756 }
1757 }
1758
1759 drm_kms_helper_poll_enable(dev);
1760
1761 if (fbcon) {
1762 amdgpu_fbdev_set_suspend(adev, 0);
1763 console_unlock();
1764 }
1765
1766 return 0;
1767}
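/**
 * amdgpu_gpu_reset - reset the asic
 * @adev: amdgpu device pointer
 *
 * Attempts to reset the GPU if it has hung: saves the unprocessed ring
 * contents, resets the asic, resumes the IP blocks and replays the saved
 * commands.
 * Returns 0 for success or an error on failure.
 */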
1777int amdgpu_gpu_reset(struct amdgpu_device *adev)
1778{
1779 unsigned ring_sizes[AMDGPU_MAX_RINGS];
1780 uint32_t *ring_data[AMDGPU_MAX_RINGS];
1781
1782 bool saved = false;
1783
1784 int i, r;
1785 int resched;
1786
1787 down_write(&adev->exclusive_lock);
1788
1789 if (!adev->needs_reset) {
1790 up_write(&adev->exclusive_lock);
1791 return 0;
1792 }
1793
1794 adev->needs_reset = false;
1795 atomic_inc(&adev->gpu_reset_counter);
1796
1797
1798 resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
1799
1800 r = amdgpu_suspend(adev);
1801
1802 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
1803 struct amdgpu_ring *ring = adev->rings[i];
1804 if (!ring)
1805 continue;
1806
1807 ring_sizes[i] = amdgpu_ring_backup(ring, &ring_data[i]);
1808 if (ring_sizes[i]) {
1809 saved = true;
1810 dev_info(adev->dev, "Saved %d dwords of commands "
1811 "on ring %d.\n", ring_sizes[i], i);
1812 }
1813 }
1814
1815retry:
1816 r = amdgpu_asic_reset(adev);
1817 if (!r) {
1818 dev_info(adev->dev, "GPU reset succeeded, trying to resume\n");
1819 r = amdgpu_resume(adev);
1820 }
1821
1822 if (!r) {
1823 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
1824 struct amdgpu_ring *ring = adev->rings[i];
1825 if (!ring)
1826 continue;
1827
1828 amdgpu_ring_restore(ring, ring_sizes[i], ring_data[i]);
1829 ring_sizes[i] = 0;
1830 ring_data[i] = NULL;
1831 }
1832
1833 r = amdgpu_ib_ring_tests(adev);
1834 if (r) {
1835 dev_err(adev->dev, "ib ring test failed (%d).\n", r);
1836 if (saved) {
1837 saved = false;
1838 r = amdgpu_suspend(adev);
1839 goto retry;
1840 }
1841 }
1842 } else {
1843 amdgpu_fence_driver_force_completion(adev);
1844 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
1845 if (adev->rings[i])
1846 kfree(ring_data[i]);
1847 }
1848 }
1849
1850 drm_helper_resume_force_mode(adev->ddev);
1851
1852 ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched);
1853 if (r) {
1854
1855 dev_info(adev->dev, "GPU reset failed\n");
1856 }
1857
1858 up_write(&adev->exclusive_lock);
1859 return r;
1860}
1866int amdgpu_debugfs_add_files(struct amdgpu_device *adev,
1867 struct drm_info_list *files,
1868 unsigned nfiles)
1869{
1870 unsigned i;
1871
1872 for (i = 0; i < adev->debugfs_count; i++) {
1873 if (adev->debugfs[i].files == files) {
1874
1875 return 0;
1876 }
1877 }
1878
1879 i = adev->debugfs_count + 1;
1880 if (i > AMDGPU_DEBUGFS_MAX_COMPONENTS) {
1881 DRM_ERROR("Reached maximum number of debugfs components.\n");
1882 DRM_ERROR("Report so we increase "
1883 "AMDGPU_DEBUGFS_MAX_COMPONENTS.\n");
1884 return -EINVAL;
1885 }
1886 adev->debugfs[adev->debugfs_count].files = files;
1887 adev->debugfs[adev->debugfs_count].num_files = nfiles;
1888 adev->debugfs_count = i;
1889#if defined(CONFIG_DEBUG_FS)
1890 drm_debugfs_create_files(files, nfiles,
1891 adev->ddev->control->debugfs_root,
1892 adev->ddev->control);
1893 drm_debugfs_create_files(files, nfiles,
1894 adev->ddev->primary->debugfs_root,
1895 adev->ddev->primary);
1896#endif
1897 return 0;
1898}
1899
1900static void amdgpu_debugfs_remove_files(struct amdgpu_device *adev)
1901{
1902#if defined(CONFIG_DEBUG_FS)
1903 unsigned i;
1904
1905 for (i = 0; i < adev->debugfs_count; i++) {
1906 drm_debugfs_remove_files(adev->debugfs[i].files,
1907 adev->debugfs[i].num_files,
1908 adev->ddev->control);
1909 drm_debugfs_remove_files(adev->debugfs[i].files,
1910 adev->debugfs[i].num_files,
1911 adev->ddev->primary);
1912 }
1913#endif
1914}
1915
1916#if defined(CONFIG_DEBUG_FS)
1917
1918static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf,
1919 size_t size, loff_t *pos)
1920{
1921 struct amdgpu_device *adev = f->f_inode->i_private;
1922 ssize_t result = 0;
1923 int r;
1924
1925 if (size & 0x3 || *pos & 0x3)
1926 return -EINVAL;
1927
1928 while (size) {
1929 uint32_t value;
1930
1931 if (*pos > adev->rmmio_size)
1932 return result;
1933
1934 value = RREG32(*pos >> 2);
1935 r = put_user(value, (uint32_t *)buf);
1936 if (r)
1937 return r;
1938
1939 result += 4;
1940 buf += 4;
1941 *pos += 4;
1942 size -= 4;
1943 }
1944
1945 return result;
1946}
1947
1948static ssize_t amdgpu_debugfs_regs_write(struct file *f, const char __user *buf,
1949 size_t size, loff_t *pos)
1950{
1951 struct amdgpu_device *adev = f->f_inode->i_private;
1952 ssize_t result = 0;
1953 int r;
1954
1955 if (size & 0x3 || *pos & 0x3)
1956 return -EINVAL;
1957
1958 while (size) {
1959 uint32_t value;
1960
1961 if (*pos > adev->rmmio_size)
1962 return result;
1963
1964 r = get_user(value, (uint32_t *)buf);
1965 if (r)
1966 return r;
1967
1968 WREG32(*pos >> 2, value);
1969
1970 result += 4;
1971 buf += 4;
1972 *pos += 4;
1973 size -= 4;
1974 }
1975
1976 return result;
1977}
1978
1979static const struct file_operations amdgpu_debugfs_regs_fops = {
1980 .owner = THIS_MODULE,
1981 .read = amdgpu_debugfs_regs_read,
1982 .write = amdgpu_debugfs_regs_write,
1983 .llseek = default_llseek
1984};
1985
1986static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
1987{
1988 struct drm_minor *minor = adev->ddev->primary;
1989 struct dentry *ent, *root = minor->debugfs_root;
1990
1991 ent = debugfs_create_file("amdgpu_regs", S_IFREG | S_IRUGO, root,
1992 adev, &amdgpu_debugfs_regs_fops);
1993 if (IS_ERR(ent))
1994 return PTR_ERR(ent);
1995 i_size_write(ent->d_inode, adev->rmmio_size);
1996 adev->debugfs_regs = ent;
1997
1998 return 0;
1999}
2000
2001static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev)
2002{
2003 debugfs_remove(adev->debugfs_regs);
2004 adev->debugfs_regs = NULL;
2005}
2006
2007int amdgpu_debugfs_init(struct drm_minor *minor)
2008{
2009 return 0;
2010}
2011
2012void amdgpu_debugfs_cleanup(struct drm_minor *minor)
2013{
2014}
2015#else
2016static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
2017{
2018 return 0;
2019}
2020static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev) { }
2021#endif
2022