/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/console.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/radeon_drm.h>
#include <linux/pm_runtime.h>
#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
#include <linux/efi.h>
#include "radeon_reg.h"
#include "radeon.h"
#include "atom.h"

static const char radeon_family_name[][16] = {
	"R100",
	"RV100",
	"RS100",
	"RV200",
	"RS200",
	"R200",
	"RV250",
	"RS300",
	"RV280",
	"R300",
	"R350",
	"RV350",
	"RV380",
	"R420",
	"R423",
	"RV410",
	"RS400",
	"RS480",
	"RS600",
	"RS690",
	"RS740",
	"RV515",
	"R520",
	"RV530",
	"RV560",
	"RV570",
	"R580",
	"R600",
	"RV610",
	"RV630",
	"RV670",
	"RV620",
	"RV635",
	"RS780",
	"RS880",
	"RV770",
	"RV730",
	"RV710",
	"RV740",
	"CEDAR",
	"REDWOOD",
	"JUNIPER",
	"CYPRESS",
	"HEMLOCK",
	"PALM",
	"SUMO",
	"SUMO2",
	"BARTS",
	"TURKS",
	"CAICOS",
	"CAYMAN",
	"ARUBA",
	"TAHITI",
	"PITCAIRN",
	"VERDE",
	"OLAND",
	"HAINAN",
	"BONAIRE",
	"KAVERI",
	"KABINI",
	"HAWAII",
	"MULLINS",
	"LAST",
};

#if defined(CONFIG_VGA_SWITCHEROO)
bool radeon_has_atpx_dgpu_power_cntl(void);
bool radeon_is_atpx_hybrid(void);
#else
static inline bool radeon_has_atpx_dgpu_power_cntl(void) { return false; }
static inline bool radeon_is_atpx_hybrid(void) { return false; }
#endif

#define RADEON_PX_QUIRK_DISABLE_PX (1 << 0)

struct radeon_px_quirk {
	u32 chip_vendor;
	u32 chip_device;
	u32 subsys_vendor;
	u32 subsys_device;
	u32 px_quirk_flags;
};

static struct radeon_px_quirk radeon_px_quirk_list[] = {
	/* Laptop boards on which PX (PowerXpress) switching is known to be
	 * broken; PX is disabled for these via RADEON_PX_QUIRK_DISABLE_PX.
	 */
	{ PCI_VENDOR_ID_ATI, 0x6760, 0x1025, 0x0672, RADEON_PX_QUIRK_DISABLE_PX },
	{ PCI_VENDOR_ID_ATI, 0x6741, 0x1043, 0x108c, RADEON_PX_QUIRK_DISABLE_PX },
	{ PCI_VENDOR_ID_ATI, 0x6840, 0x1043, 0x2122, RADEON_PX_QUIRK_DISABLE_PX },
	{ PCI_VENDOR_ID_ATI, 0x6741, 0x1043, 0x2122, RADEON_PX_QUIRK_DISABLE_PX },
	{ 0, 0, 0, 0, 0 },
};

/* radeon_is_px - report whether the device is the dGPU in a PX setup */
bool radeon_is_px(struct drm_device *dev)
{
	struct radeon_device *rdev = dev->dev_private;

	if (rdev->flags & RADEON_IS_PX)
		return true;
	return false;
}

static void radeon_device_handle_px_quirks(struct radeon_device *rdev)
{
	struct radeon_px_quirk *p = radeon_px_quirk_list;

	/* Apply PX quirks */
	while (p && p->chip_device != 0) {
		if (rdev->pdev->vendor == p->chip_vendor &&
		    rdev->pdev->device == p->chip_device &&
		    rdev->pdev->subsystem_vendor == p->subsys_vendor &&
		    rdev->pdev->subsystem_device == p->subsys_device) {
			rdev->px_quirk_flags = p->px_quirk_flags;
			break;
		}
		++p;
	}

	if (rdev->px_quirk_flags & RADEON_PX_QUIRK_DISABLE_PX)
		rdev->flags &= ~RADEON_IS_PX;

	/* disable PX if the platform provides neither dGPU power control
	 * nor hybrid graphics via ATPX
	 */
	if (!radeon_is_atpx_hybrid() &&
	    !radeon_has_atpx_dgpu_power_cntl())
		rdev->flags &= ~RADEON_IS_PX;
}

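/**
 * radeon_program_register_sequence - program an array of registers
 *
 * @rdev: radeon_device pointer
 * @registers: pointer to the register array (reg, AND mask, OR mask triples)
 * @array_size: size of the register array
 *
 * For each triple, reads the register, clears the bits set in the AND mask,
 * sets the bits in the OR mask and writes the result back; when the AND mask
 * is 0xffffffff the OR value is written directly.
 */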
void radeon_program_register_sequence(struct radeon_device *rdev,
				      const u32 *registers,
				      const u32 array_size)
{
	u32 tmp, reg, and_mask, or_mask;
	int i;

	if (array_size % 3)
		return;

	for (i = 0; i < array_size; i += 3) {
		reg = registers[i + 0];
		and_mask = registers[i + 1];
		or_mask = registers[i + 2];

		if (and_mask == 0xffffffff) {
			tmp = or_mask;
		} else {
			tmp = RREG32(reg);
			tmp &= ~and_mask;
			tmp |= or_mask;
		}
		WREG32(reg, tmp);
	}
}

/* reset the asic by writing the reset data to its PCI config space */
void radeon_pci_config_reset(struct radeon_device *rdev)
{
	pci_write_config_dword(rdev->pdev, 0x7c, RADEON_ASIC_RESET_DATA);
}

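/**
 * radeon_surface_init - initialize the surface registers (r1xx-r5xx)
 *
 * @rdev: radeon_device pointer
 *
 * On pre-r600 asics, re-acquires surface registers that already have a
 * buffer object attached, clears the unused ones and resets
 * RADEON_SURFACE_CNTL.
 */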
void radeon_surface_init(struct radeon_device *rdev)
{
	if (rdev->family < CHIP_R600) {
		int i;

		for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {
			if (rdev->surface_regs[i].bo)
				radeon_bo_get_surface_reg(rdev->surface_regs[i].bo);
			else
				radeon_clear_surface_reg(rdev, i);
		}

		WREG32(RADEON_SURFACE_CNTL, 0);
	}
}

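/**
 * radeon_scratch_init - setup driver info for CP scratch registers
 *
 * @rdev: radeon_device pointer
 *
 * Sets up the number and base offset of the CP scratch registers
 * (5 on pre-r300, 7 on r300 and newer) and marks them all as free.
 */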
void radeon_scratch_init(struct radeon_device *rdev)
{
	int i;

	if (rdev->family < CHIP_R300) {
		rdev->scratch.num_reg = 5;
	} else {
		rdev->scratch.num_reg = 7;
	}
	rdev->scratch.reg_base = RADEON_SCRATCH_REG0;
	for (i = 0; i < rdev->scratch.num_reg; i++) {
		rdev->scratch.free[i] = true;
		rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4);
	}
}

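/**
 * radeon_scratch_get - allocate a scratch register
 *
 * @rdev: radeon_device pointer
 * @reg: scratch register mmio offset (output)
 *
 * Returns 0 and fills @reg with a free scratch register offset, or
 * -EINVAL if none are available.
 */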
int radeon_scratch_get(struct radeon_device *rdev, uint32_t *reg)
{
	int i;

	for (i = 0; i < rdev->scratch.num_reg; i++) {
		if (rdev->scratch.free[i]) {
			rdev->scratch.free[i] = false;
			*reg = rdev->scratch.reg[i];
			return 0;
		}
	}
	return -EINVAL;
}

/**
 * radeon_scratch_free - free a scratch register
 *
 * @rdev: radeon_device pointer
 * @reg: scratch register mmio offset
 *
 * Returns the scratch register to the pool of free registers.
 */
void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg)
{
	int i;

	for (i = 0; i < rdev->scratch.num_reg; i++) {
		if (rdev->scratch.reg[i] == reg) {
			rdev->scratch.free[i] = true;
			return;
		}
	}
}

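/**
 * radeon_doorbell_init - Initialize the doorbell driver information
 *
 * @rdev: radeon_device pointer
 *
 * Maps the doorbell aperture (BAR 2) and sets up the internal doorbell
 * bookkeeping (used on CHIP_BONAIRE and newer).
 * Returns 0 on success, error on failure.
 */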
static int radeon_doorbell_init(struct radeon_device *rdev)
{
	/* doorbell bar mapping */
	rdev->doorbell.base = pci_resource_start(rdev->pdev, 2);
	rdev->doorbell.size = pci_resource_len(rdev->pdev, 2);

	rdev->doorbell.num_doorbells = min_t(u32, rdev->doorbell.size / sizeof(u32), RADEON_MAX_DOORBELLS);
	if (rdev->doorbell.num_doorbells == 0)
		return -EINVAL;

	rdev->doorbell.ptr = ioremap(rdev->doorbell.base, rdev->doorbell.num_doorbells * sizeof(u32));
	if (rdev->doorbell.ptr == NULL) {
		return -ENOMEM;
	}
	DRM_INFO("doorbell mmio base: 0x%08X\n", (uint32_t)rdev->doorbell.base);
	DRM_INFO("doorbell mmio size: %u\n", (unsigned)rdev->doorbell.size);

	memset(&rdev->doorbell.used, 0, sizeof(rdev->doorbell.used));

	return 0;
}

/**
 * radeon_doorbell_fini - Tear down the doorbell driver information
 *
 * @rdev: radeon_device pointer
 *
 * Unmaps the doorbell aperture.
 */
static void radeon_doorbell_fini(struct radeon_device *rdev)
{
	iounmap(rdev->doorbell.ptr);
	rdev->doorbell.ptr = NULL;
}

/**
 * radeon_doorbell_get - Allocate a doorbell entry
 *
 * @rdev: radeon_device pointer
 * @doorbell: doorbell index (output)
 *
 * Allocates a doorbell index from the used-doorbell bitmap.
 * Returns 0 on success or -EINVAL when all doorbells are in use.
 */
int radeon_doorbell_get(struct radeon_device *rdev, u32 *doorbell)
{
	unsigned long offset = find_first_zero_bit(rdev->doorbell.used, rdev->doorbell.num_doorbells);
	if (offset < rdev->doorbell.num_doorbells) {
		__set_bit(offset, rdev->doorbell.used);
		*doorbell = offset;
		return 0;
	} else {
		return -EINVAL;
	}
}

/**
 * radeon_doorbell_free - Free a doorbell entry
 *
 * @rdev: radeon_device pointer
 * @doorbell: doorbell index
 *
 * Returns the doorbell index to the used-doorbell bitmap.
 */
void radeon_doorbell_free(struct radeon_device *rdev, u32 doorbell)
{
	if (doorbell < rdev->doorbell.num_doorbells)
		__clear_bit(doorbell, rdev->doorbell.used);
}

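/**
 * radeon_doorbell_get_kfd_info - Report doorbell configuration for amdkfd
 *
 * @rdev: radeon_device pointer
 * @aperture_base: output returning the doorbell aperture base address
 * @aperture_size: output returning the doorbell aperture size in bytes
 * @start_offset: output returning the number of doorbell bytes reserved
 *                for radeon
 *
 * The radeon driver uses the first num_doorbells doorbells; amdkfd gets
 * whatever is left in the aperture. If the aperture has no room beyond
 * the radeon doorbells, amdkfd gets an empty (zero-sized) aperture.
 */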
void radeon_doorbell_get_kfd_info(struct radeon_device *rdev,
				  phys_addr_t *aperture_base,
				  size_t *aperture_size,
				  size_t *start_offset)
{
	/* The first num_doorbells are used by radeon.
	 * amdkfd takes whatever's left in the aperture.
	 */
	if (rdev->doorbell.size > rdev->doorbell.num_doorbells * sizeof(u32)) {
		*aperture_base = rdev->doorbell.base;
		*aperture_size = rdev->doorbell.size;
		*start_offset = rdev->doorbell.num_doorbells * sizeof(u32);
	} else {
		*aperture_base = 0;
		*aperture_size = 0;
		*start_offset = 0;
	}
}

/**
 * radeon_wb_disable - Disable writeback
 *
 * @rdev: radeon_device pointer
 *
 * Disables the writeback mechanism (all asics).
 */
void radeon_wb_disable(struct radeon_device *rdev)
{
	rdev->wb.enabled = false;
}

/**
 * radeon_wb_fini - Disable writeback and free the writeback buffer
 *
 * @rdev: radeon_device pointer
 *
 * Disables writeback and unpins/frees the writeback buffer object
 * (all asics). Used at driver shutdown.
 */
void radeon_wb_fini(struct radeon_device *rdev)
{
	radeon_wb_disable(rdev);
	if (rdev->wb.wb_obj) {
		if (!radeon_bo_reserve(rdev->wb.wb_obj, false)) {
			radeon_bo_kunmap(rdev->wb.wb_obj);
			radeon_bo_unpin(rdev->wb.wb_obj);
			radeon_bo_unreserve(rdev->wb.wb_obj);
		}
		radeon_bo_unref(&rdev->wb.wb_obj);
		rdev->wb.wb = NULL;
		rdev->wb.wb_obj = NULL;
	}
}

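/**
 * radeon_wb_init - Set up the writeback buffer
 *
 * @rdev: radeon_device pointer
 *
 * Allocates, pins and maps the GTT buffer used for writeback, then decides
 * whether writeback (and fence events on r600+) can be used based on the
 * radeon_no_wb module option, AGP and the asic family.
 * Returns 0 on success or an error on failure.
 */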
int radeon_wb_init(struct radeon_device *rdev)
{
	int r;

	if (rdev->wb.wb_obj == NULL) {
		r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true,
				     RADEON_GEM_DOMAIN_GTT, 0, NULL, NULL,
				     &rdev->wb.wb_obj);
		if (r) {
			dev_warn(rdev->dev, "(%d) create WB bo failed\n", r);
			return r;
		}
		r = radeon_bo_reserve(rdev->wb.wb_obj, false);
		if (unlikely(r != 0)) {
			radeon_wb_fini(rdev);
			return r;
		}
		r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
				  &rdev->wb.gpu_addr);
		if (r) {
			radeon_bo_unreserve(rdev->wb.wb_obj);
			dev_warn(rdev->dev, "(%d) pin WB bo failed\n", r);
			radeon_wb_fini(rdev);
			return r;
		}
		r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
		radeon_bo_unreserve(rdev->wb.wb_obj);
		if (r) {
			dev_warn(rdev->dev, "(%d) map WB bo failed\n", r);
			radeon_wb_fini(rdev);
			return r;
		}
	}

	/* clear wb memory */
	memset((char *)rdev->wb.wb, 0, RADEON_GPU_PAGE_SIZE);
	/* disable event_write fences */
	rdev->wb.use_event = false;
	/* disabled via module param */
	if (radeon_no_wb == 1) {
		rdev->wb.enabled = false;
	} else {
		if (rdev->flags & RADEON_IS_AGP) {
			/* often unreliable on AGP */
			rdev->wb.enabled = false;
		} else if (rdev->family < CHIP_R300) {
			/* often unreliable on pre-r300 */
			rdev->wb.enabled = false;
		} else {
			rdev->wb.enabled = true;
			/* event_write fences are only available on r600+ */
			if (rdev->family >= CHIP_R600) {
				rdev->wb.use_event = true;
			}
		}
	}
	/* always use writeback/events on APUs and newer asics */
	if (rdev->family >= CHIP_PALM) {
		rdev->wb.enabled = true;
		rdev->wb.use_event = true;
	}

	dev_info(rdev->dev, "WB %sabled\n", rdev->wb.enabled ? "en" : "dis");

	return 0;
}

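/**
 * radeon_vram_location - place VRAM in the GPU's MC address space
 *
 * @rdev: radeon device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 * @base: base address at which to put VRAM
 *
 * Places VRAM at @base in the MC address space. If the VRAM size exceeds
 * what the MC address mask allows, or if VRAM would overlap the AGP
 * aperture, VRAM is limited to the PCI aperture size. The optional
 * radeon_vram_limit module parameter further caps the usable size.
 */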
void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 base)
{
	uint64_t limit = (uint64_t)radeon_vram_limit << 20;

	mc->vram_start = base;
	if (mc->mc_vram_size > (rdev->mc.mc_mask - base + 1)) {
		dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
		mc->real_vram_size = mc->aper_size;
		mc->mc_vram_size = mc->aper_size;
	}
	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
	if (rdev->flags & RADEON_IS_AGP && mc->vram_end > mc->gtt_start && mc->vram_start <= mc->gtt_end) {
		dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
		mc->real_vram_size = mc->aper_size;
		mc->mc_vram_size = mc->aper_size;
	}
	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
	if (limit && limit < mc->real_vram_size)
		mc->real_vram_size = limit;
	dev_info(rdev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
		 mc->mc_vram_size >> 20, mc->vram_start,
		 mc->vram_end, mc->real_vram_size >> 20);
}

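/**
 * radeon_gtt_location - place GTT in the GPU's MC address space
 *
 * @rdev: radeon device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 *
 * Places the GTT aperture in whichever hole around VRAM (before or after it)
 * is larger, respecting the GTT base alignment, and shrinks the GTT if the
 * hole is too small to hold the requested size.
 */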
void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
{
	u64 size_af, size_bf;

	size_af = ((rdev->mc.mc_mask - mc->vram_end) + mc->gtt_base_align) & ~mc->gtt_base_align;
	size_bf = mc->vram_start & ~mc->gtt_base_align;
	if (size_bf > size_af) {
		if (mc->gtt_size > size_bf) {
			dev_warn(rdev->dev, "limiting GTT\n");
			mc->gtt_size = size_bf;
		}
		mc->gtt_start = (mc->vram_start & ~mc->gtt_base_align) - mc->gtt_size;
	} else {
		if (mc->gtt_size > size_af) {
			dev_warn(rdev->dev, "limiting GTT\n");
			mc->gtt_size = size_af;
		}
		mc->gtt_start = (mc->vram_end + 1 + mc->gtt_base_align) & ~mc->gtt_base_align;
	}
	mc->gtt_end = mc->gtt_start + mc->gtt_size - 1;
	dev_info(rdev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n",
		 mc->gtt_size >> 20, mc->gtt_start, mc->gtt_end);
}

/*
 * GPU helpers function.
 */

/**
 * radeon_device_is_virtual - check if we are running in a VM
 *
 * Check if the asic has been passed through to a VM (all asics).
 * Used at driver startup.
 * Returns true if we are in a VM, false if not.
 */
bool radeon_device_is_virtual(void)
{
#ifdef CONFIG_X86
	return boot_cpu_has(X86_FEATURE_HYPERVISOR);
#else
	return false;
#endif
}

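/**
 * radeon_card_posted - check if the hw has already been initialized
 *
 * @rdev: radeon_device pointer
 *
 * Check if the asic has been initialized (all asics) by looking at whether
 * any CRTC is enabled and whether the memory size register is programmed.
 * Used at driver startup.
 * Returns true if initialized or false if not.
 */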
bool radeon_card_posted(struct radeon_device *rdev)
{
	uint32_t reg;

	/* when running under a hypervisor, always force re-posting on CIK+ */
	if (rdev->family >= CHIP_BONAIRE &&
	    radeon_device_is_virtual())
		return false;

	/* Apple EFI systems with pre-r600 asics are never posted by the firmware */
	if (efi_enabled(EFI_BOOT) &&
	    (rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE) &&
	    (rdev->family < CHIP_R600))
		return false;

	if (ASIC_IS_NODCE(rdev))
		goto check_memsize;

	/* first check CRTCs */
	if (ASIC_IS_DCE4(rdev)) {
		reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET);
		if (rdev->num_crtc >= 4) {
			reg |= RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) |
				RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET);
		}
		if (rdev->num_crtc >= 6) {
			reg |= RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) |
				RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
		}
		if (reg & EVERGREEN_CRTC_MASTER_EN)
			return true;
	} else if (ASIC_IS_AVIVO(rdev)) {
		reg = RREG32(AVIVO_D1CRTC_CONTROL) |
		      RREG32(AVIVO_D2CRTC_CONTROL);
		if (reg & AVIVO_CRTC_EN) {
			return true;
		}
	} else {
		reg = RREG32(RADEON_CRTC_GEN_CNTL) |
		      RREG32(RADEON_CRTC2_GEN_CNTL);
		if (reg & RADEON_CRTC_EN) {
			return true;
		}
	}

check_memsize:
	/* then check MEM_SIZE, in case the crtcs are off */
	if (rdev->family >= CHIP_R600)
		reg = RREG32(R600_CONFIG_MEMSIZE);
	else
		reg = RREG32(RADEON_CONFIG_MEMSIZE);

	if (reg)
		return true;

	return false;
}

/**
 * radeon_update_bandwidth_info - update display bandwidth params
 *
 * @rdev: radeon_device pointer
 *
 * Used when sclk/mclk are switched or display modes are set.
 * The params are used to calculate display watermarks (all asics).
 */
void radeon_update_bandwidth_info(struct radeon_device *rdev)
{
	fixed20_12 a;
	u32 sclk = rdev->pm.current_sclk;
	u32 mclk = rdev->pm.current_mclk;

	/* sclk/mclk in Mhz */
	a.full = dfixed_const(100);
	rdev->pm.sclk.full = dfixed_const(sclk);
	rdev->pm.sclk.full = dfixed_div(rdev->pm.sclk, a);
	rdev->pm.mclk.full = dfixed_const(mclk);
	rdev->pm.mclk.full = dfixed_div(rdev->pm.mclk, a);

	if (rdev->flags & RADEON_IS_IGP) {
		a.full = dfixed_const(16);
		/* core bandwidth for IGPs is derived from sclk */
		rdev->pm.core_bandwidth.full = dfixed_div(rdev->pm.sclk, a);
	}
}

/**
 * radeon_boot_test_post_card - check and possibly initialize the hw
 *
 * @rdev: radeon_device pointer
 *
 * Check if the asic is initialized and if not, attempt to initialize
 * it from the BIOS tables (all asics).
 * Returns true if initialized or false if not.
 */
bool radeon_boot_test_post_card(struct radeon_device *rdev)
{
	if (radeon_card_posted(rdev))
		return true;

	if (rdev->bios) {
		DRM_INFO("GPU not posted. posting now...\n");
		if (rdev->is_atom_bios)
			atom_asic_init(rdev->mode_info.atom_context);
		else
			radeon_combios_asic_init(rdev->ddev);
		return true;
	} else {
		dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
		return false;
	}
}

/**
 * radeon_dummy_page_init - init dummy page used by the driver
 *
 * @rdev: radeon_device pointer
 *
 * Allocate the dummy page used by the driver (all asics).
 * This dummy page is used by the driver as a filler for gart entries
 * when pages are taken out of the gart.
 * Returns 0 on success, negative error code on failure.
 */
int radeon_dummy_page_init(struct radeon_device *rdev)
{
	if (rdev->dummy_page.page)
		return 0;
	rdev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO);
	if (rdev->dummy_page.page == NULL)
		return -ENOMEM;
	rdev->dummy_page.addr = pci_map_page(rdev->pdev, rdev->dummy_page.page,
					     0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	if (pci_dma_mapping_error(rdev->pdev, rdev->dummy_page.addr)) {
		dev_err(&rdev->pdev->dev, "Failed to DMA MAP the dummy page\n");
		__free_page(rdev->dummy_page.page);
		rdev->dummy_page.page = NULL;
		return -ENOMEM;
	}
	rdev->dummy_page.entry = radeon_gart_get_page_entry(rdev->dummy_page.addr,
							    RADEON_GART_PAGE_DUMMY);
	return 0;
}

/**
 * radeon_dummy_page_fini - free dummy page used by the driver
 *
 * @rdev: radeon_device pointer
 *
 * Frees the dummy page used by the driver (all asics).
 */
void radeon_dummy_page_fini(struct radeon_device *rdev)
{
	if (rdev->dummy_page.page == NULL)
		return;
	pci_unmap_page(rdev->pdev, rdev->dummy_page.addr,
		       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	__free_page(rdev->dummy_page.page);
	rdev->dummy_page.page = NULL;
}

/*
 * ATOM accessor methods
 *
 * These callbacks are handed to the ATOM BIOS interpreter (via the
 * card_info structure set up in radeon_atombios_init) so that it can
 * access PLL, MC, MMIO and PCI I/O registers through the driver.
 */
static uint32_t cail_pll_read(struct card_info *info, uint32_t reg)
{
	struct radeon_device *rdev = info->dev->dev_private;
	uint32_t r;

	r = rdev->pll_rreg(rdev, reg);
	return r;
}

static void cail_pll_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct radeon_device *rdev = info->dev->dev_private;

	rdev->pll_wreg(rdev, reg, val);
}

static uint32_t cail_mc_read(struct card_info *info, uint32_t reg)
{
	struct radeon_device *rdev = info->dev->dev_private;
	uint32_t r;

	r = rdev->mc_rreg(rdev, reg);
	return r;
}

static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct radeon_device *rdev = info->dev->dev_private;

	rdev->mc_wreg(rdev, reg, val);
}

static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct radeon_device *rdev = info->dev->dev_private;

	WREG32(reg*4, val);
}

static uint32_t cail_reg_read(struct card_info *info, uint32_t reg)
{
	struct radeon_device *rdev = info->dev->dev_private;
	uint32_t r;

	r = RREG32(reg*4);
	return r;
}

static void cail_ioreg_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct radeon_device *rdev = info->dev->dev_private;

	WREG32_IO(reg*4, val);
}

static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg)
{
	struct radeon_device *rdev = info->dev->dev_private;
	uint32_t r;

	r = RREG32_IO(reg*4);
	return r;
}

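/**
 * radeon_atombios_init - init the driver info and callbacks for atombios
 *
 * @rdev: radeon_device pointer
 *
 * Initializes the driver info and register access callbacks for the
 * ATOM interpreter, then parses the BIOS and allocates the scratch
 * memory the interpreter needs.
 * Returns 0 on success, -ENOMEM on failure.
 * Called at driver startup.
 */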
int radeon_atombios_init(struct radeon_device *rdev)
{
	struct card_info *atom_card_info =
	    kzalloc(sizeof(struct card_info), GFP_KERNEL);

	if (!atom_card_info)
		return -ENOMEM;

	rdev->mode_info.atom_card_info = atom_card_info;
	atom_card_info->dev = rdev->ddev;
	atom_card_info->reg_read = cail_reg_read;
	atom_card_info->reg_write = cail_reg_write;
	/* needed for iio ops */
	if (rdev->rio_mem) {
		atom_card_info->ioreg_read = cail_ioreg_read;
		atom_card_info->ioreg_write = cail_ioreg_write;
	} else {
		DRM_ERROR("Unable to find PCI I/O BAR; using MMIO for ATOM IIO\n");
		atom_card_info->ioreg_read = cail_reg_read;
		atom_card_info->ioreg_write = cail_reg_write;
	}
	atom_card_info->mc_read = cail_mc_read;
	atom_card_info->mc_write = cail_mc_write;
	atom_card_info->pll_read = cail_pll_read;
	atom_card_info->pll_write = cail_pll_write;

	rdev->mode_info.atom_context = atom_parse(atom_card_info, rdev->bios);
	if (!rdev->mode_info.atom_context) {
		radeon_atombios_fini(rdev);
		return -ENOMEM;
	}

	mutex_init(&rdev->mode_info.atom_context->mutex);
	mutex_init(&rdev->mode_info.atom_context->scratch_mutex);
	radeon_atom_initialize_bios_scratch_regs(rdev->ddev);
	atom_allocate_fb_scratch(rdev->mode_info.atom_context);
	return 0;
}

/**
 * radeon_atombios_fini - free the driver info and callbacks for atombios
 *
 * @rdev: radeon_device pointer
 *
 * Frees the driver info and register access callbacks for the ATOM
 * interpreter.
 * Called at driver shutdown.
 */
void radeon_atombios_fini(struct radeon_device *rdev)
{
	if (rdev->mode_info.atom_context) {
		kfree(rdev->mode_info.atom_context->scratch);
	}
	kfree(rdev->mode_info.atom_context);
	rdev->mode_info.atom_context = NULL;
	kfree(rdev->mode_info.atom_card_info);
	rdev->mode_info.atom_card_info = NULL;
}

/*
 * COMBIOS
 *
 * COMBIOS is the older BIOS table format used on pre-ATOM boards;
 * the tables are parsed directly by the driver.
 */
int radeon_combios_init(struct radeon_device *rdev)
{
	radeon_combios_initialize_bios_scratch_regs(rdev->ddev);
	return 0;
}

void radeon_combios_fini(struct radeon_device *rdev)
{
}

/**
 * radeon_vga_set_decode - enable/disable vga decode
 *
 * @cookie: radeon_device pointer
 * @state: enable/disable vga decode
 *
 * Enable/disable vga decode (all asics).
 * Returns VGA resource flags.
 */
static unsigned int radeon_vga_set_decode(void *cookie, bool state)
{
	struct radeon_device *rdev = cookie;
	radeon_vga_set_state(rdev, state);
	if (state)
		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
	else
		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}

/**
 * radeon_check_pot_argument - check that argument is a power of two
 *
 * @arg: value to check
 *
 * Validates that a certain argument is a power of two (all asics).
 * Returns true if argument is valid.
 */
static bool radeon_check_pot_argument(int arg)
{
	return (arg & (arg - 1)) == 0;
}

/**
 * radeon_gart_size_auto - determine a sensible default GART size
 *
 * @family: asic family
 *
 * Returns the default GART size in MB depending on the asic generation.
 */
static int radeon_gart_size_auto(enum radeon_family family)
{
	/* in MB */
	if (family >= CHIP_TAHITI)
		return 2048;
	else if (family >= CHIP_RV770)
		return 1024;
	else
		return 512;
}

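/**
 * radeon_check_arguments - validate module params
 *
 * @rdev: radeon_device pointer
 *
 * Validates certain module parameters (vram limit, gart size, agp mode,
 * vm size, vm block size) and fixes up or resets invalid values (all asics).
 */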
static void radeon_check_arguments(struct radeon_device *rdev)
{
	/* vramlimit must be a power of two */
	if (!radeon_check_pot_argument(radeon_vram_limit)) {
		dev_warn(rdev->dev, "vram limit (%d) must be a power of 2\n",
			 radeon_vram_limit);
		radeon_vram_limit = 0;
	}

	if (radeon_gart_size == -1) {
		radeon_gart_size = radeon_gart_size_auto(rdev->family);
	}
	/* gtt size must be a power of two and greater or equal to 32M */
	if (radeon_gart_size < 32) {
		dev_warn(rdev->dev, "gart size (%d) too small\n",
			 radeon_gart_size);
		radeon_gart_size = radeon_gart_size_auto(rdev->family);
	} else if (!radeon_check_pot_argument(radeon_gart_size)) {
		dev_warn(rdev->dev, "gart size (%d) must be a power of 2\n",
			 radeon_gart_size);
		radeon_gart_size = radeon_gart_size_auto(rdev->family);
	}
	rdev->mc.gtt_size = (uint64_t)radeon_gart_size << 20;

	/* AGP mode can only be -1, 0, 1, 2, 4, 8 */
	switch (radeon_agpmode) {
	case -1:
	case 0:
	case 1:
	case 2:
	case 4:
	case 8:
		break;
	default:
		dev_warn(rdev->dev, "invalid AGP mode %d (valid mode: "
			 "-1, 0, 1, 2, 4, 8)\n", radeon_agpmode);
		radeon_agpmode = 0;
		break;
	}

	if (!radeon_check_pot_argument(radeon_vm_size)) {
		dev_warn(rdev->dev, "VM size (%d) must be a power of 2\n",
			 radeon_vm_size);
		radeon_vm_size = 4;
	}

	if (radeon_vm_size < 1) {
		dev_warn(rdev->dev, "VM size (%d) too small, min is 1GB\n",
			 radeon_vm_size);
		radeon_vm_size = 4;
	}

	/*
	 * Max GPUVM size for Cayman, SI and CI are 40 bits.
	 */
	if (radeon_vm_size > 1024) {
		dev_warn(rdev->dev, "VM size (%d) too large, max is 1TB\n",
			 radeon_vm_size);
		radeon_vm_size = 4;
	}

	/* defines number of bits in page table versus page directory,
	 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
	 * page table and the remaining bits are in the page directory
	 */
	if (radeon_vm_block_size == -1) {

		/* Total bits covered by PD + PTs */
		unsigned bits = ilog2(radeon_vm_size) + 18;

		/* Make sure the PD is 4K in size up to 8GB address space.
		   Above that split equal between PD and PTs */
		if (radeon_vm_size <= 8)
			radeon_vm_block_size = bits - 9;
		else
			radeon_vm_block_size = (bits + 3) / 2;

	} else if (radeon_vm_block_size < 9) {
		dev_warn(rdev->dev, "VM page table size (%d) too small\n",
			 radeon_vm_block_size);
		radeon_vm_block_size = 9;
	}

	if (radeon_vm_block_size > 24 ||
	    (radeon_vm_size * 1024) < (1ull << radeon_vm_block_size)) {
		dev_warn(rdev->dev, "VM page table size (%d) too large\n",
			 radeon_vm_block_size);
		radeon_vm_block_size = 9;
	}
}

/**
 * radeon_switcheroo_set_state - set switcheroo state
 *
 * @pdev: pci dev pointer
 * @state: vga_switcheroo state
 *
 * Callback for the vga_switcheroo driver. Suspends or resumes the asic
 * when the switcheroo power state changes.
 */
static void radeon_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	if (radeon_is_px(dev) && state == VGA_SWITCHEROO_OFF)
		return;

	if (state == VGA_SWITCHEROO_ON) {
		pr_info("radeon: switched on\n");
		/* don't suspend or resume card normally */
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;

		radeon_resume_kms(dev, true, true);

		dev->switch_power_state = DRM_SWITCH_POWER_ON;
		drm_kms_helper_poll_enable(dev);
	} else {
		pr_info("radeon: switched off\n");
		drm_kms_helper_poll_disable(dev);
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		radeon_suspend_kms(dev, true, true, false);
		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
	}
}

/**
 * radeon_switcheroo_can_switch - see if switcheroo state can change
 *
 * @pdev: pci dev pointer
 *
 * Callback for the vga_switcheroo driver. Checks whether the switcheroo
 * state can be changed.
 * Returns true if the state can be changed, false if not.
 */
static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	/*
	 * The open_count access here is racy, but taking drm_global_mutex
	 * would lead to a locking inversion with the driver load path, so
	 * don't bother with locking for now.
	 */
	return dev->open_count == 0;
}

static const struct vga_switcheroo_client_ops radeon_switcheroo_ops = {
	.set_gpu_state = radeon_switcheroo_set_state,
	.reprobe = NULL,
	.can_switch = radeon_switcheroo_can_switch,
};

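/**
 * radeon_device_init - initialize the driver
 *
 * @rdev: radeon_device pointer
 * @ddev: drm dev pointer
 * @pdev: pci dev pointer
 * @flags: driver flags
 *
 * Initializes the driver info and hw (all asics): maps the registers,
 * sets up DMA masks, registers with the VGA arbiter and vga_switcheroo,
 * calls the asic-specific init path and runs the IB ring tests.
 * Returns 0 for success or an error on failure.
 * Called at driver startup.
 */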
int radeon_device_init(struct radeon_device *rdev,
		       struct drm_device *ddev,
		       struct pci_dev *pdev,
		       uint32_t flags)
{
	int r, i;
	int dma_bits;
	bool runtime = false;

	rdev->shutdown = false;
	rdev->dev = &pdev->dev;
	rdev->ddev = ddev;
	rdev->pdev = pdev;
	rdev->flags = flags;
	rdev->family = flags & RADEON_FAMILY_MASK;
	rdev->is_atom_bios = false;
	rdev->usec_timeout = RADEON_MAX_USEC_TIMEOUT;
	rdev->mc.gtt_size = 512 * 1024 * 1024;
	rdev->accel_working = false;
	/* set up ring ids */
	for (i = 0; i < RADEON_NUM_RINGS; i++) {
		rdev->ring[i].idx = i;
	}
	rdev->fence_context = dma_fence_context_alloc(RADEON_NUM_RINGS);

	DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
		 radeon_family_name[rdev->family], pdev->vendor, pdev->device,
		 pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);

	/* mutex initialization are all done here so we
	 * can recall function without having locking issues */
	mutex_init(&rdev->ring_lock);
	mutex_init(&rdev->dc_hw_i2c_mutex);
	atomic_set(&rdev->ih.lock, 0);
	mutex_init(&rdev->gem.mutex);
	mutex_init(&rdev->pm.mutex);
	mutex_init(&rdev->gpu_clock_mutex);
	mutex_init(&rdev->srbm_mutex);
	mutex_init(&rdev->grbm_idx_mutex);
	init_rwsem(&rdev->pm.mclk_lock);
	init_rwsem(&rdev->exclusive_lock);
	init_waitqueue_head(&rdev->irq.vblank_queue);
	mutex_init(&rdev->mn_lock);
	hash_init(rdev->mn_hash);
	r = radeon_gem_init(rdev);
	if (r)
		return r;

	radeon_check_arguments(rdev);
	/* Adjust VM size here.
	 * Max GPUVM size for cayman+ is 40 bits.
	 */
	rdev->vm_manager.max_pfn = radeon_vm_size << 18;

	/* Set asic functions */
	r = radeon_asic_init(rdev);
	if (r)
		return r;

	/* all of the newer IGP chips have an internal gart
	 * However some rs4xx report as AGP, so remove that here.
	 */
	if ((rdev->family >= CHIP_RS400) &&
	    (rdev->flags & RADEON_IS_IGP)) {
		rdev->flags &= ~RADEON_IS_AGP;
	}

	if (rdev->flags & RADEON_IS_AGP && radeon_agpmode == -1) {
		radeon_agp_disable(rdev);
	}

	/* Set the internal MC address mask
	 * This is the max address of the GPU's
	 * internal address space.
	 */
	if (rdev->family >= CHIP_CAYMAN)
		rdev->mc.mc_mask = 0xffffffffffULL; /* 40 bit MC */
	else if (rdev->family >= CHIP_CEDAR)
		rdev->mc.mc_mask = 0xfffffffffULL; /* 36 bit MC */
	else
		rdev->mc.mc_mask = 0xffffffffULL; /* 32 bit MC */

	/* set DMA mask + need_dma32 flags.
	 * AGP and old PCI asics are limited to 32 bits,
	 * everything else can use a 40-bit DMA mask.
	 */
	rdev->need_dma32 = false;
	if (rdev->flags & RADEON_IS_AGP)
		rdev->need_dma32 = true;
	if ((rdev->flags & RADEON_IS_PCI) &&
	    (rdev->family <= CHIP_RS740))
		rdev->need_dma32 = true;

	dma_bits = rdev->need_dma32 ? 32 : 40;
	r = pci_set_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		rdev->need_dma32 = true;
		dma_bits = 32;
		pr_warn("radeon: No suitable DMA available\n");
	}
	r = pci_set_consistent_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		pci_set_consistent_dma_mask(rdev->pdev, DMA_BIT_MASK(32));
		pr_warn("radeon: No coherent DMA available\n");
	}

	/* Registers mapping */
	/* TODO: block userspace mapping of io register */
	spin_lock_init(&rdev->mmio_idx_lock);
	spin_lock_init(&rdev->smc_idx_lock);
	spin_lock_init(&rdev->pll_idx_lock);
	spin_lock_init(&rdev->mc_idx_lock);
	spin_lock_init(&rdev->pcie_idx_lock);
	spin_lock_init(&rdev->pciep_idx_lock);
	spin_lock_init(&rdev->pif_idx_lock);
	spin_lock_init(&rdev->cg_idx_lock);
	spin_lock_init(&rdev->uvd_idx_lock);
	spin_lock_init(&rdev->rcu_idx_lock);
	spin_lock_init(&rdev->didt_idx_lock);
	spin_lock_init(&rdev->end_idx_lock);
	if (rdev->family >= CHIP_BONAIRE) {
		rdev->rmmio_base = pci_resource_start(rdev->pdev, 5);
		rdev->rmmio_size = pci_resource_len(rdev->pdev, 5);
	} else {
		rdev->rmmio_base = pci_resource_start(rdev->pdev, 2);
		rdev->rmmio_size = pci_resource_len(rdev->pdev, 2);
	}
	rdev->rmmio = ioremap(rdev->rmmio_base, rdev->rmmio_size);
	if (rdev->rmmio == NULL)
		return -ENOMEM;

	/* doorbell bar mapping */
	if (rdev->family >= CHIP_BONAIRE)
		radeon_doorbell_init(rdev);

	/* io port mapping */
	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
		if (pci_resource_flags(rdev->pdev, i) & IORESOURCE_IO) {
			rdev->rio_mem_size = pci_resource_len(rdev->pdev, i);
			rdev->rio_mem = pci_iomap(rdev->pdev, i, rdev->rio_mem_size);
			break;
		}
	}
	if (rdev->rio_mem == NULL)
		DRM_ERROR("Unable to find PCI I/O BAR\n");

	if (rdev->flags & RADEON_IS_PX)
		radeon_device_handle_px_quirks(rdev);

	/* if we have > 1 VGA cards, then disable the radeon VGA resources */
	/* this will fail for cards that aren't VGA class devices, just
	 * ignore it */
	vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode);

	if (rdev->flags & RADEON_IS_PX)
		runtime = true;
	if (!pci_is_thunderbolt_attached(rdev->pdev))
		vga_switcheroo_register_client(rdev->pdev,
					       &radeon_switcheroo_ops, runtime);
	if (runtime)
		vga_switcheroo_init_domain_pm_ops(rdev->dev, &rdev->vga_pm_domain);

	r = radeon_init(rdev);
	if (r)
		goto failed;

	r = radeon_gem_debugfs_init(rdev);
	if (r) {
		DRM_ERROR("registering gem debugfs failed (%d).\n", r);
	}

	r = radeon_mst_debugfs_init(rdev);
	if (r) {
		DRM_ERROR("registering mst debugfs failed (%d).\n", r);
	}

	if (rdev->flags & RADEON_IS_AGP && !rdev->accel_working) {
		/* Acceleration not working on AGP card try again
		 * with fallback to PCI or PCIE GART
		 */
		radeon_asic_reset(rdev);
		radeon_fini(rdev);
		radeon_agp_disable(rdev);
		r = radeon_init(rdev);
		if (r)
			goto failed;
	}

	r = radeon_ib_ring_tests(rdev);
	if (r)
		DRM_ERROR("ib ring test failed (%d).\n", r);

	/*
	 * Turks/Thames GPUs can hang the whole laptop if DPM is not
	 * restarted after the CP ring has chewed at least one packet,
	 * so stop and restart DPM after radeon_ib_ring_tests().
	 */
	if (rdev->pm.dpm_enabled &&
	    (rdev->pm.pm_method == PM_METHOD_DPM) &&
	    (rdev->family == CHIP_TURKS) &&
	    (rdev->flags & RADEON_IS_MOBILITY)) {
		mutex_lock(&rdev->pm.mutex);
		radeon_dpm_disable(rdev);
		radeon_dpm_enable(rdev);
		mutex_unlock(&rdev->pm.mutex);
	}

	if ((radeon_testing & 1)) {
		if (rdev->accel_working)
			radeon_test_moves(rdev);
		else
			DRM_INFO("radeon: acceleration disabled, skipping move tests\n");
	}
	if ((radeon_testing & 2)) {
		if (rdev->accel_working)
			radeon_test_syncing(rdev);
		else
			DRM_INFO("radeon: acceleration disabled, skipping sync tests\n");
	}
	if (radeon_benchmarking) {
		if (rdev->accel_working)
			radeon_benchmark(rdev, radeon_benchmarking);
		else
			DRM_INFO("radeon: acceleration disabled, skipping benchmarks\n");
	}
	return 0;

failed:
	if (radeon_is_px(ddev))
		pm_runtime_put_noidle(ddev->dev);
	if (runtime)
		vga_switcheroo_fini_domain_pm_ops(rdev->dev);
	return r;
}

/**
 * radeon_device_fini - tear down the driver
 *
 * @rdev: radeon_device pointer
 *
 * Tear down the driver info (all asics).
 * Called at driver shutdown.
 */
void radeon_device_fini(struct radeon_device *rdev)
{
	DRM_INFO("radeon: finishing device.\n");
	rdev->shutdown = true;
	/* evict vram memory */
	radeon_bo_evict_vram(rdev);
	radeon_fini(rdev);
	if (!pci_is_thunderbolt_attached(rdev->pdev))
		vga_switcheroo_unregister_client(rdev->pdev);
	if (rdev->flags & RADEON_IS_PX)
		vga_switcheroo_fini_domain_pm_ops(rdev->dev);
	vga_client_register(rdev->pdev, NULL, NULL, NULL);
	if (rdev->rio_mem)
		pci_iounmap(rdev->pdev, rdev->rio_mem);
	rdev->rio_mem = NULL;
	iounmap(rdev->rmmio);
	rdev->rmmio = NULL;
	if (rdev->family >= CHIP_BONAIRE)
		radeon_doorbell_fini(rdev);
}

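/**
 * radeon_suspend_kms - initiate device suspend
 *
 * @dev: drm dev pointer
 * @suspend: suspend state
 * @fbcon: console suspend state
 * @freeze: hibernation (freeze) path
 *
 * Puts the hw in a suspended state (all asics): turns off the display hw,
 * unpins buffers, evicts VRAM, waits for the rings to idle and suspends
 * the hardware (and optionally the PCI device).
 * Returns 0 for success or an error on failure.
 * Called at driver suspend.
 */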
int radeon_suspend_kms(struct drm_device *dev, bool suspend,
		       bool fbcon, bool freeze)
{
	struct radeon_device *rdev;
	struct drm_crtc *crtc;
	struct drm_connector *connector;
	int i, r;

	if (dev == NULL || dev->dev_private == NULL) {
		return -ENODEV;
	}

	rdev = dev->dev_private;

	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	drm_kms_helper_poll_disable(dev);

	drm_modeset_lock_all(dev);
	/* turn off display hw */
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
	}
	drm_modeset_unlock_all(dev);

	/* unpin the front buffers and cursors */
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
		struct radeon_framebuffer *rfb = to_radeon_framebuffer(crtc->primary->fb);
		struct radeon_bo *robj;

		if (radeon_crtc->cursor_bo) {
			struct radeon_bo *robj = gem_to_radeon_bo(radeon_crtc->cursor_bo);
			r = radeon_bo_reserve(robj, false);
			if (r == 0) {
				radeon_bo_unpin(robj);
				radeon_bo_unreserve(robj);
			}
		}

		if (rfb == NULL || rfb->obj == NULL) {
			continue;
		}
		robj = gem_to_radeon_bo(rfb->obj);
		/* don't unpin kernel fb objects */
		if (!radeon_fbdev_robj_is_fb(rdev, robj)) {
			r = radeon_bo_reserve(robj, false);
			if (r == 0) {
				radeon_bo_unpin(robj);
				radeon_bo_unreserve(robj);
			}
		}
	}
	/* evict vram memory */
	radeon_bo_evict_vram(rdev);

	/* wait for gpu to finish processing current batch */
	for (i = 0; i < RADEON_NUM_RINGS; i++) {
		r = radeon_fence_wait_empty(rdev, i);
		if (r) {
			/* delay GPU reset to resume */
			radeon_fence_driver_force_completion(rdev, i);
		}
	}

	radeon_save_bios_scratch_regs(rdev);

	radeon_suspend(rdev);
	radeon_hpd_fini(rdev);
	/* evict remaining vram memory
	 * This second call to evict vram is to evict the gart page table
	 * allocated at VRAM.
	 */
	radeon_bo_evict_vram(rdev);

	radeon_agp_suspend(rdev);

	pci_save_state(dev->pdev);
	if (freeze && rdev->family >= CHIP_CEDAR) {
		rdev->asic->asic_reset(rdev, true);
		pci_restore_state(dev->pdev);
	} else if (suspend) {
		/* Shut down the device */
		pci_disable_device(dev->pdev);
		pci_set_power_state(dev->pdev, PCI_D3hot);
	}

	if (fbcon) {
		console_lock();
		radeon_fbdev_set_suspend(rdev, 1);
		console_unlock();
	}
	return 0;
}

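/**
 * radeon_resume_kms - initiate device resume
 *
 * @dev: drm dev pointer
 * @resume: resume state
 * @fbcon: console state
 *
 * Brings the hw back to an operating state (all asics): re-enables the PCI
 * device if needed, resumes the asic, re-pins cursors, re-inits encoders
 * and hpd, restores the display configuration and re-enables polling.
 * Returns 0 for success or an error on failure.
 * Called at driver resume.
 */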
int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
{
	struct drm_connector *connector;
	struct radeon_device *rdev = dev->dev_private;
	struct drm_crtc *crtc;
	int r;

	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	if (fbcon) {
		console_lock();
	}
	if (resume) {
		pci_set_power_state(dev->pdev, PCI_D0);
		pci_restore_state(dev->pdev);
		if (pci_enable_device(dev->pdev)) {
			if (fbcon)
				console_unlock();
			return -1;
		}
	}
	/* resume AGP if in use */
	radeon_agp_resume(rdev);
	radeon_resume(rdev);

	r = radeon_ib_ring_tests(rdev);
	if (r)
		DRM_ERROR("ib ring test failed (%d).\n", r);

	if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
		/* do dpm late init */
		r = radeon_pm_late_init(rdev);
		if (r) {
			rdev->pm.dpm_enabled = false;
			DRM_ERROR("radeon_pm_late_init failed, disabling dpm\n");
		}
	} else {
		/* resume old pm late */
		radeon_pm_resume(rdev);
	}

	radeon_restore_bios_scratch_regs(rdev);

	/* pin cursors */
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);

		if (radeon_crtc->cursor_bo) {
			struct radeon_bo *robj = gem_to_radeon_bo(radeon_crtc->cursor_bo);
			r = radeon_bo_reserve(robj, false);
			if (r == 0) {
				/* legacy (pre-AVIVO) cursors need a 27-bit offset restriction */
				r = radeon_bo_pin_restricted(robj,
							     RADEON_GEM_DOMAIN_VRAM,
							     ASIC_IS_AVIVO(rdev) ?
							     0 : 1 << 27,
							     &radeon_crtc->cursor_addr);
				if (r != 0)
					DRM_ERROR("Failed to pin cursor BO (%d)\n", r);
				radeon_bo_unreserve(robj);
			}
		}
	}

	/* init dig PHYs, disp eng pll */
	if (rdev->is_atom_bios) {
		radeon_atom_encoder_init(rdev);
		radeon_atom_disp_eng_pll_init(rdev);
		/* turn on the backlight */
		if (rdev->mode_info.bl_encoder) {
			u8 bl_level = radeon_get_backlight_level(rdev,
								 rdev->mode_info.bl_encoder);
			radeon_set_backlight_level(rdev, rdev->mode_info.bl_encoder,
						   bl_level);
		}
	}
	/* reset hpd state */
	radeon_hpd_init(rdev);
	/* restore the display configuration */
	if (fbcon) {
		drm_helper_resume_force_mode(dev);
		/* turn on display hw */
		drm_modeset_lock_all(dev);
		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
			drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
		}
		drm_modeset_unlock_all(dev);
	}

	drm_kms_helper_poll_enable(dev);

	/* set the power state here in case we are a PX system or headless */
	if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled)
		radeon_pm_compute_clocks(rdev);

	if (fbcon) {
		radeon_fbdev_set_suspend(rdev, 0);
		console_unlock();
	}

	return 0;
}

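/**
 * radeon_gpu_reset - reset the asic
 *
 * @rdev: radeon_device pointer
 *
 * Attempt a GPU reset (all asics): saves the unprocessed ring contents,
 * resets and resumes the asic, restores the saved ring data and re-runs
 * the IB tests.
 * Returns 0 for success or a negative error code on failure.
 */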
int radeon_gpu_reset(struct radeon_device *rdev)
{
	unsigned ring_sizes[RADEON_NUM_RINGS];
	uint32_t *ring_data[RADEON_NUM_RINGS];

	bool saved = false;

	int i, r;
	int resched;

	down_write(&rdev->exclusive_lock);

	if (!rdev->needs_reset) {
		up_write(&rdev->exclusive_lock);
		return 0;
	}

	atomic_inc(&rdev->gpu_reset_counter);

	radeon_save_bios_scratch_regs(rdev);
	/* block TTM */
	resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
	radeon_suspend(rdev);
	radeon_hpd_fini(rdev);

	/* save the unprocessed commands from each ring */
	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		ring_sizes[i] = radeon_ring_backup(rdev, &rdev->ring[i],
						   &ring_data[i]);
		if (ring_sizes[i]) {
			saved = true;
			dev_info(rdev->dev, "Saved %d dwords of commands "
				 "on ring %d.\n", ring_sizes[i], i);
		}
	}

	r = radeon_asic_reset(rdev);
	if (!r) {
		dev_info(rdev->dev, "GPU reset succeeded, trying to resume\n");
		radeon_resume(rdev);
	}

	radeon_restore_bios_scratch_regs(rdev);

	/* restore or discard the saved ring contents */
	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		if (!r && ring_data[i]) {
			radeon_ring_restore(rdev, &rdev->ring[i],
					    ring_sizes[i], ring_data[i]);
		} else {
			radeon_fence_driver_force_completion(rdev, i);
			kfree(ring_data[i]);
		}
	}

	if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
		/* do dpm late init */
		r = radeon_pm_late_init(rdev);
		if (r) {
			rdev->pm.dpm_enabled = false;
			DRM_ERROR("radeon_pm_late_init failed, disabling dpm\n");
		}
	} else {
		/* resume old pm late */
		radeon_pm_resume(rdev);
	}

	/* init dig PHYs, disp eng pll */
	if (rdev->is_atom_bios) {
		radeon_atom_encoder_init(rdev);
		radeon_atom_disp_eng_pll_init(rdev);
		/* turn on the backlight */
		if (rdev->mode_info.bl_encoder) {
			u8 bl_level = radeon_get_backlight_level(rdev,
								 rdev->mode_info.bl_encoder);
			radeon_set_backlight_level(rdev, rdev->mode_info.bl_encoder,
						   bl_level);
		}
	}
	/* reset hpd state */
	radeon_hpd_init(rdev);

	ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);

	rdev->in_reset = true;
	rdev->needs_reset = false;

	downgrade_write(&rdev->exclusive_lock);

	drm_helper_resume_force_mode(rdev->ddev);

	/* set the power state here in case we are a PX system or headless */
	if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled)
		radeon_pm_compute_clocks(rdev);

	if (!r) {
		r = radeon_ib_ring_tests(rdev);
		if (r && saved)
			r = -EAGAIN;
	} else {
		/* bad news, how to tell it to userspace ? */
		dev_info(rdev->dev, "GPU reset failed\n");
	}

	rdev->needs_reset = r == -EAGAIN;
	rdev->in_reset = false;

	up_read(&rdev->exclusive_lock);
	return r;
}

/*
 * Debugfs
 */
int radeon_debugfs_add_files(struct radeon_device *rdev,
			     struct drm_info_list *files,
			     unsigned nfiles)
{
	unsigned i;

	for (i = 0; i < rdev->debugfs_count; i++) {
		if (rdev->debugfs[i].files == files) {
			/* Already registered */
			return 0;
		}
	}

	i = rdev->debugfs_count + 1;
	if (i > RADEON_DEBUGFS_MAX_COMPONENTS) {
		DRM_ERROR("Reached maximum number of debugfs components.\n");
		DRM_ERROR("Report so we increase "
			  "RADEON_DEBUGFS_MAX_COMPONENTS.\n");
		return -EINVAL;
	}
	rdev->debugfs[rdev->debugfs_count].files = files;
	rdev->debugfs[rdev->debugfs_count].num_files = nfiles;
	rdev->debugfs_count = i;
#if defined(CONFIG_DEBUG_FS)
	drm_debugfs_create_files(files, nfiles,
				 rdev->ddev->primary->debugfs_root,
				 rdev->ddev->primary);
#endif
	return 0;
}