/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/console.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_cache.h>
#include <drm/radeon_drm.h>
#include <linux/pm_runtime.h>
#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
#include <linux/efi.h>
#include "radeon_reg.h"
#include "radeon.h"
#include "atom.h"

static const char radeon_family_name[][16] = {
	"R100",
	"RV100",
	"RS100",
	"RV200",
	"RS200",
	"R200",
	"RV250",
	"RS300",
	"RV280",
	"R300",
	"R350",
	"RV350",
	"RV380",
	"R420",
	"R423",
	"RV410",
	"RS400",
	"RS480",
	"RS600",
	"RS690",
	"RS740",
	"RV515",
	"R520",
	"RV530",
	"RV560",
	"RV570",
	"R580",
	"R600",
	"RV610",
	"RV630",
	"RV670",
	"RV620",
	"RV635",
	"RS780",
	"RS880",
	"RV770",
	"RV730",
	"RV710",
	"RV740",
	"CEDAR",
	"REDWOOD",
	"JUNIPER",
	"CYPRESS",
	"HEMLOCK",
	"PALM",
	"SUMO",
	"SUMO2",
	"BARTS",
	"TURKS",
	"CAICOS",
	"CAYMAN",
	"ARUBA",
	"TAHITI",
	"PITCAIRN",
	"VERDE",
	"OLAND",
	"HAINAN",
	"BONAIRE",
	"KAVERI",
	"KABINI",
	"HAWAII",
	"MULLINS",
	"LAST",
};

#if defined(CONFIG_VGA_SWITCHEROO)
bool radeon_has_atpx_dgpu_power_cntl(void);
bool radeon_is_atpx_hybrid(void);
#else
static inline bool radeon_has_atpx_dgpu_power_cntl(void) { return false; }
static inline bool radeon_is_atpx_hybrid(void) { return false; }
#endif

#define RADEON_PX_QUIRK_DISABLE_PX  (1 << 0)

struct radeon_px_quirk {
	u32 chip_vendor;
	u32 chip_device;
	u32 subsys_vendor;
	u32 subsys_device;
	u32 px_quirk_flags;
};

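/*
 * PX quirk table: systems where PX is known to be broken and must be
 * disabled, matched on chip and subsystem vendor/device IDs.
 */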
static struct radeon_px_quirk radeon_px_quirk_list[] = {
	{ PCI_VENDOR_ID_ATI, 0x6760, 0x1025, 0x0672, RADEON_PX_QUIRK_DISABLE_PX },
	{ PCI_VENDOR_ID_ATI, 0x6741, 0x1043, 0x108c, RADEON_PX_QUIRK_DISABLE_PX },
	{ PCI_VENDOR_ID_ATI, 0x6840, 0x1043, 0x2122, RADEON_PX_QUIRK_DISABLE_PX },
	{ PCI_VENDOR_ID_ATI, 0x6741, 0x1043, 0x2122, RADEON_PX_QUIRK_DISABLE_PX },
	{ PCI_VENDOR_ID_ATI, 0x6840, 0x1043, 0x2123, RADEON_PX_QUIRK_DISABLE_PX },
	{ 0, 0, 0, 0, 0 },
};

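/**
 * radeon_is_px - check if the device is a PX (PowerXpress) device
 *
 * @dev: drm_device pointer
 *
 * Returns true if the device is a PX dGPU, false otherwise.
 */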
bool radeon_is_px(struct drm_device *dev)
{
	struct radeon_device *rdev = dev->dev_private;

	if (rdev->flags & RADEON_IS_PX)
		return true;
	return false;
}

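/**
 * radeon_device_handle_px_quirks - apply PX quirks
 *
 * @rdev: radeon_device pointer
 *
 * Disables PX for systems in the quirk table, or when the ATPX method
 * is neither hybrid nor able to power down the dGPU.
 */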
static void radeon_device_handle_px_quirks(struct radeon_device *rdev)
{
	struct radeon_px_quirk *p = radeon_px_quirk_list;

	while (p && p->chip_device != 0) {
		if (rdev->pdev->vendor == p->chip_vendor &&
		    rdev->pdev->device == p->chip_device &&
		    rdev->pdev->subsystem_vendor == p->subsys_vendor &&
		    rdev->pdev->subsystem_device == p->subsys_device) {
			rdev->px_quirk_flags = p->px_quirk_flags;
			break;
		}
		++p;
	}

	if (rdev->px_quirk_flags & RADEON_PX_QUIRK_DISABLE_PX)
		rdev->flags &= ~RADEON_IS_PX;

	if (!radeon_is_atpx_hybrid() &&
	    !radeon_has_atpx_dgpu_power_cntl())
		rdev->flags &= ~RADEON_IS_PX;
}

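/**
 * radeon_program_register_sequence - program an array of registers.
 *
 * @rdev: radeon_device pointer
 * @registers: pointer to the register array
 * @array_size: size of the register array
 *
 * Programs an array of registers given as (reg, and_mask, or_mask)
 * triplets (all asics).
 */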
void radeon_program_register_sequence(struct radeon_device *rdev,
				      const u32 *registers,
				      const u32 array_size)
{
	u32 tmp, reg, and_mask, or_mask;
	int i;

	if (array_size % 3)
		return;

	for (i = 0; i < array_size; i += 3) {
		reg = registers[i + 0];
		and_mask = registers[i + 1];
		or_mask = registers[i + 2];

		if (and_mask == 0xffffffff) {
			tmp = or_mask;
		} else {
			tmp = RREG32(reg);
			tmp &= ~and_mask;
			tmp |= or_mask;
		}
		WREG32(reg, tmp);
	}
}

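/**
 * radeon_pci_config_reset - reset the GPU
 *
 * @rdev: radeon_device pointer
 *
 * Resets the GPU using the pci config reset sequence.
 */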
void radeon_pci_config_reset(struct radeon_device *rdev)
{
	pci_write_config_dword(rdev->pdev, 0x7c, RADEON_ASIC_RESET_DATA);
}

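/**
 * radeon_surface_init - Clear GPU surface registers.
 *
 * @rdev: radeon_device pointer
 *
 * Clear GPU surface registers (r1xx-r5xx).
 */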
void radeon_surface_init(struct radeon_device *rdev)
{
	if (rdev->family < CHIP_R600) {
		int i;

		for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {
			if (rdev->surface_regs[i].bo)
				radeon_bo_get_surface_reg(rdev->surface_regs[i].bo);
			else
				radeon_clear_surface_reg(rdev, i);
		}

		WREG32(RADEON_SURFACE_CNTL, 0);
	}
}

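/*
 * GPU scratch registers helpers function.
 */
/**
 * radeon_scratch_init - Init scratch register driver information.
 *
 * @rdev: radeon_device pointer
 *
 * Init CP scratch register driver information (all asics).
 */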
void radeon_scratch_init(struct radeon_device *rdev)
{
	int i;

	if (rdev->family < CHIP_R300) {
		rdev->scratch.num_reg = 5;
	} else {
		rdev->scratch.num_reg = 7;
	}
	rdev->scratch.reg_base = RADEON_SCRATCH_REG0;
	for (i = 0; i < rdev->scratch.num_reg; i++) {
		rdev->scratch.free[i] = true;
		rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4);
	}
}

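/**
 * radeon_scratch_get - Allocate a scratch register
 *
 * @rdev: radeon_device pointer
 * @reg: scratch register mmio offset
 *
 * Allocate a CP scratch register for use by the driver (all asics).
 * Returns 0 on success or -EINVAL on failure.
 */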
int radeon_scratch_get(struct radeon_device *rdev, uint32_t *reg)
{
	int i;

	for (i = 0; i < rdev->scratch.num_reg; i++) {
		if (rdev->scratch.free[i]) {
			rdev->scratch.free[i] = false;
			*reg = rdev->scratch.reg[i];
			return 0;
		}
	}
	return -EINVAL;
}

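/**
 * radeon_scratch_free - Free a scratch register
 *
 * @rdev: radeon_device pointer
 * @reg: scratch register mmio offset
 *
 * Free a CP scratch register allocated for use by the driver (all asics).
 */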
void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg)
{
	int i;

	for (i = 0; i < rdev->scratch.num_reg; i++) {
		if (rdev->scratch.reg[i] == reg) {
			rdev->scratch.free[i] = true;
			return;
		}
	}
}

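/*
 * GPU doorbell aperture helpers function.
 */
/**
 * radeon_doorbell_init - Init doorbell driver information.
 *
 * @rdev: radeon_device pointer
 *
 * Init doorbell driver information (CIK).
 * Returns 0 on success, error on failure.
 */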
static int radeon_doorbell_init(struct radeon_device *rdev)
{
	rdev->doorbell.base = pci_resource_start(rdev->pdev, 2);
	rdev->doorbell.size = pci_resource_len(rdev->pdev, 2);

	rdev->doorbell.num_doorbells = min_t(u32, rdev->doorbell.size / sizeof(u32), RADEON_MAX_DOORBELLS);
	if (rdev->doorbell.num_doorbells == 0)
		return -EINVAL;

	rdev->doorbell.ptr = ioremap(rdev->doorbell.base, rdev->doorbell.num_doorbells * sizeof(u32));
	if (rdev->doorbell.ptr == NULL) {
		return -ENOMEM;
	}
	DRM_INFO("doorbell mmio base: 0x%08X\n", (uint32_t)rdev->doorbell.base);
	DRM_INFO("doorbell mmio size: %u\n", (unsigned)rdev->doorbell.size);

	memset(&rdev->doorbell.used, 0, sizeof(rdev->doorbell.used));

	return 0;
}

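/**
 * radeon_doorbell_fini - Tear down doorbell driver information.
 *
 * @rdev: radeon_device pointer
 *
 * Tear down doorbell driver information (CIK).
 */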
static void radeon_doorbell_fini(struct radeon_device *rdev)
{
	iounmap(rdev->doorbell.ptr);
	rdev->doorbell.ptr = NULL;
}

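/**
 * radeon_doorbell_get - Allocate a doorbell entry
 *
 * @rdev: radeon_device pointer
 * @doorbell: doorbell index
 *
 * Allocate a doorbell for use by the driver (all asics).
 * Returns 0 on success or -EINVAL on failure.
 */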
int radeon_doorbell_get(struct radeon_device *rdev, u32 *doorbell)
{
	unsigned long offset = find_first_zero_bit(rdev->doorbell.used, rdev->doorbell.num_doorbells);
	if (offset < rdev->doorbell.num_doorbells) {
		__set_bit(offset, rdev->doorbell.used);
		*doorbell = offset;
		return 0;
	} else {
		return -EINVAL;
	}
}

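/**
 * radeon_doorbell_free - Free a doorbell entry
 *
 * @rdev: radeon_device pointer
 * @doorbell: doorbell index
 *
 * Free a doorbell allocated for use by the driver (all asics).
 */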
void radeon_doorbell_free(struct radeon_device *rdev, u32 doorbell)
{
	if (doorbell < rdev->doorbell.num_doorbells)
		__clear_bit(doorbell, rdev->doorbell.used);
}

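/*
 * radeon_wb_*()
 * Writeback is the method by which the GPU updates special pages in
 * memory with the status of certain GPU events (fences, ring pointers,
 * etc.).
 */

/**
 * radeon_wb_disable - Disable Writeback
 *
 * @rdev: radeon_device pointer
 *
 * Disables Writeback (all asics).  Used for suspend.
 */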
void radeon_wb_disable(struct radeon_device *rdev)
{
	rdev->wb.enabled = false;
}

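/**
 * radeon_wb_fini - Disable Writeback and free memory
 *
 * @rdev: radeon_device pointer
 *
 * Disables Writeback and frees the Writeback memory (all asics).
 * Used at driver shutdown.
 */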
void radeon_wb_fini(struct radeon_device *rdev)
{
	radeon_wb_disable(rdev);
	if (rdev->wb.wb_obj) {
		if (!radeon_bo_reserve(rdev->wb.wb_obj, false)) {
			radeon_bo_kunmap(rdev->wb.wb_obj);
			radeon_bo_unpin(rdev->wb.wb_obj);
			radeon_bo_unreserve(rdev->wb.wb_obj);
		}
		radeon_bo_unref(&rdev->wb.wb_obj);
		rdev->wb.wb = NULL;
		rdev->wb.wb_obj = NULL;
	}
}

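/**
 * radeon_wb_init - Init Writeback driver info and allocate memory
 *
 * @rdev: radeon_device pointer
 *
 * Initializes writeback and allocates writeback memory (all asics).
 * Used at driver startup.
 * Returns 0 on success or an error on failure.
 */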
int radeon_wb_init(struct radeon_device *rdev)
{
	int r;

	if (rdev->wb.wb_obj == NULL) {
		r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true,
				     RADEON_GEM_DOMAIN_GTT, 0, NULL, NULL,
				     &rdev->wb.wb_obj);
		if (r) {
			dev_warn(rdev->dev, "(%d) create WB bo failed\n", r);
			return r;
		}
		r = radeon_bo_reserve(rdev->wb.wb_obj, false);
		if (unlikely(r != 0)) {
			radeon_wb_fini(rdev);
			return r;
		}
		r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
				  &rdev->wb.gpu_addr);
		if (r) {
			radeon_bo_unreserve(rdev->wb.wb_obj);
			dev_warn(rdev->dev, "(%d) pin WB bo failed\n", r);
			radeon_wb_fini(rdev);
			return r;
		}
		r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
		radeon_bo_unreserve(rdev->wb.wb_obj);
		if (r) {
			dev_warn(rdev->dev, "(%d) map WB bo failed\n", r);
			radeon_wb_fini(rdev);
			return r;
		}
	}

	memset((char *)rdev->wb.wb, 0, RADEON_GPU_PAGE_SIZE);

	rdev->wb.use_event = false;

	if (radeon_no_wb == 1) {
		rdev->wb.enabled = false;
	} else {
		if (rdev->flags & RADEON_IS_AGP) {
			rdev->wb.enabled = false;
		} else if (rdev->family < CHIP_R300) {
			rdev->wb.enabled = false;
		} else {
			rdev->wb.enabled = true;
			if (rdev->family >= CHIP_R600) {
				rdev->wb.use_event = true;
			}
		}
	}

	if (rdev->family >= CHIP_PALM) {
		rdev->wb.enabled = true;
		rdev->wb.use_event = true;
	}

	dev_info(rdev->dev, "WB %sabled\n", rdev->wb.enabled ? "en" : "dis");

	return 0;
}

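/**
 * radeon_vram_location - try to find VRAM location
 *
 * @rdev: radeon device structure holding all necessary informations
 * @mc: memory controller structure holding memory informations
 * @base: base address at which to put VRAM
 *
 * Function will try to place VRAM at the base address provided.  If
 * VRAM does not fit in the MC address space, or would collide with the
 * AGP/GTT aperture on AGP systems, the amount of VRAM used is limited
 * to the PCI aperture size.  The radeon_vram_limit module parameter
 * can further cap the usable VRAM size.
 */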
void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 base)
{
	uint64_t limit = (uint64_t)radeon_vram_limit << 20;

	mc->vram_start = base;
	if (mc->mc_vram_size > (rdev->mc.mc_mask - base + 1)) {
		dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
		mc->real_vram_size = mc->aper_size;
		mc->mc_vram_size = mc->aper_size;
	}
	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
	if (rdev->flags & RADEON_IS_AGP && mc->vram_end > mc->gtt_start && mc->vram_start <= mc->gtt_end) {
		dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
		mc->real_vram_size = mc->aper_size;
		mc->mc_vram_size = mc->aper_size;
	}
	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
	if (limit && limit < mc->real_vram_size)
		mc->real_vram_size = limit;
	dev_info(rdev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
		 mc->mc_vram_size >> 20, mc->vram_start,
		 mc->vram_end, mc->real_vram_size >> 20);
}

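/**
 * radeon_gtt_location - try to find GTT location
 *
 * @rdev: radeon device structure holding all necessary informations
 * @mc: memory controller structure holding memory informations
 *
 * Function will place the GTT before or after VRAM, whichever side of
 * the MC address space has more room.  If the GTT is bigger than the
 * space left, it is shrunk to fit, so this function never fails.
 */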
void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
{
	u64 size_af, size_bf;

	size_af = ((rdev->mc.mc_mask - mc->vram_end) + mc->gtt_base_align) & ~mc->gtt_base_align;
	size_bf = mc->vram_start & ~mc->gtt_base_align;
	if (size_bf > size_af) {
		if (mc->gtt_size > size_bf) {
			dev_warn(rdev->dev, "limiting GTT\n");
			mc->gtt_size = size_bf;
		}
		mc->gtt_start = (mc->vram_start & ~mc->gtt_base_align) - mc->gtt_size;
	} else {
		if (mc->gtt_size > size_af) {
			dev_warn(rdev->dev, "limiting GTT\n");
			mc->gtt_size = size_af;
		}
		mc->gtt_start = (mc->vram_end + 1 + mc->gtt_base_align) & ~mc->gtt_base_align;
	}
	mc->gtt_end = mc->gtt_start + mc->gtt_size - 1;
	dev_info(rdev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n",
		 mc->gtt_size >> 20, mc->gtt_start, mc->gtt_end);
}

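/**
 * radeon_device_is_virtual - check if we are running in a virtual environment
 *
 * Check if the asic has been passed through to a VM (all asics).
 * Used at driver startup.
 * Returns true if virtual or false if not.
 */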
bool radeon_device_is_virtual(void)
{
#ifdef CONFIG_X86
	return boot_cpu_has(X86_FEATURE_HYPERVISOR);
#else
	return false;
#endif
}

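/**
 * radeon_card_posted - check if the hw has already been initialized
 *
 * @rdev: radeon_device pointer
 *
 * Check if the asic has been initialized (all asics), first by looking
 * at the CRTC enable bits and then at the memory size register.
 * Used at driver startup.
 * Returns true if initialized or false if not.
 */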
bool radeon_card_posted(struct radeon_device *rdev)
{
	uint32_t reg;

	if (rdev->family >= CHIP_BONAIRE &&
	    radeon_device_is_virtual())
		return false;

	if (efi_enabled(EFI_BOOT) &&
	    (rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE) &&
	    (rdev->family < CHIP_R600))
		return false;

	if (ASIC_IS_NODCE(rdev))
		goto check_memsize;

	if (ASIC_IS_DCE4(rdev)) {
		reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
		      RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET);
		if (rdev->num_crtc >= 4) {
			reg |= RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) |
			       RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET);
		}
		if (rdev->num_crtc >= 6) {
			reg |= RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) |
			       RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
		}
		if (reg & EVERGREEN_CRTC_MASTER_EN)
			return true;
	} else if (ASIC_IS_AVIVO(rdev)) {
		reg = RREG32(AVIVO_D1CRTC_CONTROL) |
		      RREG32(AVIVO_D2CRTC_CONTROL);
		if (reg & AVIVO_CRTC_EN) {
			return true;
		}
	} else {
		reg = RREG32(RADEON_CRTC_GEN_CNTL) |
		      RREG32(RADEON_CRTC2_GEN_CNTL);
		if (reg & RADEON_CRTC_EN) {
			return true;
		}
	}

check_memsize:
	if (rdev->family >= CHIP_R600)
		reg = RREG32(R600_CONFIG_MEMSIZE);
	else
		reg = RREG32(RADEON_CONFIG_MEMSIZE);

	if (reg)
		return true;

	return false;
}

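/**
 * radeon_update_bandwidth_info - update display bandwidth params
 *
 * @rdev: radeon_device pointer
 *
 * Used when sclk/mclk are switched or display modes are set (all asics).
 */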
void radeon_update_bandwidth_info(struct radeon_device *rdev)
{
	fixed20_12 a;
	u32 sclk = rdev->pm.current_sclk;
	u32 mclk = rdev->pm.current_mclk;

	a.full = dfixed_const(100);
	rdev->pm.sclk.full = dfixed_const(sclk);
	rdev->pm.sclk.full = dfixed_div(rdev->pm.sclk, a);
	rdev->pm.mclk.full = dfixed_const(mclk);
	rdev->pm.mclk.full = dfixed_div(rdev->pm.mclk, a);

	if (rdev->flags & RADEON_IS_IGP) {
		a.full = dfixed_const(16);
		rdev->pm.core_bandwidth.full = dfixed_div(rdev->pm.sclk, a);
	}
}

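/**
 * radeon_boot_test_post_card - check and possibly initialize the hw
 *
 * @rdev: radeon_device pointer
 *
 * Check if the asic has been initialized and if not, attempt to
 * initialize it (all asics).
 * Returns true if initialized or false if not.
 */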
bool radeon_boot_test_post_card(struct radeon_device *rdev)
{
	if (radeon_card_posted(rdev))
		return true;

	if (rdev->bios) {
		DRM_INFO("GPU not posted. posting now...\n");
		if (rdev->is_atom_bios)
			atom_asic_init(rdev->mode_info.atom_context);
		else
			radeon_combios_asic_init(rdev->ddev);
		return true;
	} else {
		dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
		return false;
	}
}

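/**
 * radeon_dummy_page_init - init dummy page used by the driver
 *
 * @rdev: radeon_device pointer
 *
 * Allocate the dummy page used by the driver (all asics).
 * This dummy page is used by the driver as a filler for gart entries
 * when pages are taken out of the GART.
 * Returns 0 on success, -ENOMEM on failure.
 */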
int radeon_dummy_page_init(struct radeon_device *rdev)
{
	if (rdev->dummy_page.page)
		return 0;
	rdev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO);
	if (rdev->dummy_page.page == NULL)
		return -ENOMEM;
	rdev->dummy_page.addr = pci_map_page(rdev->pdev, rdev->dummy_page.page,
					     0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	if (pci_dma_mapping_error(rdev->pdev, rdev->dummy_page.addr)) {
		dev_err(&rdev->pdev->dev, "Failed to DMA MAP the dummy page\n");
		__free_page(rdev->dummy_page.page);
		rdev->dummy_page.page = NULL;
		return -ENOMEM;
	}
	rdev->dummy_page.entry = radeon_gart_get_page_entry(rdev->dummy_page.addr,
							    RADEON_GART_PAGE_DUMMY);
	return 0;
}

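/**
 * radeon_dummy_page_fini - free dummy page used by the driver
 *
 * @rdev: radeon_device pointer
 *
 * Frees the dummy page used by the driver (all asics).
 */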
void radeon_dummy_page_fini(struct radeon_device *rdev)
{
	if (rdev->dummy_page.page == NULL)
		return;
	pci_unmap_page(rdev->pdev, rdev->dummy_page.addr,
		       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	__free_page(rdev->dummy_page.page);
	rdev->dummy_page.page = NULL;
}

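/*
 * ATOM accessor methods.
 *
 * ATOM is an interpreted byte code stored in tables in the vbios.  The
 * driver registers callbacks to access registers, and the interpreter
 * in the driver parses the tables and executes them to program specific
 * hardware blocks.
 */

/**
 * cail_pll_read - read PLL register
 *
 * @info: atom card_info pointer
 * @reg: PLL register offset
 *
 * Provides a PLL register accessor for the atom interpreter (r4xx+).
 * Returns the value of the PLL register.
 */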
static uint32_t cail_pll_read(struct card_info *info, uint32_t reg)
{
	struct radeon_device *rdev = info->dev->dev_private;
	uint32_t r;

	r = rdev->pll_rreg(rdev, reg);
	return r;
}

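/**
 * cail_pll_write - write PLL register
 *
 * @info: atom card_info pointer
 * @reg: PLL register offset
 * @val: value to write to the PLL register
 *
 * Provides a PLL register accessor for the atom interpreter (r4xx+).
 */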
static void cail_pll_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct radeon_device *rdev = info->dev->dev_private;

	rdev->pll_wreg(rdev, reg, val);
}

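/**
 * cail_mc_read - read MC (Memory Controller) register
 *
 * @info: atom card_info pointer
 * @reg: MC register offset
 *
 * Provides an MC register accessor for the atom interpreter (r4xx+).
 * Returns the value of the MC register.
 */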
static uint32_t cail_mc_read(struct card_info *info, uint32_t reg)
{
	struct radeon_device *rdev = info->dev->dev_private;
	uint32_t r;

	r = rdev->mc_rreg(rdev, reg);
	return r;
}

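/**
 * cail_mc_write - write MC (Memory Controller) register
 *
 * @info: atom card_info pointer
 * @reg: MC register offset
 * @val: value to write to the MC register
 *
 * Provides an MC register accessor for the atom interpreter (r4xx+).
 */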
static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct radeon_device *rdev = info->dev->dev_private;

	rdev->mc_wreg(rdev, reg, val);
}

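/**
 * cail_reg_write - write MMIO register
 *
 * @info: atom card_info pointer
 * @reg: MMIO register offset
 * @val: value to write to the register
 *
 * Provides an MMIO register accessor for the atom interpreter (r4xx+).
 */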
static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct radeon_device *rdev = info->dev->dev_private;

	WREG32(reg*4, val);
}

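/**
 * cail_reg_read - read MMIO register
 *
 * @info: atom card_info pointer
 * @reg: MMIO register offset
 *
 * Provides an MMIO register accessor for the atom interpreter (r4xx+).
 * Returns the value of the MMIO register.
 */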
static uint32_t cail_reg_read(struct card_info *info, uint32_t reg)
{
	struct radeon_device *rdev = info->dev->dev_private;
	uint32_t r;

	r = RREG32(reg*4);
	return r;
}

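/**
 * cail_ioreg_write - write IO register
 *
 * @info: atom card_info pointer
 * @reg: IO register offset
 * @val: value to write to the register
 *
 * Provides an IO register accessor for the atom interpreter (r4xx+).
 */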
static void cail_ioreg_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct radeon_device *rdev = info->dev->dev_private;

	WREG32_IO(reg*4, val);
}

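/**
 * cail_ioreg_read - read IO register
 *
 * @info: atom card_info pointer
 * @reg: IO register offset
 *
 * Provides an IO register accessor for the atom interpreter (r4xx+).
 * Returns the value of the IO register.
 */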
static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg)
{
	struct radeon_device *rdev = info->dev->dev_private;
	uint32_t r;

	r = RREG32_IO(reg*4);
	return r;
}

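/**
 * radeon_atombios_init - init the driver info and callbacks for atombios
 *
 * @rdev: radeon_device pointer
 *
 * Initializes the driver info and register access callbacks for the
 * ATOM interpreter (r4xx+).
 * Returns 0 on success, -ENOMEM on failure.
 * Called at driver startup.
 */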
int radeon_atombios_init(struct radeon_device *rdev)
{
	struct card_info *atom_card_info =
	    kzalloc(sizeof(struct card_info), GFP_KERNEL);

	if (!atom_card_info)
		return -ENOMEM;

	rdev->mode_info.atom_card_info = atom_card_info;
	atom_card_info->dev = rdev->ddev;
	atom_card_info->reg_read = cail_reg_read;
	atom_card_info->reg_write = cail_reg_write;

	if (rdev->rio_mem) {
		atom_card_info->ioreg_read = cail_ioreg_read;
		atom_card_info->ioreg_write = cail_ioreg_write;
	} else {
		DRM_ERROR("Unable to find PCI I/O BAR; using MMIO for ATOM IIO\n");
		atom_card_info->ioreg_read = cail_reg_read;
		atom_card_info->ioreg_write = cail_reg_write;
	}
	atom_card_info->mc_read = cail_mc_read;
	atom_card_info->mc_write = cail_mc_write;
	atom_card_info->pll_read = cail_pll_read;
	atom_card_info->pll_write = cail_pll_write;

	rdev->mode_info.atom_context = atom_parse(atom_card_info, rdev->bios);
	if (!rdev->mode_info.atom_context) {
		radeon_atombios_fini(rdev);
		return -ENOMEM;
	}

	mutex_init(&rdev->mode_info.atom_context->mutex);
	mutex_init(&rdev->mode_info.atom_context->scratch_mutex);
	radeon_atom_initialize_bios_scratch_regs(rdev->ddev);
	atom_allocate_fb_scratch(rdev->mode_info.atom_context);
	return 0;
}

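/**
 * radeon_atombios_fini - free the driver info and callbacks for atombios
 *
 * @rdev: radeon_device pointer
 *
 * Frees the driver info and register access callbacks for the ATOM
 * interpreter (r4xx+).
 * Called at driver shutdown.
 */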
void radeon_atombios_fini(struct radeon_device *rdev)
{
	if (rdev->mode_info.atom_context) {
		kfree(rdev->mode_info.atom_context->scratch);
	}
	kfree(rdev->mode_info.atom_context);
	rdev->mode_info.atom_context = NULL;
	kfree(rdev->mode_info.atom_card_info);
	rdev->mode_info.atom_card_info = NULL;
}

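/*
 * COMBIOS functions.
 */
/**
 * radeon_combios_init - init the driver info for combios
 *
 * @rdev: radeon_device pointer
 *
 * Initializes the driver info for combios (r1xx-r3xx).
 * Returns 0 on success.
 * Called at driver startup.
 */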
int radeon_combios_init(struct radeon_device *rdev)
{
	radeon_combios_initialize_bios_scratch_regs(rdev->ddev);
	return 0;
}

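/**
 * radeon_combios_fini - free the driver info for combios
 *
 * @rdev: radeon_device pointer
 *
 * Frees the driver info for combios (r1xx-r3xx).
 * Called at driver shutdown.
 */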
void radeon_combios_fini(struct radeon_device *rdev)
{
}

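/* if we get transitioned to only one device, take VGA back */
/**
 * radeon_vga_set_decode - enable/disable vga decode
 *
 * @cookie: radeon_device pointer
 * @state: enable/disable vga decode
 *
 * Enable/disable vga decode (all asics).
 * Returns VGA resource flags.
 */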
static unsigned int radeon_vga_set_decode(void *cookie, bool state)
{
	struct radeon_device *rdev = cookie;

	radeon_vga_set_state(rdev, state);
	if (state)
		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
	else
		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}

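/**
 * radeon_check_pot_argument - check that argument is a power of two
 *
 * @arg: value to check
 *
 * Validates that a certain argument is a power of two (all asics).
 * Returns true if argument is valid.
 */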
static bool radeon_check_pot_argument(int arg)
{
	return (arg & (arg - 1)) == 0;
}

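/**
 * radeon_gart_size_auto - determine a sensible default GART size
 *
 * @family: the radeon family name
 *
 * Returns the default GART size in MB for the given ASIC family;
 * newer ASICs default to a larger GART.
 */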
static int radeon_gart_size_auto(enum radeon_family family)
{
	if (family >= CHIP_TAHITI)
		return 2048;
	else if (family >= CHIP_RV770)
		return 1024;
	else
		return 512;
}

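/**
 * radeon_check_arguments - validate module params
 *
 * @rdev: radeon_device pointer
 *
 * Validates certain module parameters and updates the associated values
 * used by the driver (all asics).
 */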
static void radeon_check_arguments(struct radeon_device *rdev)
{
	if (!radeon_check_pot_argument(radeon_vram_limit)) {
		dev_warn(rdev->dev, "vram limit (%d) must be a power of 2\n",
			 radeon_vram_limit);
		radeon_vram_limit = 0;
	}

	if (radeon_gart_size == -1) {
		radeon_gart_size = radeon_gart_size_auto(rdev->family);
	}

	if (radeon_gart_size < 32) {
		dev_warn(rdev->dev, "gart size (%d) too small\n",
			 radeon_gart_size);
		radeon_gart_size = radeon_gart_size_auto(rdev->family);
	} else if (!radeon_check_pot_argument(radeon_gart_size)) {
		dev_warn(rdev->dev, "gart size (%d) must be a power of 2\n",
			 radeon_gart_size);
		radeon_gart_size = radeon_gart_size_auto(rdev->family);
	}
	rdev->mc.gtt_size = (uint64_t)radeon_gart_size << 20;

	switch (radeon_agpmode) {
	case -1:
	case 0:
	case 1:
	case 2:
	case 4:
	case 8:
		break;
	default:
		dev_warn(rdev->dev, "invalid AGP mode %d (valid mode: "
			 "-1, 0, 1, 2, 4, 8)\n", radeon_agpmode);
		radeon_agpmode = 0;
		break;
	}

	if (!radeon_check_pot_argument(radeon_vm_size)) {
		dev_warn(rdev->dev, "VM size (%d) must be a power of 2\n",
			 radeon_vm_size);
		radeon_vm_size = 4;
	}

	if (radeon_vm_size < 1) {
		dev_warn(rdev->dev, "VM size (%d) too small, min is 1GB\n",
			 radeon_vm_size);
		radeon_vm_size = 4;
	}

	if (radeon_vm_size > 1024) {
		dev_warn(rdev->dev, "VM size (%d) too large, max is 1TB\n",
			 radeon_vm_size);
		radeon_vm_size = 4;
	}

	if (radeon_vm_block_size == -1) {
		unsigned bits = ilog2(radeon_vm_size) + 18;

		if (radeon_vm_size <= 8)
			radeon_vm_block_size = bits - 9;
		else
			radeon_vm_block_size = (bits + 3) / 2;

	} else if (radeon_vm_block_size < 9) {
		dev_warn(rdev->dev, "VM page table size (%d) too small\n",
			 radeon_vm_block_size);
		radeon_vm_block_size = 9;
	}

	if (radeon_vm_block_size > 24 ||
	    (radeon_vm_size * 1024) < (1ull << radeon_vm_block_size)) {
		dev_warn(rdev->dev, "VM page table size (%d) too large\n",
			 radeon_vm_block_size);
		radeon_vm_block_size = 9;
	}
}

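/**
 * radeon_switcheroo_set_state - set switcheroo state
 *
 * @pdev: pci dev pointer
 * @state: vga_switcheroo state
 *
 * Callback for the switcheroo driver.  Suspends or resumes the
 * asic before or after it is powered up using ACPI methods.
 */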
static void radeon_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	if (radeon_is_px(dev) && state == VGA_SWITCHEROO_OFF)
		return;

	if (state == VGA_SWITCHEROO_ON) {
		pr_info("radeon: switched on\n");
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;

		radeon_resume_kms(dev, true, true);

		dev->switch_power_state = DRM_SWITCH_POWER_ON;
		drm_kms_helper_poll_enable(dev);
	} else {
		pr_info("radeon: switched off\n");
		drm_kms_helper_poll_disable(dev);
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		radeon_suspend_kms(dev, true, true, false);
		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
	}
}

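/**
 * radeon_switcheroo_can_switch - see if switcheroo state can change
 *
 * @pdev: pci dev pointer
 *
 * Callback for the switcheroo driver.  Switching is only allowed while
 * no DRM clients have the device open; note that reading open_count
 * here without locking is inherently racy, but taking the lock would
 * invert with the driver load path.
 * Returns true if the state can be changed, false if not.
 */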
static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	return dev->open_count == 0;
}

static const struct vga_switcheroo_client_ops radeon_switcheroo_ops = {
	.set_gpu_state = radeon_switcheroo_set_state,
	.reprobe = NULL,
	.can_switch = radeon_switcheroo_can_switch,
};

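/**
 * radeon_device_init - initialize the driver
 *
 * @rdev: radeon_device pointer
 * @ddev: drm dev pointer
 * @pdev: pci dev pointer
 * @flags: driver flags
 *
 * Initializes the driver info and hw (all asics).
 * Returns 0 for success or an error on failure.
 * Called at driver startup.
 */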
int radeon_device_init(struct radeon_device *rdev,
		       struct drm_device *ddev,
		       struct pci_dev *pdev,
		       uint32_t flags)
{
	int r, i;
	int dma_bits;
	bool runtime = false;

	rdev->shutdown = false;
	rdev->dev = &pdev->dev;
	rdev->ddev = ddev;
	rdev->pdev = pdev;
	rdev->flags = flags;
	rdev->family = flags & RADEON_FAMILY_MASK;
	rdev->is_atom_bios = false;
	rdev->usec_timeout = RADEON_MAX_USEC_TIMEOUT;
	rdev->mc.gtt_size = 512 * 1024 * 1024;
	rdev->accel_working = false;

	for (i = 0; i < RADEON_NUM_RINGS; i++) {
		rdev->ring[i].idx = i;
	}
	rdev->fence_context = dma_fence_context_alloc(RADEON_NUM_RINGS);

	DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
		 radeon_family_name[rdev->family], pdev->vendor, pdev->device,
		 pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);

	mutex_init(&rdev->ring_lock);
	mutex_init(&rdev->dc_hw_i2c_mutex);
	atomic_set(&rdev->ih.lock, 0);
	mutex_init(&rdev->gem.mutex);
	mutex_init(&rdev->pm.mutex);
	mutex_init(&rdev->gpu_clock_mutex);
	mutex_init(&rdev->srbm_mutex);
	init_rwsem(&rdev->pm.mclk_lock);
	init_rwsem(&rdev->exclusive_lock);
	init_waitqueue_head(&rdev->irq.vblank_queue);
	mutex_init(&rdev->mn_lock);
	hash_init(rdev->mn_hash);
	r = radeon_gem_init(rdev);
	if (r)
		return r;

	radeon_check_arguments(rdev);

	rdev->vm_manager.max_pfn = radeon_vm_size << 18;

	r = radeon_asic_init(rdev);
	if (r)
		return r;

	if ((rdev->family >= CHIP_RS400) &&
	    (rdev->flags & RADEON_IS_IGP)) {
		rdev->flags &= ~RADEON_IS_AGP;
	}

	if (rdev->flags & RADEON_IS_AGP && radeon_agpmode == -1) {
		radeon_agp_disable(rdev);
	}

	if (rdev->family >= CHIP_CAYMAN)
		rdev->mc.mc_mask = 0xffffffffffULL;
	else if (rdev->family >= CHIP_CEDAR)
		rdev->mc.mc_mask = 0xfffffffffULL;
	else
		rdev->mc.mc_mask = 0xffffffffULL;

	rdev->need_dma32 = false;
	if (rdev->flags & RADEON_IS_AGP)
		rdev->need_dma32 = true;
	if ((rdev->flags & RADEON_IS_PCI) &&
	    (rdev->family <= CHIP_RS740))
		rdev->need_dma32 = true;
#ifdef CONFIG_PPC64
	if (rdev->family == CHIP_CEDAR)
		rdev->need_dma32 = true;
#endif

	dma_bits = rdev->need_dma32 ? 32 : 40;
	r = pci_set_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		rdev->need_dma32 = true;
		dma_bits = 32;
		pr_warn("radeon: No suitable DMA available\n");
	}
	r = pci_set_consistent_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		pci_set_consistent_dma_mask(rdev->pdev, DMA_BIT_MASK(32));
		pr_warn("radeon: No coherent DMA available\n");
	}
	rdev->need_swiotlb = drm_get_max_iomem() > ((u64)1 << dma_bits);

	spin_lock_init(&rdev->mmio_idx_lock);
	spin_lock_init(&rdev->smc_idx_lock);
	spin_lock_init(&rdev->pll_idx_lock);
	spin_lock_init(&rdev->mc_idx_lock);
	spin_lock_init(&rdev->pcie_idx_lock);
	spin_lock_init(&rdev->pciep_idx_lock);
	spin_lock_init(&rdev->pif_idx_lock);
	spin_lock_init(&rdev->cg_idx_lock);
	spin_lock_init(&rdev->uvd_idx_lock);
	spin_lock_init(&rdev->rcu_idx_lock);
	spin_lock_init(&rdev->didt_idx_lock);
	spin_lock_init(&rdev->end_idx_lock);
	if (rdev->family >= CHIP_BONAIRE) {
		rdev->rmmio_base = pci_resource_start(rdev->pdev, 5);
		rdev->rmmio_size = pci_resource_len(rdev->pdev, 5);
	} else {
		rdev->rmmio_base = pci_resource_start(rdev->pdev, 2);
		rdev->rmmio_size = pci_resource_len(rdev->pdev, 2);
	}
	rdev->rmmio = ioremap(rdev->rmmio_base, rdev->rmmio_size);
	if (rdev->rmmio == NULL)
		return -ENOMEM;

	if (rdev->family >= CHIP_BONAIRE)
		radeon_doorbell_init(rdev);

	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
		if (pci_resource_flags(rdev->pdev, i) & IORESOURCE_IO) {
			rdev->rio_mem_size = pci_resource_len(rdev->pdev, i);
			rdev->rio_mem = pci_iomap(rdev->pdev, i, rdev->rio_mem_size);
			break;
		}
	}
	if (rdev->rio_mem == NULL)
		DRM_ERROR("Unable to find PCI I/O BAR\n");

	if (rdev->flags & RADEON_IS_PX)
		radeon_device_handle_px_quirks(rdev);

	vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode);

	if (rdev->flags & RADEON_IS_PX)
		runtime = true;
	if (!pci_is_thunderbolt_attached(rdev->pdev))
		vga_switcheroo_register_client(rdev->pdev,
					       &radeon_switcheroo_ops, runtime);
	if (runtime)
		vga_switcheroo_init_domain_pm_ops(rdev->dev, &rdev->vga_pm_domain);

	r = radeon_init(rdev);
	if (r)
		goto failed;

	r = radeon_gem_debugfs_init(rdev);
	if (r) {
		DRM_ERROR("registering gem debugfs failed (%d).\n", r);
	}

	r = radeon_mst_debugfs_init(rdev);
	if (r) {
		DRM_ERROR("registering mst debugfs failed (%d).\n", r);
	}

	if (rdev->flags & RADEON_IS_AGP && !rdev->accel_working) {
		radeon_asic_reset(rdev);
		radeon_fini(rdev);
		radeon_agp_disable(rdev);
		r = radeon_init(rdev);
		if (r)
			goto failed;
	}

	r = radeon_ib_ring_tests(rdev);
	if (r)
		DRM_ERROR("ib ring test failed (%d).\n", r);

	if (rdev->pm.dpm_enabled &&
	    (rdev->pm.pm_method == PM_METHOD_DPM) &&
	    (rdev->family == CHIP_TURKS) &&
	    (rdev->flags & RADEON_IS_MOBILITY)) {
		mutex_lock(&rdev->pm.mutex);
		radeon_dpm_disable(rdev);
		radeon_dpm_enable(rdev);
		mutex_unlock(&rdev->pm.mutex);
	}

	if ((radeon_testing & 1)) {
		if (rdev->accel_working)
			radeon_test_moves(rdev);
		else
			DRM_INFO("radeon: acceleration disabled, skipping move tests\n");
	}
	if ((radeon_testing & 2)) {
		if (rdev->accel_working)
			radeon_test_syncing(rdev);
		else
			DRM_INFO("radeon: acceleration disabled, skipping sync tests\n");
	}
	if (radeon_benchmarking) {
		if (rdev->accel_working)
			radeon_benchmark(rdev, radeon_benchmarking);
		else
			DRM_INFO("radeon: acceleration disabled, skipping benchmarks\n");
	}
	return 0;

failed:
	if (radeon_is_px(ddev))
		pm_runtime_put_noidle(ddev->dev);
	if (runtime)
		vga_switcheroo_fini_domain_pm_ops(rdev->dev);
	return r;
}

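/**
 * radeon_device_fini - tear down the driver
 *
 * @rdev: radeon_device pointer
 *
 * Tear down the driver info (all asics).
 * Called at driver shutdown.
 */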
void radeon_device_fini(struct radeon_device *rdev)
{
	DRM_INFO("radeon: finishing device.\n");
	rdev->shutdown = true;

	radeon_bo_evict_vram(rdev);
	radeon_fini(rdev);
	if (!pci_is_thunderbolt_attached(rdev->pdev))
		vga_switcheroo_unregister_client(rdev->pdev);
	if (rdev->flags & RADEON_IS_PX)
		vga_switcheroo_fini_domain_pm_ops(rdev->dev);
	vga_client_register(rdev->pdev, NULL, NULL, NULL);
	if (rdev->rio_mem)
		pci_iounmap(rdev->pdev, rdev->rio_mem);
	rdev->rio_mem = NULL;
	iounmap(rdev->rmmio);
	rdev->rmmio = NULL;
	if (rdev->family >= CHIP_BONAIRE)
		radeon_doorbell_fini(rdev);
}

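/*
 * Suspend & resume.
 */
/**
 * radeon_suspend_kms - initiate device suspend
 *
 * @dev: drm dev pointer
 * @suspend: put the hw into the D3hot pci power state
 * @fbcon: suspend the fbdev console
 * @freeze: hibernation path; reset the asic instead of powering down
 *
 * Puts the hw in the suspend state (all asics).
 * Returns 0 for success or an error on failure.
 * Called at driver suspend.
 */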
int radeon_suspend_kms(struct drm_device *dev, bool suspend,
		       bool fbcon, bool freeze)
{
	struct radeon_device *rdev;
	struct drm_crtc *crtc;
	struct drm_connector *connector;
	int i, r;

	if (dev == NULL || dev->dev_private == NULL) {
		return -ENODEV;
	}

	rdev = dev->dev_private;

	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	drm_kms_helper_poll_disable(dev);

	drm_modeset_lock_all(dev);
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
	}
	drm_modeset_unlock_all(dev);

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
		struct drm_framebuffer *fb = crtc->primary->fb;
		struct radeon_bo *robj;

		if (radeon_crtc->cursor_bo) {
			struct radeon_bo *robj = gem_to_radeon_bo(radeon_crtc->cursor_bo);
			r = radeon_bo_reserve(robj, false);
			if (r == 0) {
				radeon_bo_unpin(robj);
				radeon_bo_unreserve(robj);
			}
		}

		if (fb == NULL || fb->obj[0] == NULL) {
			continue;
		}
		robj = gem_to_radeon_bo(fb->obj[0]);

		if (!radeon_fbdev_robj_is_fb(rdev, robj)) {
			r = radeon_bo_reserve(robj, false);
			if (r == 0) {
				radeon_bo_unpin(robj);
				radeon_bo_unreserve(robj);
			}
		}
	}

	radeon_bo_evict_vram(rdev);

	for (i = 0; i < RADEON_NUM_RINGS; i++) {
		r = radeon_fence_wait_empty(rdev, i);
		if (r) {
			radeon_fence_driver_force_completion(rdev, i);
		}
	}

	radeon_save_bios_scratch_regs(rdev);

	radeon_suspend(rdev);
	radeon_hpd_fini(rdev);

	radeon_bo_evict_vram(rdev);

	radeon_agp_suspend(rdev);

	pci_save_state(dev->pdev);
	if (freeze && rdev->family >= CHIP_CEDAR && !(rdev->flags & RADEON_IS_IGP)) {
		rdev->asic->asic_reset(rdev, true);
		pci_restore_state(dev->pdev);
	} else if (suspend) {
		pci_disable_device(dev->pdev);
		pci_set_power_state(dev->pdev, PCI_D3hot);
	}

	if (fbcon) {
		console_lock();
		radeon_fbdev_set_suspend(rdev, 1);
		console_unlock();
	}
	return 0;
}

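/**
 * radeon_resume_kms - initiate device resume
 *
 * @dev: drm dev pointer
 * @resume: re-enable the pci device
 * @fbcon: resume the fbdev console
 *
 * Bring the hw back to operating state (all asics).
 * Returns 0 for success or an error on failure.
 * Called at driver resume.
 */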
int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
{
	struct drm_connector *connector;
	struct radeon_device *rdev = dev->dev_private;
	struct drm_crtc *crtc;
	int r;

	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	if (fbcon) {
		console_lock();
	}
	if (resume) {
		pci_set_power_state(dev->pdev, PCI_D0);
		pci_restore_state(dev->pdev);
		if (pci_enable_device(dev->pdev)) {
			if (fbcon)
				console_unlock();
			return -1;
		}
	}

	radeon_agp_resume(rdev);
	radeon_resume(rdev);

	r = radeon_ib_ring_tests(rdev);
	if (r)
		DRM_ERROR("ib ring test failed (%d).\n", r);

	if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
		r = radeon_pm_late_init(rdev);
		if (r) {
			rdev->pm.dpm_enabled = false;
			DRM_ERROR("radeon_pm_late_init failed, disabling dpm\n");
		}
	} else {
		radeon_pm_resume(rdev);
	}

	radeon_restore_bios_scratch_regs(rdev);

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);

		if (radeon_crtc->cursor_bo) {
			struct radeon_bo *robj = gem_to_radeon_bo(radeon_crtc->cursor_bo);
			r = radeon_bo_reserve(robj, false);
			if (r == 0) {
				r = radeon_bo_pin_restricted(robj,
							     RADEON_GEM_DOMAIN_VRAM,
							     ASIC_IS_AVIVO(rdev) ?
							     0 : 1 << 27,
							     &radeon_crtc->cursor_addr);
				if (r != 0)
					DRM_ERROR("Failed to pin cursor BO (%d)\n", r);
				radeon_bo_unreserve(robj);
			}
		}
	}

	if (rdev->is_atom_bios) {
		radeon_atom_encoder_init(rdev);
		radeon_atom_disp_eng_pll_init(rdev);

		if (rdev->mode_info.bl_encoder) {
			u8 bl_level = radeon_get_backlight_level(rdev,
								 rdev->mode_info.bl_encoder);
			radeon_set_backlight_level(rdev, rdev->mode_info.bl_encoder,
						   bl_level);
		}
	}

	radeon_hpd_init(rdev);

	if (fbcon) {
		drm_helper_resume_force_mode(dev);

		drm_modeset_lock_all(dev);
		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
			drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
		}
		drm_modeset_unlock_all(dev);
	}

	drm_kms_helper_poll_enable(dev);

	if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled)
		radeon_pm_compute_clocks(rdev);

	if (fbcon) {
		radeon_fbdev_set_suspend(rdev, 0);
		console_unlock();
	}

	return 0;
}

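/**
 * radeon_gpu_reset - reset the asic
 *
 * @rdev: radeon device pointer
 *
 * Attempt to reset the GPU if it has hung (all asics).
 * Returns 0 for success or an error on failure.
 */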
int radeon_gpu_reset(struct radeon_device *rdev)
{
	unsigned ring_sizes[RADEON_NUM_RINGS];
	uint32_t *ring_data[RADEON_NUM_RINGS];

	bool saved = false;

	int i, r;
	int resched;

	down_write(&rdev->exclusive_lock);

	if (!rdev->needs_reset) {
		up_write(&rdev->exclusive_lock);
		return 0;
	}

	atomic_inc(&rdev->gpu_reset_counter);

	radeon_save_bios_scratch_regs(rdev);

	resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
	radeon_suspend(rdev);
	radeon_hpd_fini(rdev);

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		ring_sizes[i] = radeon_ring_backup(rdev, &rdev->ring[i],
						   &ring_data[i]);
		if (ring_sizes[i]) {
			saved = true;
			dev_info(rdev->dev, "Saved %d dwords of commands "
				 "on ring %d.\n", ring_sizes[i], i);
		}
	}

	r = radeon_asic_reset(rdev);
	if (!r) {
		dev_info(rdev->dev, "GPU reset succeeded, trying to resume\n");
		radeon_resume(rdev);
	}

	radeon_restore_bios_scratch_regs(rdev);

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		if (!r && ring_data[i]) {
			radeon_ring_restore(rdev, &rdev->ring[i],
					    ring_sizes[i], ring_data[i]);
		} else {
			radeon_fence_driver_force_completion(rdev, i);
			kfree(ring_data[i]);
		}
	}

	if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
		r = radeon_pm_late_init(rdev);
		if (r) {
			rdev->pm.dpm_enabled = false;
			DRM_ERROR("radeon_pm_late_init failed, disabling dpm\n");
		}
	} else {
		radeon_pm_resume(rdev);
	}

	if (rdev->is_atom_bios) {
		radeon_atom_encoder_init(rdev);
		radeon_atom_disp_eng_pll_init(rdev);

		if (rdev->mode_info.bl_encoder) {
			u8 bl_level = radeon_get_backlight_level(rdev,
								 rdev->mode_info.bl_encoder);
			radeon_set_backlight_level(rdev, rdev->mode_info.bl_encoder,
						   bl_level);
		}
	}

	radeon_hpd_init(rdev);

	ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);

	rdev->in_reset = true;
	rdev->needs_reset = false;

	downgrade_write(&rdev->exclusive_lock);

	drm_helper_resume_force_mode(rdev->ddev);

	if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled)
		radeon_pm_compute_clocks(rdev);

	if (!r) {
		r = radeon_ib_ring_tests(rdev);
		if (r && saved)
			r = -EAGAIN;
	} else {
		dev_info(rdev->dev, "GPU reset failed\n");
	}

	rdev->needs_reset = r == -EAGAIN;
	rdev->in_reset = false;

	up_read(&rdev->exclusive_lock);
	return r;
}

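/*
 * Debugfs
 */
/**
 * radeon_debugfs_add_files - register debugfs file entries
 *
 * @rdev: radeon_device pointer
 * @files: debugfs entries to register
 * @nfiles: number of entries
 *
 * Registers a set of debugfs files, skipping sets that are already
 * registered.
 * Returns 0 on success or -EINVAL if the component limit is reached.
 */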
int radeon_debugfs_add_files(struct radeon_device *rdev,
			     struct drm_info_list *files,
			     unsigned nfiles)
{
	unsigned i;

	for (i = 0; i < rdev->debugfs_count; i++) {
		if (rdev->debugfs[i].files == files) {
			return 0;
		}
	}

	i = rdev->debugfs_count + 1;
	if (i > RADEON_DEBUGFS_MAX_COMPONENTS) {
		DRM_ERROR("Reached maximum number of debugfs components.\n");
		DRM_ERROR("Report so we increase "
			  "RADEON_DEBUGFS_MAX_COMPONENTS.\n");
		return -EINVAL;
	}
	rdev->debugfs[rdev->debugfs_count].files = files;
	rdev->debugfs[rdev->debugfs_count].num_files = nfiles;
	rdev->debugfs_count = i;
#if defined(CONFIG_DEBUG_FS)
	drm_debugfs_create_files(files, nfiles,
				 rdev->ddev->primary->debugfs_root,
				 rdev->ddev->primary);
#endif
	return 0;
}