1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28#include <linux/console.h>
29#include <linux/slab.h>
30#include <drm/drmP.h>
31#include <drm/drm_crtc_helper.h>
32#include <drm/radeon_drm.h>
33#include <linux/pm_runtime.h>
34#include <linux/vgaarb.h>
35#include <linux/vga_switcheroo.h>
36#include <linux/efi.h>
37#include "radeon_reg.h"
38#include "radeon.h"
39#include "atom.h"
40
/*
 * ASIC family name strings, indexed by enum radeon_family
 * (rdev->family = flags & RADEON_FAMILY_MASK).  Used for the
 * probe-time DRM_INFO banner in radeon_device_init().
 * The 16-byte width bounds the longest name plus NUL.
 */
static const char radeon_family_name[][16] = {
	"R100",
	"RV100",
	"RS100",
	"RV200",
	"RS200",
	"R200",
	"RV250",
	"RS300",
	"RV280",
	"R300",
	"R350",
	"RV350",
	"RV380",
	"R420",
	"R423",
	"RV410",
	"RS400",
	"RS480",
	"RS600",
	"RS690",
	"RS740",
	"RV515",
	"R520",
	"RV530",
	"RV560",
	"RV570",
	"R580",
	"R600",
	"RV610",
	"RV630",
	"RV670",
	"RV620",
	"RV635",
	"RS780",
	"RS880",
	"RV770",
	"RV730",
	"RV710",
	"RV740",
	"CEDAR",
	"REDWOOD",
	"JUNIPER",
	"CYPRESS",
	"HEMLOCK",
	"PALM",
	"SUMO",
	"SUMO2",
	"BARTS",
	"TURKS",
	"CAICOS",
	"CAYMAN",
	"ARUBA",
	"TAHITI",
	"PITCAIRN",
	"VERDE",
	"OLAND",
	"HAINAN",
	"BONAIRE",
	"KAVERI",
	"KABINI",
	"HAWAII",
	"MULLINS",
	"LAST",
};
106
/* PX (PowerXpress hybrid graphics) quirk flags */
#define RADEON_PX_QUIRK_DISABLE_PX (1 << 0)	/* treat the device as non-PX */
#define RADEON_PX_QUIRK_LONG_WAKEUP (1 << 1)	/* enforce a longer d3_delay on switch-on */

/*
 * One PX quirk table entry: the PCI IDs that identify an affected
 * system plus the RADEON_PX_QUIRK_* flags to apply to it.
 */
struct radeon_px_quirk {
	u32 chip_vendor;	/* GPU PCI vendor ID */
	u32 chip_device;	/* GPU PCI device ID */
	u32 subsys_vendor;	/* subsystem (board/OEM) vendor ID */
	u32 subsys_device;	/* subsystem device ID */
	u32 px_quirk_flags;	/* RADEON_PX_QUIRK_* flags to set */
};
117
/*
 * Known-broken PX systems, matched against the GPU's PCI IDs in
 * radeon_device_handle_px_quirks().  Terminated by an all-zero sentinel.
 */
static struct radeon_px_quirk radeon_px_quirk_list[] = {
	/* Acer system (subsystem vendor 0x1025) with a 0x6760 GPU:
	 * runtime PX is unreliable here, so disable it.
	 * NOTE(review): exact model is in the original changelog — confirm via git history. */
	{ PCI_VENDOR_ID_ATI, 0x6760, 0x1025, 0x0672, RADEON_PX_QUIRK_DISABLE_PX },
	/* Asus system (subsystem vendor 0x1043) with a 0x6741 GPU:
	 * disable PX. */
	{ PCI_VENDOR_ID_ATI, 0x6741, 0x1043, 0x108c, RADEON_PX_QUIRK_DISABLE_PX },
	/* Asus system (subsystem vendor 0x1043) with a 0x6840 GPU:
	 * disable PX. */
	{ PCI_VENDOR_ID_ATI, 0x6840, 0x1043, 0x2122, RADEON_PX_QUIRK_DISABLE_PX },
	/* Apple system (MacBook Pro — subsystem vendor is Apple): needs a
	 * longer wakeup delay when switching the GPU on. */
	{ PCI_VENDOR_ID_ATI, 0x6741, PCI_VENDOR_ID_APPLE, 0x00e2, RADEON_PX_QUIRK_LONG_WAKEUP },
	{ 0, 0, 0, 0, 0 },
};
135
136bool radeon_is_px(struct drm_device *dev)
137{
138 struct radeon_device *rdev = dev->dev_private;
139
140 if (rdev->flags & RADEON_IS_PX)
141 return true;
142 return false;
143}
144
145static void radeon_device_handle_px_quirks(struct radeon_device *rdev)
146{
147 struct radeon_px_quirk *p = radeon_px_quirk_list;
148
149
150 while (p && p->chip_device != 0) {
151 if (rdev->pdev->vendor == p->chip_vendor &&
152 rdev->pdev->device == p->chip_device &&
153 rdev->pdev->subsystem_vendor == p->subsys_vendor &&
154 rdev->pdev->subsystem_device == p->subsys_device) {
155 rdev->px_quirk_flags = p->px_quirk_flags;
156 break;
157 }
158 ++p;
159 }
160
161 if (rdev->px_quirk_flags & RADEON_PX_QUIRK_DISABLE_PX)
162 rdev->flags &= ~RADEON_IS_PX;
163}
164
165
166
167
168
169
170
171
172
173
174
175void radeon_program_register_sequence(struct radeon_device *rdev,
176 const u32 *registers,
177 const u32 array_size)
178{
179 u32 tmp, reg, and_mask, or_mask;
180 int i;
181
182 if (array_size % 3)
183 return;
184
185 for (i = 0; i < array_size; i +=3) {
186 reg = registers[i + 0];
187 and_mask = registers[i + 1];
188 or_mask = registers[i + 2];
189
190 if (and_mask == 0xffffffff) {
191 tmp = or_mask;
192 } else {
193 tmp = RREG32(reg);
194 tmp &= ~and_mask;
195 tmp |= or_mask;
196 }
197 WREG32(reg, tmp);
198 }
199}
200
/**
 * radeon_pci_config_reset - reset the GPU through PCI config space
 * @rdev: radeon device
 *
 * Writes the ASIC reset magic value to PCI config offset 0x7c.
 */
void radeon_pci_config_reset(struct radeon_device *rdev)
{
	pci_write_config_dword(rdev->pdev, 0x7c, RADEON_ASIC_RESET_DATA);
}
205
206
207
208
209
210
211
212
/**
 * radeon_surface_init - set up the surface register file (pre-r600 only)
 * @rdev: radeon device
 *
 * For each surface register slot, re-binds the register if a BO owns it,
 * otherwise clears it, then writes 0 to RADEON_SURFACE_CNTL.  r600 and
 * newer parts have no surface registers, so this is a no-op there.
 */
void radeon_surface_init(struct radeon_device *rdev)
{
	/* surface registers only exist on pre-r600 asics */
	if (rdev->family < CHIP_R600) {
		int i;

		for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {
			if (rdev->surface_regs[i].bo)
				radeon_bo_get_surface_reg(rdev->surface_regs[i].bo);
			else
				radeon_clear_surface_reg(rdev, i);
		}
		/* reset surface control to its power-on default */
		WREG32(RADEON_SURFACE_CNTL, 0);
	}
}
229
230
231
232
233
234
235
236
237
238
239
240void radeon_scratch_init(struct radeon_device *rdev)
241{
242 int i;
243
244
245 if (rdev->family < CHIP_R300) {
246 rdev->scratch.num_reg = 5;
247 } else {
248 rdev->scratch.num_reg = 7;
249 }
250 rdev->scratch.reg_base = RADEON_SCRATCH_REG0;
251 for (i = 0; i < rdev->scratch.num_reg; i++) {
252 rdev->scratch.free[i] = true;
253 rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4);
254 }
255}
256
257
258
259
260
261
262
263
264
265
266int radeon_scratch_get(struct radeon_device *rdev, uint32_t *reg)
267{
268 int i;
269
270 for (i = 0; i < rdev->scratch.num_reg; i++) {
271 if (rdev->scratch.free[i]) {
272 rdev->scratch.free[i] = false;
273 *reg = rdev->scratch.reg[i];
274 return 0;
275 }
276 }
277 return -EINVAL;
278}
279
280
281
282
283
284
285
286
287
288void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg)
289{
290 int i;
291
292 for (i = 0; i < rdev->scratch.num_reg; i++) {
293 if (rdev->scratch.reg[i] == reg) {
294 rdev->scratch.free[i] = true;
295 return;
296 }
297 }
298}
299
300
301
302
303
304
305
306
307
308
309
310
/**
 * radeon_doorbell_init - map the doorbell BAR and set up the allocator
 * @rdev: radeon device
 *
 * Reads the doorbell aperture from PCI BAR 2, caps the number of usable
 * doorbells at RADEON_MAX_DOORBELLS, ioremaps just that many slots and
 * clears the allocation bitmap.  Returns 0 on success, -EINVAL when the
 * BAR is empty, -ENOMEM when the mapping fails.
 */
static int radeon_doorbell_init(struct radeon_device *rdev)
{
	/* doorbell aperture lives in PCI BAR 2 on these parts */
	rdev->doorbell.base = pci_resource_start(rdev->pdev, 2);
	rdev->doorbell.size = pci_resource_len(rdev->pdev, 2);

	rdev->doorbell.num_doorbells = min_t(u32, rdev->doorbell.size / sizeof(u32), RADEON_MAX_DOORBELLS);
	if (rdev->doorbell.num_doorbells == 0)
		return -EINVAL;

	/* only map the doorbells the driver can actually use */
	rdev->doorbell.ptr = ioremap(rdev->doorbell.base, rdev->doorbell.num_doorbells * sizeof(u32));
	if (rdev->doorbell.ptr == NULL) {
		return -ENOMEM;
	}
	DRM_INFO("doorbell mmio base: 0x%08X\n", (uint32_t)rdev->doorbell.base);
	DRM_INFO("doorbell mmio size: %u\n", (unsigned)rdev->doorbell.size);

	/* all doorbells start out free */
	memset(&rdev->doorbell.used, 0, sizeof(rdev->doorbell.used));

	return 0;
}
332
333
334
335
336
337
338
339
/**
 * radeon_doorbell_fini - tear down the doorbell mapping
 * @rdev: radeon device
 *
 * Unmaps the doorbell aperture set up by radeon_doorbell_init().
 */
static void radeon_doorbell_fini(struct radeon_device *rdev)
{
	iounmap(rdev->doorbell.ptr);
	rdev->doorbell.ptr = NULL;
}
345
346
347
348
349
350
351
352
353
354
355int radeon_doorbell_get(struct radeon_device *rdev, u32 *doorbell)
356{
357 unsigned long offset = find_first_zero_bit(rdev->doorbell.used, rdev->doorbell.num_doorbells);
358 if (offset < rdev->doorbell.num_doorbells) {
359 __set_bit(offset, rdev->doorbell.used);
360 *doorbell = offset;
361 return 0;
362 } else {
363 return -EINVAL;
364 }
365}
366
367
368
369
370
371
372
373
374
375void radeon_doorbell_free(struct radeon_device *rdev, u32 doorbell)
376{
377 if (doorbell < rdev->doorbell.num_doorbells)
378 __clear_bit(doorbell, rdev->doorbell.used);
379}
380
381
382
383
384
385
386
387
388
389
390
391
392
393
/**
 * radeon_doorbell_get_kfd_info - report the doorbell range left for KFD
 * @rdev: radeon device
 * @aperture_base: out — physical base of the doorbell aperture
 * @aperture_size: out — total aperture size in bytes
 * @start_offset: out — first byte past the slots this driver uses
 *
 * The graphics driver only uses the first num_doorbells slots; whatever
 * remains of the BAR can be handed to the KFD (compute) driver.  If there
 * is nothing left over, all three outputs are zeroed.
 */
void radeon_doorbell_get_kfd_info(struct radeon_device *rdev,
				  phys_addr_t *aperture_base,
				  size_t *aperture_size,
				  size_t *start_offset)
{
	/* KFD gets the tail of the BAR beyond the graphics doorbells */
	if (rdev->doorbell.size > rdev->doorbell.num_doorbells * sizeof(u32)) {
		*aperture_base = rdev->doorbell.base;
		*aperture_size = rdev->doorbell.size;
		*start_offset = rdev->doorbell.num_doorbells * sizeof(u32);
	} else {
		*aperture_base = 0;
		*aperture_size = 0;
		*start_offset = 0;
	}
}
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
/**
 * radeon_wb_disable - stop using the writeback buffer
 * @rdev: radeon device
 *
 * Only flips the software flag; the buffer itself stays allocated
 * (see radeon_wb_fini() for teardown).
 */
void radeon_wb_disable(struct radeon_device *rdev)
{
	rdev->wb.enabled = false;
}
430
431
432
433
434
435
436
437
438
/**
 * radeon_wb_fini - disable writeback and free its buffer object
 * @rdev: radeon device
 *
 * Disables writeback, then unmaps, unpins and frees the writeback BO
 * if one was allocated.  Safe to call multiple times.
 */
void radeon_wb_fini(struct radeon_device *rdev)
{
	radeon_wb_disable(rdev);
	if (rdev->wb.wb_obj) {
		/* unpin requires a successful reserve; skip it on failure */
		if (!radeon_bo_reserve(rdev->wb.wb_obj, false)) {
			radeon_bo_kunmap(rdev->wb.wb_obj);
			radeon_bo_unpin(rdev->wb.wb_obj);
			radeon_bo_unreserve(rdev->wb.wb_obj);
		}
		radeon_bo_unref(&rdev->wb.wb_obj);
		rdev->wb.wb = NULL;
		rdev->wb.wb_obj = NULL;
	}
}
453
454
455
456
457
458
459
460
461
462
/**
 * radeon_wb_init - allocate the writeback buffer and decide whether to use it
 * @rdev: radeon device
 *
 * Allocates (on first call only) one GPU page in GTT, pins and kmaps it
 * for CPU access, clears it, then enables writeback unless the module
 * parameter, AGP, or a pre-R300 ASIC forbids it.  Returns 0 on success
 * or a negative error code from the BO setup.
 */
int radeon_wb_init(struct radeon_device *rdev)
{
	int r;

	if (rdev->wb.wb_obj == NULL) {
		/* one GPU page in GTT holds all writeback data */
		r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true,
				     RADEON_GEM_DOMAIN_GTT, 0, NULL, NULL,
				     &rdev->wb.wb_obj);
		if (r) {
			dev_warn(rdev->dev, "(%d) create WB bo failed\n", r);
			return r;
		}
		r = radeon_bo_reserve(rdev->wb.wb_obj, false);
		if (unlikely(r != 0)) {
			radeon_wb_fini(rdev);
			return r;
		}
		r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
				  &rdev->wb.gpu_addr);
		if (r) {
			/* drop the reservation before tearing down */
			radeon_bo_unreserve(rdev->wb.wb_obj);
			dev_warn(rdev->dev, "(%d) pin WB bo failed\n", r);
			radeon_wb_fini(rdev);
			return r;
		}
		r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
		radeon_bo_unreserve(rdev->wb.wb_obj);
		if (r) {
			dev_warn(rdev->dev, "(%d) map WB bo failed\n", r);
			radeon_wb_fini(rdev);
			return r;
		}
	}

	/* clear wb memory */
	memset((char *)rdev->wb.wb, 0, RADEON_GPU_PAGE_SIZE);
	/* event fences off by default; enabled below where supported */
	rdev->wb.use_event = false;
	/* module parameter override: radeon_no_wb=1 disables writeback */
	if (radeon_no_wb == 1) {
		rdev->wb.enabled = false;
	} else {
		if (rdev->flags & RADEON_IS_AGP) {
			/* writeback disabled on AGP */
			rdev->wb.enabled = false;
		} else if (rdev->family < CHIP_R300) {
			/* writeback disabled on pre-R300 parts */
			rdev->wb.enabled = false;
		} else {
			rdev->wb.enabled = true;
			/* event fences only from R600 on */
			if (rdev->family >= CHIP_R600) {
				rdev->wb.use_event = true;
			}
		}
	}
	/* PALM and newer always use writeback + events, overriding the above */
	if (rdev->family >= CHIP_PALM) {
		rdev->wb.enabled = true;
		rdev->wb.use_event = true;
	}

	dev_info(rdev->dev, "WB %sabled\n", rdev->wb.enabled ? "en" : "dis");

	return 0;
}
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
/**
 * radeon_vram_location - place VRAM in the MC address space
 * @rdev: radeon device
 * @mc: memory controller structure to fill in
 * @base: MC address at which to put VRAM
 *
 * Sets vram_start/vram_end from @base, shrinking the VRAM size to the
 * PCI aperture size when it would cross the MC address mask or (on AGP)
 * overlap the GTT range.  Finally applies the radeon_vram_limit module
 * parameter (in MB; 0 means no limit) to real_vram_size.
 */
void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 base)
{
	uint64_t limit = (uint64_t)radeon_vram_limit << 20;

	mc->vram_start = base;
	/* VRAM must not run past the MC addressable range */
	if (mc->mc_vram_size > (rdev->mc.mc_mask - base + 1)) {
		dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
		mc->real_vram_size = mc->aper_size;
		mc->mc_vram_size = mc->aper_size;
	}
	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
	/* on AGP, VRAM must not overlap the GTT window either */
	if (rdev->flags & RADEON_IS_AGP && mc->vram_end > mc->gtt_start && mc->vram_start <= mc->gtt_end) {
		dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
		mc->real_vram_size = mc->aper_size;
		mc->mc_vram_size = mc->aper_size;
	}
	/* recompute the end after any clamping above */
	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
	if (limit && limit < mc->real_vram_size)
		mc->real_vram_size = limit;
	dev_info(rdev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
		 mc->mc_vram_size >> 20, mc->vram_start,
		 mc->vram_end, mc->real_vram_size >> 20);
}
594
595
596
597
598
599
600
601
602
603
604
605
606
/**
 * radeon_gtt_location - place the GTT in the MC address space
 * @rdev: radeon device
 * @mc: memory controller structure to fill in
 *
 * Puts the GTT in the larger of the two gaps around VRAM (before or
 * after it), honoring gtt_base_align and shrinking gtt_size when the
 * chosen gap is too small.  Must be called after radeon_vram_location()
 * since it reads vram_start/vram_end.
 */
void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
{
	u64 size_af, size_bf;

	/* aligned space after VRAM, and space before it */
	size_af = ((rdev->mc.mc_mask - mc->vram_end) + mc->gtt_base_align) & ~mc->gtt_base_align;
	size_bf = mc->vram_start & ~mc->gtt_base_align;
	if (size_bf > size_af) {
		if (mc->gtt_size > size_bf) {
			dev_warn(rdev->dev, "limiting GTT\n");
			mc->gtt_size = size_bf;
		}
		/* place GTT immediately below VRAM */
		mc->gtt_start = (mc->vram_start & ~mc->gtt_base_align) - mc->gtt_size;
	} else {
		if (mc->gtt_size > size_af) {
			dev_warn(rdev->dev, "limiting GTT\n");
			mc->gtt_size = size_af;
		}
		/* place GTT immediately above VRAM, aligned up */
		mc->gtt_start = (mc->vram_end + 1 + mc->gtt_base_align) & ~mc->gtt_base_align;
	}
	mc->gtt_end = mc->gtt_start + mc->gtt_size - 1;
	dev_info(rdev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n",
		 mc->gtt_size >> 20, mc->gtt_start, mc->gtt_end);
}
630
631
632
633
634
635
636
637
638
639
640
641
/**
 * radeon_device_is_virtual - detect whether we run under a hypervisor
 *
 * Uses the x86 HYPERVISOR CPU feature bit; on other architectures it
 * always reports false (no detection available).
 */
static bool radeon_device_is_virtual(void)
{
#ifdef CONFIG_X86
	return boot_cpu_has(X86_FEATURE_HYPERVISOR);
#else
	return false;
#endif
}
650
651
652
653
654
655
656
657
658
659
/**
 * radeon_card_posted - check whether the card was already POSTed
 * @rdev: radeon device
 *
 * Reports whether the BIOS/firmware already initialized (POSTed) the
 * card, by checking for enabled CRTCs and then for a non-zero memory
 * size register.  Always reports false under a hypervisor and on
 * EFI-booted Apple systems with pre-R600 parts, forcing a driver POST.
 */
bool radeon_card_posted(struct radeon_device *rdev)
{
	uint32_t reg;

	/* under a hypervisor, always treat the card as un-posted */
	if (radeon_device_is_virtual())
		return false;

	/* EFI-booted Apple machine with a pre-R600 part: force a POST */
	if (efi_enabled(EFI_BOOT) &&
	    (rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE) &&
	    (rdev->family < CHIP_R600))
		return false;

	/* no display engine: CRTC checks don't apply */
	if (ASIC_IS_NODCE(rdev))
		goto check_memsize;

	/* first check CRTCs: any enabled CRTC means the card was posted */
	if (ASIC_IS_DCE4(rdev)) {
		reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
		      RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET);
		if (rdev->num_crtc >= 4) {
			reg |= RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) |
			       RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET);
		}
		if (rdev->num_crtc >= 6) {
			reg |= RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) |
			       RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
		}
		if (reg & EVERGREEN_CRTC_MASTER_EN)
			return true;
	} else if (ASIC_IS_AVIVO(rdev)) {
		reg = RREG32(AVIVO_D1CRTC_CONTROL) |
		      RREG32(AVIVO_D2CRTC_CONTROL);
		if (reg & AVIVO_CRTC_EN) {
			return true;
		}
	} else {
		reg = RREG32(RADEON_CRTC_GEN_CNTL) |
		      RREG32(RADEON_CRTC2_GEN_CNTL);
		if (reg & RADEON_CRTC_EN) {
			return true;
		}
	}

check_memsize:
	/* then check the memory size register: non-zero means posted */
	if (rdev->family >= CHIP_R600)
		reg = RREG32(R600_CONFIG_MEMSIZE);
	else
		reg = RREG32(RADEON_CONFIG_MEMSIZE);

	if (reg)
		return true;

	return false;

}
718
719
720
721
722
723
724
725
726
/**
 * radeon_update_bandwidth_info - refresh cached clock values
 * @rdev: radeon device
 *
 * Converts the current engine (sclk) and memory (mclk) clocks from
 * 10 kHz units into fixed-point MHz, and on IGP parts derives the core
 * bandwidth as sclk/16.
 */
void radeon_update_bandwidth_info(struct radeon_device *rdev)
{
	fixed20_12 a;
	u32 sclk = rdev->pm.current_sclk;
	u32 mclk = rdev->pm.current_mclk;

	/* sclk/mclk are in 10 kHz units; divide by 100 to get MHz */
	a.full = dfixed_const(100);
	rdev->pm.sclk.full = dfixed_const(sclk);
	rdev->pm.sclk.full = dfixed_div(rdev->pm.sclk, a);
	rdev->pm.mclk.full = dfixed_const(mclk);
	rdev->pm.mclk.full = dfixed_div(rdev->pm.mclk, a);

	if (rdev->flags & RADEON_IS_IGP) {
		a.full = dfixed_const(16);
		/* core bandwidth = sclk / 16 on IGPs */
		rdev->pm.core_bandwidth.full = dfixed_div(rdev->pm.sclk, a);
	}
}
746
747
748
749
750
751
752
753
754
755
/**
 * radeon_boot_test_post_card - POST the card if it wasn't already
 * @rdev: radeon device
 *
 * If the card is not yet posted, runs the ASIC init tables from the
 * ATOM or COMBIOS BIOS image.  Returns true when the card is usable,
 * false when no BIOS is available to post it with.
 */
bool radeon_boot_test_post_card(struct radeon_device *rdev)
{
	if (radeon_card_posted(rdev))
		return true;

	if (rdev->bios) {
		DRM_INFO("GPU not posted. posting now...\n");
		if (rdev->is_atom_bios)
			atom_asic_init(rdev->mode_info.atom_context);
		else
			radeon_combios_asic_init(rdev->ddev);
		return true;
	} else {
		dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
		return false;
	}
}
773
774
775
776
777
778
779
780
781
782
783
/**
 * radeon_dummy_page_init - allocate the GART dummy page
 * @rdev: radeon device
 *
 * Allocates a zeroed DMA32 page, maps it for bidirectional DMA and
 * precomputes its GART entry; unbound GART slots point at this page.
 * Idempotent: returns 0 immediately if the page already exists.
 * Returns -ENOMEM on allocation or mapping failure.
 */
int radeon_dummy_page_init(struct radeon_device *rdev)
{
	if (rdev->dummy_page.page)
		return 0;
	rdev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO);
	if (rdev->dummy_page.page == NULL)
		return -ENOMEM;
	rdev->dummy_page.addr = pci_map_page(rdev->pdev, rdev->dummy_page.page,
					     0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	if (pci_dma_mapping_error(rdev->pdev, rdev->dummy_page.addr)) {
		dev_err(&rdev->pdev->dev, "Failed to DMA MAP the dummy page\n");
		__free_page(rdev->dummy_page.page);
		rdev->dummy_page.page = NULL;
		return -ENOMEM;
	}
	/* cache the ready-made GART entry for the dummy page */
	rdev->dummy_page.entry = radeon_gart_get_page_entry(rdev->dummy_page.addr,
							    RADEON_GART_PAGE_DUMMY);
	return 0;
}
803
804
805
806
807
808
809
810
/**
 * radeon_dummy_page_fini - unmap and free the GART dummy page
 * @rdev: radeon device
 *
 * Safe to call when no dummy page was ever allocated.
 */
void radeon_dummy_page_fini(struct radeon_device *rdev)
{
	if (rdev->dummy_page.page == NULL)
		return;
	pci_unmap_page(rdev->pdev, rdev->dummy_page.addr,
		       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	__free_page(rdev->dummy_page.page);
	rdev->dummy_page.page = NULL;
}
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840static uint32_t cail_pll_read(struct card_info *info, uint32_t reg)
841{
842 struct radeon_device *rdev = info->dev->dev_private;
843 uint32_t r;
844
845 r = rdev->pll_rreg(rdev, reg);
846 return r;
847}
848
849
850
851
852
853
854
855
856
857
/**
 * cail_pll_write - ATOM interpreter callback to write a PLL register
 * @info: atom card_info pointer
 * @reg: PLL register offset
 * @val: value to write
 *
 * Dispatches through the per-ASIC pll_wreg hook.
 */
static void cail_pll_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct radeon_device *rdev = info->dev->dev_private;

	rdev->pll_wreg(rdev, reg, val);
}
864
865
866
867
868
869
870
871
872
873
874static uint32_t cail_mc_read(struct card_info *info, uint32_t reg)
875{
876 struct radeon_device *rdev = info->dev->dev_private;
877 uint32_t r;
878
879 r = rdev->mc_rreg(rdev, reg);
880 return r;
881}
882
883
884
885
886
887
888
889
890
891
/**
 * cail_mc_write - ATOM interpreter callback to write an MC register
 * @info: atom card_info pointer
 * @reg: MC register offset
 * @val: value to write
 *
 * Dispatches through the per-ASIC mc_wreg hook.
 */
static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct radeon_device *rdev = info->dev->dev_private;

	rdev->mc_wreg(rdev, reg, val);
}
898
899
900
901
902
903
904
905
906
907
/**
 * cail_reg_write - ATOM interpreter callback to write an MMIO register
 * @info: atom card_info pointer
 * @reg: register offset in dwords (hence the *4 byte conversion)
 * @val: value to write
 */
static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct radeon_device *rdev = info->dev->dev_private;

	WREG32(reg*4, val);
}
914
915
916
917
918
919
920
921
922
923
924static uint32_t cail_reg_read(struct card_info *info, uint32_t reg)
925{
926 struct radeon_device *rdev = info->dev->dev_private;
927 uint32_t r;
928
929 r = RREG32(reg*4);
930 return r;
931}
932
933
934
935
936
937
938
939
940
941
/**
 * cail_ioreg_write - ATOM interpreter callback to write a PCI I/O register
 * @info: atom card_info pointer
 * @reg: register offset in dwords (hence the *4 byte conversion)
 * @val: value to write
 */
static void cail_ioreg_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct radeon_device *rdev = info->dev->dev_private;

	WREG32_IO(reg*4, val);
}
948
949
950
951
952
953
954
955
956
957
958static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg)
959{
960 struct radeon_device *rdev = info->dev->dev_private;
961 uint32_t r;
962
963 r = RREG32_IO(reg*4);
964 return r;
965}
966
967
968
969
970
971
972
973
974
975
976
/**
 * radeon_atombios_init - set up the ATOM BIOS interpreter
 * @rdev: radeon device
 *
 * Allocates the card_info callback table the interpreter uses for
 * register access, parses the BIOS image, initializes the interpreter
 * mutexes and the BIOS scratch registers.  Falls back to MMIO for the
 * I/O-port callbacks when no PCI I/O BAR was mapped.  Returns 0 on
 * success, -ENOMEM on allocation or parse failure.
 */
int radeon_atombios_init(struct radeon_device *rdev)
{
	struct card_info *atom_card_info =
	    kzalloc(sizeof(struct card_info), GFP_KERNEL);

	if (!atom_card_info)
		return -ENOMEM;

	rdev->mode_info.atom_card_info = atom_card_info;
	atom_card_info->dev = rdev->ddev;
	atom_card_info->reg_read = cail_reg_read;
	atom_card_info->reg_write = cail_reg_write;
	/* no I/O BAR mapped: fall back to MMIO for the IIO callbacks */
	if (rdev->rio_mem) {
		atom_card_info->ioreg_read = cail_ioreg_read;
		atom_card_info->ioreg_write = cail_ioreg_write;
	} else {
		DRM_ERROR("Unable to find PCI I/O BAR; using MMIO for ATOM IIO\n");
		atom_card_info->ioreg_read = cail_reg_read;
		atom_card_info->ioreg_write = cail_reg_write;
	}
	atom_card_info->mc_read = cail_mc_read;
	atom_card_info->mc_write = cail_mc_write;
	atom_card_info->pll_read = cail_pll_read;
	atom_card_info->pll_write = cail_pll_write;

	rdev->mode_info.atom_context = atom_parse(atom_card_info, rdev->bios);
	if (!rdev->mode_info.atom_context) {
		/* frees atom_card_info and clears the pointers */
		radeon_atombios_fini(rdev);
		return -ENOMEM;
	}

	mutex_init(&rdev->mode_info.atom_context->mutex);
	mutex_init(&rdev->mode_info.atom_context->scratch_mutex);
	radeon_atom_initialize_bios_scratch_regs(rdev->ddev);
	atom_allocate_fb_scratch(rdev->mode_info.atom_context);
	return 0;
}
1015
1016
1017
1018
1019
1020
1021
1022
1023
1024
1025void radeon_atombios_fini(struct radeon_device *rdev)
1026{
1027 if (rdev->mode_info.atom_context) {
1028 kfree(rdev->mode_info.atom_context->scratch);
1029 }
1030 kfree(rdev->mode_info.atom_context);
1031 rdev->mode_info.atom_context = NULL;
1032 kfree(rdev->mode_info.atom_card_info);
1033 rdev->mode_info.atom_card_info = NULL;
1034}
1035
1036
1037
1038
1039
1040
1041
1042
1043
1044
1045
1046
1047
1048
1049
1050
1051
/**
 * radeon_combios_init - set up for a legacy COMBIOS part
 * @rdev: radeon device
 *
 * COMBIOS has no interpreter state to build; only the BIOS scratch
 * registers need initializing.  Always returns 0.
 */
int radeon_combios_init(struct radeon_device *rdev)
{
	radeon_combios_initialize_bios_scratch_regs(rdev->ddev);
	return 0;
}
1057
1058
1059
1060
1061
1062
1063
1064
1065
/**
 * radeon_combios_fini - COMBIOS teardown
 * @rdev: radeon device
 *
 * Intentionally empty: radeon_combios_init() allocates nothing.
 */
void radeon_combios_fini(struct radeon_device *rdev)
{
}
1069
1070
1071
1072
1073
1074
1075
1076
1077
1078
1079
1080static unsigned int radeon_vga_set_decode(void *cookie, bool state)
1081{
1082 struct radeon_device *rdev = cookie;
1083 radeon_vga_set_state(rdev, state);
1084 if (state)
1085 return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
1086 VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1087 else
1088 return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1089}
1090
1091
1092
1093
1094
1095
1096
1097
1098
/**
 * radeon_check_pot_argument - validate a power-of-two module parameter
 * @arg: value to check
 *
 * Returns true when @arg is a power of two.  Note that 0 also passes
 * (0 & -1 == 0), which the callers rely on since 0 typically means
 * "parameter unset/disabled".
 */
static bool radeon_check_pot_argument(int arg)
{
	return !(arg & (arg - 1));
}
1103
1104
1105
1106
1107
1108
1109static int radeon_gart_size_auto(enum radeon_family family)
1110{
1111
1112 if (family >= CHIP_TAHITI)
1113 return 2048;
1114 else if (family >= CHIP_RV770)
1115 return 1024;
1116 else
1117 return 512;
1118}
1119
1120
1121
1122
1123
1124
1125
1126
1127
/**
 * radeon_check_arguments - validate and sanitize module parameters
 * @rdev: radeon device
 *
 * Clamps or resets the radeon_vram_limit, radeon_gart_size,
 * radeon_agpmode, radeon_vm_size and radeon_vm_block_size module
 * parameters to sane values, warning about anything it had to change,
 * and derives rdev->mc.gtt_size from the final GART size.
 */
static void radeon_check_arguments(struct radeon_device *rdev)
{
	/* vram limit must be a power of 2 (0 = no limit) */
	if (!radeon_check_pot_argument(radeon_vram_limit)) {
		dev_warn(rdev->dev, "vram limit (%d) must be a power of 2\n",
			 radeon_vram_limit);
		radeon_vram_limit = 0;
	}

	/* -1 means "auto-select by ASIC family" */
	if (radeon_gart_size == -1) {
		radeon_gart_size = radeon_gart_size_auto(rdev->family);
	}
	/* gtt size must be a power of 2 and >= 32M */
	if (radeon_gart_size < 32) {
		dev_warn(rdev->dev, "gart size (%d) too small\n",
			 radeon_gart_size);
		radeon_gart_size = radeon_gart_size_auto(rdev->family);
	} else if (!radeon_check_pot_argument(radeon_gart_size)) {
		dev_warn(rdev->dev, "gart size (%d) must be a power of 2\n",
			 radeon_gart_size);
		radeon_gart_size = radeon_gart_size_auto(rdev->family);
	}
	rdev->mc.gtt_size = (uint64_t)radeon_gart_size << 20;

	/* AGP mode can only be a listed value */
	switch (radeon_agpmode) {
	case -1:
	case 0:
	case 1:
	case 2:
	case 4:
	case 8:
		break;
	default:
		dev_warn(rdev->dev, "invalid AGP mode %d (valid mode: "
			 "-1, 0, 1, 2, 4, 8)\n", radeon_agpmode);
		radeon_agpmode = 0;
		break;
	}

	/* VM size is in GB and must be a power of 2; default back to 4GB */
	if (!radeon_check_pot_argument(radeon_vm_size)) {
		dev_warn(rdev->dev, "VM size (%d) must be a power of 2\n",
			 radeon_vm_size);
		radeon_vm_size = 4;
	}

	if (radeon_vm_size < 1) {
		dev_warn(rdev->dev, "VM size (%d) too small, min is 1GB\n",
			 radeon_vm_size);
		radeon_vm_size = 4;
	}

	/*
	 * Max GPUVM size for Cayman+ is 1TB.
	 */
	if (radeon_vm_size > 1024) {
		dev_warn(rdev->dev, "VM size (%d) too large, max is 1TB\n",
			 radeon_vm_size);
		radeon_vm_size = 4;
	}

	/* defines number of bits in page table versus page directory;
	 * a page is 4KB so the address space covered by a page table
	 * depends on the block size chosen here */
	if (radeon_vm_block_size == -1) {

		/* total bits covered by the VM address space (GB = 2^30, /4K pages = 2^18 entries per GB) */
		unsigned bits = ilog2(radeon_vm_size) + 18;

		/* small VMs get a single-level-friendly split, larger ones
		 * split the bits roughly evenly between directory and table */
		if (radeon_vm_size <= 8)
			radeon_vm_block_size = bits - 9;
		else
			radeon_vm_block_size = (bits + 3) / 2;

	} else if (radeon_vm_block_size < 9) {
		dev_warn(rdev->dev, "VM page table size (%d) too small\n",
			 radeon_vm_block_size);
		radeon_vm_block_size = 9;
	}

	/* block size must fit inside the address space (vm_size GB = vm_size * 2^10 MB) */
	if (radeon_vm_block_size > 24 ||
	    (radeon_vm_size * 1024) < (1ull << radeon_vm_block_size)) {
		dev_warn(rdev->dev, "VM page table size (%d) too large\n",
			 radeon_vm_block_size);
		radeon_vm_block_size = 9;
	}
}
1217
1218
1219
1220
1221
1222
1223
1224
1225
1226
/**
 * radeon_switcheroo_set_state - vga_switcheroo power-state callback
 * @pdev: pci device
 * @state: requested switcheroo state (ON/OFF)
 *
 * Resumes the device and re-enables output polling when switched on,
 * suspends it when switched off.  On PX systems the OFF transition is
 * handled by runtime PM instead, so it is ignored here.  The LONG_WAKEUP
 * quirk temporarily raises the PCI d3_delay during switch-on.
 */
static void radeon_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct radeon_device *rdev = dev->dev_private;

	/* runtime PM powers PX parts down; don't double-handle OFF */
	if (radeon_is_px(dev) && state == VGA_SWITCHEROO_OFF)
		return;

	if (state == VGA_SWITCHEROO_ON) {
		unsigned d3_delay = dev->pdev->d3_delay;

		printk(KERN_INFO "radeon: switched on\n");
		/* don't suspend or resume card normally */
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;

		/* quirked systems need at least 20ms before resuming */
		if (d3_delay < 20 && (rdev->px_quirk_flags & RADEON_PX_QUIRK_LONG_WAKEUP))
			dev->pdev->d3_delay = 20;

		radeon_resume_kms(dev, true, true);

		dev->pdev->d3_delay = d3_delay;

		dev->switch_power_state = DRM_SWITCH_POWER_ON;
		drm_kms_helper_poll_enable(dev);
	} else {
		printk(KERN_INFO "radeon: switched off\n");
		drm_kms_helper_poll_disable(dev);
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		radeon_suspend_kms(dev, true, true, false);
		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
	}
}
1259
1260
1261
1262
1263
1264
1265
1266
1267
1268
1269static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
1270{
1271 struct drm_device *dev = pci_get_drvdata(pdev);
1272
1273
1274
1275
1276
1277
1278 return dev->open_count == 0;
1279}
1280
/* vga_switcheroo callbacks for hybrid-graphics (PX) systems */
static const struct vga_switcheroo_client_ops radeon_switcheroo_ops = {
	.set_gpu_state = radeon_switcheroo_set_state,
	.reprobe = NULL,
	.can_switch = radeon_switcheroo_can_switch,
};
1286
1287
1288
1289
1290
1291
1292
1293
1294
1295
1296
1297
1298
/**
 * radeon_device_init - initialize the driver for a device
 * @rdev: radeon device to initialize
 * @ddev: associated drm device
 * @pdev: associated pci device
 * @flags: driver flags (family | RADEON_IS_* bits)
 *
 * Initializes the driver state, locks, DMA masks, MMIO/doorbell/IO
 * mappings, VGA arbitration and switcheroo registration, then runs the
 * per-ASIC init (radeon_init) and the post-init self tests.  Called at
 * driver load.  Returns 0 on success or a negative error code.
 */
int radeon_device_init(struct radeon_device *rdev,
		       struct drm_device *ddev,
		       struct pci_dev *pdev,
		       uint32_t flags)
{
	int r, i;
	int dma_bits;
	bool runtime = false;

	rdev->shutdown = false;
	rdev->dev = &pdev->dev;
	rdev->ddev = ddev;
	rdev->pdev = pdev;
	rdev->flags = flags;
	rdev->family = flags & RADEON_FAMILY_MASK;
	rdev->is_atom_bios = false;
	rdev->usec_timeout = RADEON_MAX_USEC_TIMEOUT;
	rdev->mc.gtt_size = 512 * 1024 * 1024;	/* overwritten by radeon_check_arguments() */
	rdev->accel_working = false;
	/* set up ring ids */
	for (i = 0; i < RADEON_NUM_RINGS; i++) {
		rdev->ring[i].idx = i;
	}
	rdev->fence_context = fence_context_alloc(RADEON_NUM_RINGS);

	DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
		 radeon_family_name[rdev->family], pdev->vendor, pdev->device,
		 pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);

	/* mutex initialization is all done here so later code can take
	 * them without ordering concerns */
	mutex_init(&rdev->ring_lock);
	mutex_init(&rdev->dc_hw_i2c_mutex);
	atomic_set(&rdev->ih.lock, 0);
	mutex_init(&rdev->gem.mutex);
	mutex_init(&rdev->pm.mutex);
	mutex_init(&rdev->gpu_clock_mutex);
	mutex_init(&rdev->srbm_mutex);
	mutex_init(&rdev->grbm_idx_mutex);
	init_rwsem(&rdev->pm.mclk_lock);
	init_rwsem(&rdev->exclusive_lock);
	init_waitqueue_head(&rdev->irq.vblank_queue);
	mutex_init(&rdev->mn_lock);
	hash_init(rdev->mn_hash);
	r = radeon_gem_init(rdev);
	if (r)
		return r;

	radeon_check_arguments(rdev);
	/* Adjust VM size here.
	 * Max GPUVM size for cayman+ is 40 bits: radeon_vm_size is in GB,
	 * so << 18 converts GB to 4KB pages. */
	rdev->vm_manager.max_pfn = radeon_vm_size << 18;

	/* Set asic functions */
	r = radeon_asic_init(rdev);
	if (r)
		return r;

	/* all of the newer IGP chips have an internal gart; however
	 * some of them claim AGP in the PCI flags — override that here */
	if ((rdev->family >= CHIP_RS400) &&
	    (rdev->flags & RADEON_IS_IGP)) {
		rdev->flags &= ~RADEON_IS_AGP;
	}

	if (rdev->flags & RADEON_IS_AGP && radeon_agpmode == -1) {
		radeon_agp_disable(rdev);
	}

	/* Set the internal MC address mask: this is the max address of the
	 * GPU's internal address space. */
	if (rdev->family >= CHIP_CAYMAN)
		rdev->mc.mc_mask = 0xffffffffffULL; /* 40 bit MC */
	else if (rdev->family >= CHIP_CEDAR)
		rdev->mc.mc_mask = 0xfffffffffULL; /* 36 bit MC */
	else
		rdev->mc.mc_mask = 0xffffffffULL; /* 32 bit MC */

	/* set DMA mask: AGP and old PCI parts are limited to 32 bits,
	 * everything else can use 40 bits */
	rdev->need_dma32 = false;
	if (rdev->flags & RADEON_IS_AGP)
		rdev->need_dma32 = true;
	if ((rdev->flags & RADEON_IS_PCI) &&
	    (rdev->family <= CHIP_RS740))
		rdev->need_dma32 = true;

	dma_bits = rdev->need_dma32 ? 32 : 40;
	r = pci_set_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		/* fall back to 32-bit DMA */
		rdev->need_dma32 = true;
		dma_bits = 32;
		printk(KERN_WARNING "radeon: No suitable DMA available.\n");
	}
	r = pci_set_consistent_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		pci_set_consistent_dma_mask(rdev->pdev, DMA_BIT_MASK(32));
		printk(KERN_WARNING "radeon: No coherent DMA available.\n");
	}

	/* Registers mapping */
	spin_lock_init(&rdev->mmio_idx_lock);
	spin_lock_init(&rdev->smc_idx_lock);
	spin_lock_init(&rdev->pll_idx_lock);
	spin_lock_init(&rdev->mc_idx_lock);
	spin_lock_init(&rdev->pcie_idx_lock);
	spin_lock_init(&rdev->pciep_idx_lock);
	spin_lock_init(&rdev->pif_idx_lock);
	spin_lock_init(&rdev->cg_idx_lock);
	spin_lock_init(&rdev->uvd_idx_lock);
	spin_lock_init(&rdev->rcu_idx_lock);
	spin_lock_init(&rdev->didt_idx_lock);
	spin_lock_init(&rdev->end_idx_lock);
	/* MMIO registers are in BAR 5 on BONAIRE+, BAR 2 before that */
	if (rdev->family >= CHIP_BONAIRE) {
		rdev->rmmio_base = pci_resource_start(rdev->pdev, 5);
		rdev->rmmio_size = pci_resource_len(rdev->pdev, 5);
	} else {
		rdev->rmmio_base = pci_resource_start(rdev->pdev, 2);
		rdev->rmmio_size = pci_resource_len(rdev->pdev, 2);
	}
	rdev->rmmio = ioremap(rdev->rmmio_base, rdev->rmmio_size);
	if (rdev->rmmio == NULL) {
		return -ENOMEM;
	}
	DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)rdev->rmmio_base);
	DRM_INFO("register mmio size: %u\n", (unsigned)rdev->rmmio_size);

	/* doorbell bar mapping (BONAIRE+ only) */
	if (rdev->family >= CHIP_BONAIRE)
		radeon_doorbell_init(rdev);

	/* io port mapping: take the first I/O BAR we find */
	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
		if (pci_resource_flags(rdev->pdev, i) & IORESOURCE_IO) {
			rdev->rio_mem_size = pci_resource_len(rdev->pdev, i);
			rdev->rio_mem = pci_iomap(rdev->pdev, i, rdev->rio_mem_size);
			break;
		}
	}
	if (rdev->rio_mem == NULL)
		DRM_ERROR("Unable to find PCI I/O BAR\n");

	if (rdev->flags & RADEON_IS_PX)
		radeon_device_handle_px_quirks(rdev);

	/* if we have > 1 VGA cards, then disable the radeon VGA resources
	 * (vgaarb decides via our set_decode callback) */
	vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode);

	if (rdev->flags & RADEON_IS_PX)
		runtime = true;
	vga_switcheroo_register_client(rdev->pdev, &radeon_switcheroo_ops, runtime);
	if (runtime)
		vga_switcheroo_init_domain_pm_ops(rdev->dev, &rdev->vga_pm_domain);

	r = radeon_init(rdev);
	if (r)
		goto failed;

	r = radeon_gem_debugfs_init(rdev);
	if (r) {
		DRM_ERROR("registering gem debugfs failed (%d).\n", r);
	}

	r = radeon_mst_debugfs_init(rdev);
	if (r) {
		DRM_ERROR("registering mst debugfs failed (%d).\n", r);
	}

	/* if acceleration init failed on an AGP card, retry without AGP:
	 * AGP is often the culprit */
	if (rdev->flags & RADEON_IS_AGP && !rdev->accel_working) {
		radeon_asic_reset(rdev);
		radeon_fini(rdev);
		radeon_agp_disable(rdev);
		r = radeon_init(rdev);
		if (r)
			goto failed;
	}

	r = radeon_ib_ring_tests(rdev);
	if (r)
		DRM_ERROR("ib ring test failed (%d).\n", r);

	/* Turks mobility DPM workaround: cycle DPM off/on once after init.
	 * NOTE(review): rationale lives in the original changelog — confirm
	 * via git history before changing. */
	if (rdev->pm.dpm_enabled &&
	    (rdev->pm.pm_method == PM_METHOD_DPM) &&
	    (rdev->family == CHIP_TURKS) &&
	    (rdev->flags & RADEON_IS_MOBILITY)) {
		mutex_lock(&rdev->pm.mutex);
		radeon_dpm_disable(rdev);
		radeon_dpm_enable(rdev);
		mutex_unlock(&rdev->pm.mutex);
	}

	/* optional self tests / benchmarks, controlled by module params */
	if ((radeon_testing & 1)) {
		if (rdev->accel_working)
			radeon_test_moves(rdev);
		else
			DRM_INFO("radeon: acceleration disabled, skipping move tests\n");
	}
	if ((radeon_testing & 2)) {
		if (rdev->accel_working)
			radeon_test_syncing(rdev);
		else
			DRM_INFO("radeon: acceleration disabled, skipping sync tests\n");
	}
	if (radeon_benchmarking) {
		if (rdev->accel_working)
			radeon_benchmark(rdev, radeon_benchmarking);
		else
			DRM_INFO("radeon: acceleration disabled, skipping benchmarks\n");
	}
	return 0;

failed:
	/* balance the runtime-PM reference taken for PX parts */
	if (radeon_is_px(ddev))
		pm_runtime_put_noidle(ddev->dev);
	if (runtime)
		vga_switcheroo_fini_domain_pm_ops(rdev->dev);
	return r;
}
1537
1538static void radeon_debugfs_remove_files(struct radeon_device *rdev);
1539
1540
1541
1542
1543
1544
1545
1546
1547
/**
 * radeon_device_fini - tear down the driver for a device
 * @rdev: radeon device
 *
 * Reverse of radeon_device_init(): evicts VRAM, runs the per-ASIC
 * teardown, unregisters switcheroo/vgaarb clients and unmaps all BARs.
 * Called at driver unload.
 */
void radeon_device_fini(struct radeon_device *rdev)
{
	DRM_INFO("radeon: finishing device.\n");
	rdev->shutdown = true;
	/* evict vram memory */
	radeon_bo_evict_vram(rdev);
	radeon_fini(rdev);
	vga_switcheroo_unregister_client(rdev->pdev);
	if (rdev->flags & RADEON_IS_PX)
		vga_switcheroo_fini_domain_pm_ops(rdev->dev);
	vga_client_register(rdev->pdev, NULL, NULL, NULL);
	if (rdev->rio_mem)
		pci_iounmap(rdev->pdev, rdev->rio_mem);
	rdev->rio_mem = NULL;
	iounmap(rdev->rmmio);
	rdev->rmmio = NULL;
	if (rdev->family >= CHIP_BONAIRE)
		radeon_doorbell_fini(rdev);
	radeon_debugfs_remove_files(rdev);
}
1568
1569
1570
1571
1572
1573
1574
1575
1576
1577
1578
1579
1580
1581
1582
/**
 * radeon_suspend_kms - initiate device suspend
 * @dev: drm device
 * @suspend: put the hardware into D3hot after saving state
 * @fbcon: suspend the fbdev console as well
 * @freeze: hibernation freeze — reset the ASIC instead of D3 (CEDAR+)
 *
 * Turns displays off, unpins scanout/cursor buffers, evicts VRAM,
 * drains all rings, saves BIOS scratch registers and PCI state, then
 * optionally powers the device down.  Returns 0 for success or an
 * error on failure; called at suspend/hibernate time.
 */
int radeon_suspend_kms(struct drm_device *dev, bool suspend,
		       bool fbcon, bool freeze)
{
	struct radeon_device *rdev;
	struct drm_crtc *crtc;
	struct drm_connector *connector;
	int i, r;

	if (dev == NULL || dev->dev_private == NULL) {
		return -ENODEV;
	}

	rdev = dev->dev_private;

	/* switcheroo already powered the device off: nothing to do */
	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	drm_kms_helper_poll_disable(dev);

	drm_modeset_lock_all(dev);
	/* turn off display hw */
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
	}
	drm_modeset_unlock_all(dev);

	/* unpin the front buffers and cursors */
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
		struct radeon_framebuffer *rfb = to_radeon_framebuffer(crtc->primary->fb);
		struct radeon_bo *robj;

		if (radeon_crtc->cursor_bo) {
			struct radeon_bo *robj = gem_to_radeon_bo(radeon_crtc->cursor_bo);
			r = radeon_bo_reserve(robj, false);
			if (r == 0) {
				radeon_bo_unpin(robj);
				radeon_bo_unreserve(robj);
			}
		}

		if (rfb == NULL || rfb->obj == NULL) {
			continue;
		}
		robj = gem_to_radeon_bo(rfb->obj);
		/* don't unpin the fbdev framebuffer object */
		if (!radeon_fbdev_robj_is_fb(rdev, robj)) {
			r = radeon_bo_reserve(robj, false);
			if (r == 0) {
				radeon_bo_unpin(robj);
				radeon_bo_unreserve(robj);
			}
		}
	}
	/* evict vram memory */
	radeon_bo_evict_vram(rdev);

	/* wait for gpu to finish processing current batch */
	for (i = 0; i < RADEON_NUM_RINGS; i++) {
		r = radeon_fence_wait_empty(rdev, i);
		if (r) {
			/* ring didn't drain: force-complete its fences */
			radeon_fence_driver_force_completion(rdev, i);
		}
	}

	radeon_save_bios_scratch_regs(rdev);

	radeon_suspend(rdev);
	radeon_hpd_fini(rdev);
	/* evict remaining vram memory */
	radeon_bo_evict_vram(rdev);

	radeon_agp_suspend(rdev);

	pci_save_state(dev->pdev);
	if (freeze && rdev->family >= CHIP_CEDAR) {
		/* hibernate freeze: reset the ASIC instead of powering down */
		rdev->asic->asic_reset(rdev, true);
		pci_restore_state(dev->pdev);
	} else if (suspend) {
		/* Shut down the device */
		pci_disable_device(dev->pdev);
		pci_set_power_state(dev->pdev, PCI_D3hot);
	}

	if (fbcon) {
		console_lock();
		radeon_fbdev_set_suspend(rdev, 1);
		console_unlock();
	}
	return 0;
}
1675
1676
1677
1678
1679
1680
1681
1682
1683
1684
/**
 * radeon_resume_kms - initiate device resume
 *
 * @dev: drm dev pointer
 * @resume: true to re-enable the PCI device (D0 + restore state)
 * @fbcon: true to also resume the fbdev console
 *
 * Brings the hw out of the suspend state: powers the PCI device back
 * up, resumes the asic, re-initializes power management, re-pins the
 * cursor buffers and restores the display state.  Returns 0 on
 * success, -1 if the PCI device could not be re-enabled.
 */
int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
{
	struct drm_connector *connector;
	struct radeon_device *rdev = dev->dev_private;
	struct drm_crtc *crtc;
	int r;

	/* nothing to do if the other GPU owns the hw (PX) */
	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	if (fbcon) {
		console_lock();
	}
	if (resume) {
		pci_set_power_state(dev->pdev, PCI_D0);
		pci_restore_state(dev->pdev);
		if (pci_enable_device(dev->pdev)) {
			if (fbcon)
				console_unlock();
			return -1;
		}
	}
	/* resume AGP if in use */
	radeon_agp_resume(rdev);
	radeon_resume(rdev);

	r = radeon_ib_ring_tests(rdev);
	if (r)
		DRM_ERROR("ib ring test failed (%d).\n", r);

	if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
		/* do dpm late init */
		r = radeon_pm_late_init(rdev);
		if (r) {
			rdev->pm.dpm_enabled = false;
			DRM_ERROR("radeon_pm_late_init failed, disabling dpm\n");
		}
	} else {
		/* resume old pm late */
		radeon_pm_resume(rdev);
	}

	radeon_restore_bios_scratch_regs(rdev);

	/* pin cursors again; suspend unpinned them */
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);

		if (radeon_crtc->cursor_bo) {
			struct radeon_bo *robj = gem_to_radeon_bo(radeon_crtc->cursor_bo);
			r = radeon_bo_reserve(robj, false);
			if (r == 0) {
				/* pre-AVIVO parts need the cursor below 128MB
				 * (1 << 27); AVIVO+ has no such restriction */
				r = radeon_bo_pin_restricted(robj,
							     RADEON_GEM_DOMAIN_VRAM,
							     ASIC_IS_AVIVO(rdev) ?
							     0 : 1 << 27,
							     &radeon_crtc->cursor_addr);
				if (r != 0)
					DRM_ERROR("Failed to pin cursor BO (%d)\n", r);
				radeon_bo_unreserve(robj);
			}
		}
	}

	/* init dig PHYs, disp eng pll */
	if (rdev->is_atom_bios) {
		radeon_atom_encoder_init(rdev);
		radeon_atom_disp_eng_pll_init(rdev);
		/* turn on the backlight */
		if (rdev->mode_info.bl_encoder) {
			u8 bl_level = radeon_get_backlight_level(rdev,
								 rdev->mode_info.bl_encoder);
			radeon_set_backlight_level(rdev, rdev->mode_info.bl_encoder,
						   bl_level);
		}
	}
	/* reset hpd state */
	radeon_hpd_init(rdev);
	/* blat the mode back in */
	if (fbcon) {
		drm_helper_resume_force_mode(dev);
		/* turn on display hw */
		drm_modeset_lock_all(dev);
		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
			drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
		}
		drm_modeset_unlock_all(dev);
	}

	drm_kms_helper_poll_enable(dev);

	/* set the power state here in case we are a PX system or headless */
	if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled)
		radeon_pm_compute_clocks(rdev);

	if (fbcon) {
		radeon_fbdev_set_suspend(rdev, 0);
		console_unlock();
	}

	return 0;
}
1788
1789
1790
1791
1792
1793
1794
1795
1796
/**
 * radeon_gpu_reset - reset the asic
 *
 * @rdev: radeon device pointer
 *
 * Attempts to reset the GPU after a hang: backs up the unprocessed ring
 * contents, resets the asic, resumes it and replays the saved commands.
 * Returns 0 on success, -EAGAIN if the saved commands could not be
 * replayed (caller may retry), or a negative error code on failure.
 *
 * NOTE(review): the lock dance is intentional — exclusive_lock is taken
 * for write, downgraded to read before forcing the mode back (which can
 * call back into the driver), and released with up_read().
 */
int radeon_gpu_reset(struct radeon_device *rdev)
{
	unsigned ring_sizes[RADEON_NUM_RINGS];
	uint32_t *ring_data[RADEON_NUM_RINGS];

	bool saved = false;

	int i, r;
	int resched;

	down_write(&rdev->exclusive_lock);

	/* someone else already handled the reset */
	if (!rdev->needs_reset) {
		up_write(&rdev->exclusive_lock);
		return 0;
	}

	atomic_inc(&rdev->gpu_reset_counter);

	radeon_save_bios_scratch_regs(rdev);
	/* block TTM delayed delete while the hw is down */
	resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
	radeon_suspend(rdev);
	radeon_hpd_fini(rdev);

	/* save the not-yet-processed commands from each ring */
	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		ring_sizes[i] = radeon_ring_backup(rdev, &rdev->ring[i],
						   &ring_data[i]);
		if (ring_sizes[i]) {
			saved = true;
			dev_info(rdev->dev, "Saved %d dwords of commands "
				 "on ring %d.\n", ring_sizes[i], i);
		}
	}

	r = radeon_asic_reset(rdev);
	if (!r) {
		dev_info(rdev->dev, "GPU reset succeeded, trying to resume\n");
		radeon_resume(rdev);
	}

	radeon_restore_bios_scratch_regs(rdev);

	/* replay saved commands, or force-complete fences if reset failed */
	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		if (!r && ring_data[i]) {
			radeon_ring_restore(rdev, &rdev->ring[i],
					    ring_sizes[i], ring_data[i]);
		} else {
			radeon_fence_driver_force_completion(rdev, i);
			kfree(ring_data[i]);
		}
	}

	if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
		/* do dpm late init */
		r = radeon_pm_late_init(rdev);
		if (r) {
			rdev->pm.dpm_enabled = false;
			DRM_ERROR("radeon_pm_late_init failed, disabling dpm\n");
		}
	} else {
		/* resume old pm late */
		radeon_pm_resume(rdev);
	}

	/* init dig PHYs, disp eng pll */
	if (rdev->is_atom_bios) {
		radeon_atom_encoder_init(rdev);
		radeon_atom_disp_eng_pll_init(rdev);
		/* turn on the backlight */
		if (rdev->mode_info.bl_encoder) {
			u8 bl_level = radeon_get_backlight_level(rdev,
								 rdev->mode_info.bl_encoder);
			radeon_set_backlight_level(rdev, rdev->mode_info.bl_encoder,
						   bl_level);
		}
	}
	/* reset hpd state */
	radeon_hpd_init(rdev);

	ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);

	rdev->in_reset = true;
	rdev->needs_reset = false;

	downgrade_write(&rdev->exclusive_lock);

	drm_helper_resume_force_mode(rdev->ddev);

	/* set the power state here in case we are a PX system or headless */
	if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled)
		radeon_pm_compute_clocks(rdev);

	if (!r) {
		r = radeon_ib_ring_tests(rdev);
		if (r && saved)
			r = -EAGAIN;
	} else {
		/* bad news, how to tell it to userspace ? */
		dev_info(rdev->dev, "GPU reset failed\n");
	}

	/* retry the reset if the IB tests failed but commands were saved */
	rdev->needs_reset = r == -EAGAIN;
	rdev->in_reset = false;

	up_read(&rdev->exclusive_lock);
	return r;
}
1905
1906
1907
1908
1909
1910int radeon_debugfs_add_files(struct radeon_device *rdev,
1911 struct drm_info_list *files,
1912 unsigned nfiles)
1913{
1914 unsigned i;
1915
1916 for (i = 0; i < rdev->debugfs_count; i++) {
1917 if (rdev->debugfs[i].files == files) {
1918
1919 return 0;
1920 }
1921 }
1922
1923 i = rdev->debugfs_count + 1;
1924 if (i > RADEON_DEBUGFS_MAX_COMPONENTS) {
1925 DRM_ERROR("Reached maximum number of debugfs components.\n");
1926 DRM_ERROR("Report so we increase "
1927 "RADEON_DEBUGFS_MAX_COMPONENTS.\n");
1928 return -EINVAL;
1929 }
1930 rdev->debugfs[rdev->debugfs_count].files = files;
1931 rdev->debugfs[rdev->debugfs_count].num_files = nfiles;
1932 rdev->debugfs_count = i;
1933#if defined(CONFIG_DEBUG_FS)
1934 drm_debugfs_create_files(files, nfiles,
1935 rdev->ddev->control->debugfs_root,
1936 rdev->ddev->control);
1937 drm_debugfs_create_files(files, nfiles,
1938 rdev->ddev->primary->debugfs_root,
1939 rdev->ddev->primary);
1940#endif
1941 return 0;
1942}
1943
/**
 * radeon_debugfs_remove_files - unregister all debugfs entries
 *
 * @rdev: radeon device pointer
 *
 * Removes every debugfs file set previously registered via
 * radeon_debugfs_add_files() from both the control and primary DRM
 * minors.  No-op when debugfs is compiled out.
 */
static void radeon_debugfs_remove_files(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	unsigned idx;

	for (idx = 0; idx < rdev->debugfs_count; idx++) {
		drm_debugfs_remove_files(rdev->debugfs[idx].files,
					 rdev->debugfs[idx].num_files,
					 rdev->ddev->control);
		drm_debugfs_remove_files(rdev->debugfs[idx].files,
					 rdev->debugfs[idx].num_files,
					 rdev->ddev->primary);
	}
#endif
}
1959
1960#if defined(CONFIG_DEBUG_FS)
/* Per-minor debugfs init hook; entries are created lazily via
 * radeon_debugfs_add_files(), so there is nothing to do here. */
int radeon_debugfs_init(struct drm_minor *minor)
{
	return 0;
}
1965
/* Per-minor debugfs cleanup hook; entries are removed explicitly in
 * radeon_debugfs_remove_files(), so there is nothing to do here. */
void radeon_debugfs_cleanup(struct drm_minor *minor)
{
}
1969#endif
1970