#include <linux/console.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/radeon_drm.h>
#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
#include <linux/efi.h>
#include "radeon_reg.h"
#include "radeon.h"
#include "atom.h"

static const char radeon_family_name[][16] = {
	"R100",
	"RV100",
	"RS100",
	"RV200",
	"RS200",
	"R200",
	"RV250",
	"RS300",
	"RV280",
	"R300",
	"R350",
	"RV350",
	"RV380",
	"R420",
	"R423",
	"RV410",
	"RS400",
	"RS480",
	"RS600",
	"RS690",
	"RS740",
	"RV515",
	"R520",
	"RV530",
	"RV560",
	"RV570",
	"R580",
	"R600",
	"RV610",
	"RV630",
	"RV670",
	"RV620",
	"RV635",
	"RS780",
	"RS880",
	"RV770",
	"RV730",
	"RV710",
	"RV740",
	"CEDAR",
	"REDWOOD",
	"JUNIPER",
	"CYPRESS",
	"HEMLOCK",
	"PALM",
	"SUMO",
	"SUMO2",
	"BARTS",
	"TURKS",
	"CAICOS",
	"CAYMAN",
	"ARUBA",
	"TAHITI",
	"PITCAIRN",
	"VERDE",
	"OLAND",
	"HAINAN",
	"BONAIRE",
	"KAVERI",
	"KABINI",
	"HAWAII",
	"MULLINS",
	"LAST",
};
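/*
 * PX (hybrid graphics) quirk flags: DISABLE_PX turns dynamic power
 * switching off entirely on the listed systems, LONG_WAKEUP lengthens
 * the PCI d3 delay so the dGPU has enough time to wake back up.
 */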
#define RADEON_PX_QUIRK_DISABLE_PX (1 << 0)
#define RADEON_PX_QUIRK_LONG_WAKEUP (1 << 1)

struct radeon_px_quirk {
	u32 chip_vendor;
	u32 chip_device;
	u32 subsys_vendor;
	u32 subsys_device;
	u32 px_quirk_flags;
};

static struct radeon_px_quirk radeon_px_quirk_list[] = {
	/* entries are matched on PCI chip vendor/device and
	 * subsystem vendor/device ids
	 */
	{ PCI_VENDOR_ID_ATI, 0x6760, 0x1025, 0x0672, RADEON_PX_QUIRK_DISABLE_PX },
	{ PCI_VENDOR_ID_ATI, 0x6741, 0x1043, 0x108c, RADEON_PX_QUIRK_DISABLE_PX },
	{ PCI_VENDOR_ID_ATI, 0x6840, 0x1043, 0x2122, RADEON_PX_QUIRK_DISABLE_PX },
	{ PCI_VENDOR_ID_ATI, 0x6741, PCI_VENDOR_ID_APPLE, 0x00e2, RADEON_PX_QUIRK_LONG_WAKEUP },
	/* list terminator */
	{ 0, 0, 0, 0, 0 },
};
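/**
 * radeon_is_px - check whether the device runs in PX (hybrid graphics) mode
 * @dev: drm device pointer
 *
 * Returns true if the device is a PX (PowerXpress) dGPU, false otherwise.
 */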
bool radeon_is_px(struct drm_device *dev)
{
	struct radeon_device *rdev = dev->dev_private;

	if (rdev->flags & RADEON_IS_PX)
		return true;
	return false;
}
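/**
 * radeon_device_handle_px_quirks - apply PX quirks for the current board
 * @rdev: radeon device pointer
 *
 * Looks the device up in radeon_px_quirk_list and applies any matching
 * quirk flags, clearing RADEON_IS_PX when PX is known to be broken.
 */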
static void radeon_device_handle_px_quirks(struct radeon_device *rdev)
{
	struct radeon_px_quirk *p = radeon_px_quirk_list;

	/* Apply PX quirks */
	while (p && p->chip_device != 0) {
		if (rdev->pdev->vendor == p->chip_vendor &&
		    rdev->pdev->device == p->chip_device &&
		    rdev->pdev->subsystem_vendor == p->subsys_vendor &&
		    rdev->pdev->subsystem_device == p->subsys_device) {
			rdev->px_quirk_flags = p->px_quirk_flags;
			break;
		}
		++p;
	}

	if (rdev->px_quirk_flags & RADEON_PX_QUIRK_DISABLE_PX)
		rdev->flags &= ~RADEON_IS_PX;
}
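/**
 * radeon_program_register_sequence - program an array of registers
 * @rdev: radeon device pointer
 * @registers: register / and-mask / or-mask triplets
 * @array_size: number of u32 entries (must be a multiple of 3)
 *
 * For each triplet, reads the register, clears the bits in the and-mask,
 * sets the bits in the or-mask and writes the result back. An and-mask of
 * 0xffffffff means the or-mask is written directly. Used to program
 * "golden" register settings.
 */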
void radeon_program_register_sequence(struct radeon_device *rdev,
				      const u32 *registers,
				      const u32 array_size)
{
	u32 tmp, reg, and_mask, or_mask;
	int i;

	if (array_size % 3)
		return;

	for (i = 0; i < array_size; i += 3) {
		reg = registers[i + 0];
		and_mask = registers[i + 1];
		or_mask = registers[i + 2];

		if (and_mask == 0xffffffff) {
			tmp = or_mask;
		} else {
			tmp = RREG32(reg);
			tmp &= ~and_mask;
			tmp |= or_mask;
		}
		WREG32(reg, tmp);
	}
}
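/* Reset the asic by writing the reset magic to PCI config offset 0x7c. */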
void radeon_pci_config_reset(struct radeon_device *rdev)
{
	pci_write_config_dword(rdev->pdev, 0x7c, RADEON_ASIC_RESET_DATA);
}
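/*
 * GPU surface register helpers.
 */
/**
 * radeon_surface_init - clear the GPU surface registers
 * @rdev: radeon device pointer
 *
 * On pre-r600 asics, clears any surface registers that are not backed by
 * a buffer object so that stale settings left by the vbios cannot affect
 * rendering, then resets the global surface control register.
 */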
void radeon_surface_init(struct radeon_device *rdev)
{
	/* surface registers only exist on pre-r600 asics */
	if (rdev->family < CHIP_R600) {
		int i;

		for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {
			if (rdev->surface_regs[i].bo)
				radeon_bo_get_surface_reg(rdev->surface_regs[i].bo);
			else
				radeon_clear_surface_reg(rdev, i);
		}
		/* enable surfaces */
		WREG32(RADEON_SURFACE_CNTL, 0);
	}
}
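/*
 * GPU scratch registers helpers.
 */
/**
 * radeon_scratch_init - set up the driver's scratch register bookkeeping
 * @rdev: radeon device pointer
 *
 * Marks the scratch registers as free. Pre-r300 asics expose 5 usable
 * scratch registers, later asics 7.
 */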
void radeon_scratch_init(struct radeon_device *rdev)
{
	int i;

	if (rdev->family < CHIP_R300) {
		rdev->scratch.num_reg = 5;
	} else {
		rdev->scratch.num_reg = 7;
	}
	rdev->scratch.reg_base = RADEON_SCRATCH_REG0;
	for (i = 0; i < rdev->scratch.num_reg; i++) {
		rdev->scratch.free[i] = true;
		rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4);
	}
}
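/**
 * radeon_scratch_get - allocate a scratch register
 * @rdev: radeon device pointer
 * @reg: filled with the MMIO offset of the allocated register
 *
 * Returns 0 on success, -EINVAL if all scratch registers are in use.
 */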
int radeon_scratch_get(struct radeon_device *rdev, uint32_t *reg)
{
	int i;

	for (i = 0; i < rdev->scratch.num_reg; i++) {
		if (rdev->scratch.free[i]) {
			rdev->scratch.free[i] = false;
			*reg = rdev->scratch.reg[i];
			return 0;
		}
	}
	return -EINVAL;
}
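/**
 * radeon_scratch_free - release a scratch register
 * @rdev: radeon device pointer
 * @reg: MMIO offset previously returned by radeon_scratch_get()
 */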
void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg)
{
	int i;

	for (i = 0; i < rdev->scratch.num_reg; i++) {
		if (rdev->scratch.reg[i] == reg) {
			rdev->scratch.free[i] = true;
			return;
		}
	}
}
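/*
 * Typical scratch register round trip, as used by the ring tests
 * (a sketch for illustration, not code from this file):
 *
 *	uint32_t scratch;
 *
 *	if (!radeon_scratch_get(rdev, &scratch)) {
 *		WREG32(scratch, 0xCAFEDEAD);
 *		... have the GPU write a value back to 'scratch' ...
 *		radeon_scratch_free(rdev, scratch);
 *	}
 */

/*
 * GPU doorbell aperture helpers (CIK and newer).
 */
/**
 * radeon_doorbell_init - map the doorbell aperture
 * @rdev: radeon device pointer
 *
 * Maps the doorbell BAR and sets up the allocation bitmap.
 * Returns 0 on success, -EINVAL or -ENOMEM on failure.
 */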
static int radeon_doorbell_init(struct radeon_device *rdev)
{
	/* doorbell bar mapping */
	rdev->doorbell.base = pci_resource_start(rdev->pdev, 2);
	rdev->doorbell.size = pci_resource_len(rdev->pdev, 2);

	rdev->doorbell.num_doorbells = min_t(u32, rdev->doorbell.size / sizeof(u32), RADEON_MAX_DOORBELLS);
	if (rdev->doorbell.num_doorbells == 0)
		return -EINVAL;

	rdev->doorbell.ptr = ioremap(rdev->doorbell.base, rdev->doorbell.num_doorbells * sizeof(u32));
	if (rdev->doorbell.ptr == NULL) {
		return -ENOMEM;
	}
	DRM_INFO("doorbell mmio base: 0x%08X\n", (uint32_t)rdev->doorbell.base);
	DRM_INFO("doorbell mmio size: %u\n", (unsigned)rdev->doorbell.size);

	memset(&rdev->doorbell.used, 0, sizeof(rdev->doorbell.used));

	return 0;
}

/**
 * radeon_doorbell_fini - unmap the doorbell aperture
 * @rdev: radeon device pointer
 */
static void radeon_doorbell_fini(struct radeon_device *rdev)
{
	iounmap(rdev->doorbell.ptr);
	rdev->doorbell.ptr = NULL;
}

/**
 * radeon_doorbell_get - allocate a doorbell slot
 * @rdev: radeon device pointer
 * @doorbell: filled with the index of the allocated doorbell
 *
 * Returns 0 on success, -EINVAL if no slot is free.
 */
int radeon_doorbell_get(struct radeon_device *rdev, u32 *doorbell)
{
	unsigned long offset = find_first_zero_bit(rdev->doorbell.used, rdev->doorbell.num_doorbells);
	if (offset < rdev->doorbell.num_doorbells) {
		__set_bit(offset, rdev->doorbell.used);
		*doorbell = offset;
		return 0;
	} else {
		return -EINVAL;
	}
}

/**
 * radeon_doorbell_free - release a doorbell slot
 * @rdev: radeon device pointer
 * @doorbell: doorbell index returned by radeon_doorbell_get()
 */
void radeon_doorbell_free(struct radeon_device *rdev, u32 doorbell)
{
	if (doorbell < rdev->doorbell.num_doorbells)
		__clear_bit(doorbell, rdev->doorbell.used);
}
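/**
 * radeon_doorbell_get_kfd_info - report doorbell aperture info to amdkfd
 * @rdev: radeon device pointer
 * @aperture_base: output returning the doorbell aperture physical address
 * @aperture_size: output returning the doorbell aperture size in bytes
 * @start_offset: output returning the first byte usable by amdkfd
 *
 * The first num_doorbells entries are reserved for radeon itself; amdkfd
 * gets whatever is left in the aperture, or an empty aperture if nothing
 * remains.
 */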
void radeon_doorbell_get_kfd_info(struct radeon_device *rdev,
				  phys_addr_t *aperture_base,
				  size_t *aperture_size,
				  size_t *start_offset)
{
	/* The first num_doorbells are used by radeon.
	 * amdkfd takes whatever's left in the aperture.
	 */
	if (rdev->doorbell.size > rdev->doorbell.num_doorbells * sizeof(u32)) {
		*aperture_base = rdev->doorbell.base;
		*aperture_size = rdev->doorbell.size;
		*start_offset = rdev->doorbell.num_doorbells * sizeof(u32);
	} else {
		*aperture_base = 0;
		*aperture_size = 0;
		*start_offset = 0;
	}
}
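/*
 * radeon_wb_*() - Writeback helpers.
 *
 * Writeback lets the GPU write fence values and ring read pointers into
 * system memory instead of the driver polling registers, which is cheaper
 * for the CPU.
 */
/**
 * radeon_wb_disable - stop using writeback
 * @rdev: radeon device pointer
 *
 * The writeback buffer itself stays allocated; rings fall back to
 * register reads and writes.
 */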
void radeon_wb_disable(struct radeon_device *rdev)
{
	rdev->wb.enabled = false;
}

/**
 * radeon_wb_fini - disable writeback and free the writeback buffer
 * @rdev: radeon device pointer
 */
void radeon_wb_fini(struct radeon_device *rdev)
{
	radeon_wb_disable(rdev);
	if (rdev->wb.wb_obj) {
		if (!radeon_bo_reserve(rdev->wb.wb_obj, false)) {
			radeon_bo_kunmap(rdev->wb.wb_obj);
			radeon_bo_unpin(rdev->wb.wb_obj);
			radeon_bo_unreserve(rdev->wb.wb_obj);
		}
		radeon_bo_unref(&rdev->wb.wb_obj);
		rdev->wb.wb = NULL;
		rdev->wb.wb_obj = NULL;
	}
}
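/**
 * radeon_wb_init - allocate, pin and map the writeback buffer
 * @rdev: radeon device pointer
 *
 * Allocates a one-page GTT buffer object for writeback, then decides
 * whether writeback (and r600-style EOP events) can actually be used on
 * this asic. Returns 0 on success or a negative error code.
 */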
int radeon_wb_init(struct radeon_device *rdev)
{
	int r;

	if (rdev->wb.wb_obj == NULL) {
		r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true,
				     RADEON_GEM_DOMAIN_GTT, 0, NULL, NULL,
				     &rdev->wb.wb_obj);
		if (r) {
			dev_warn(rdev->dev, "(%d) create WB bo failed\n", r);
			return r;
		}
		r = radeon_bo_reserve(rdev->wb.wb_obj, false);
		if (unlikely(r != 0)) {
			radeon_wb_fini(rdev);
			return r;
		}
		r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
				  &rdev->wb.gpu_addr);
		if (r) {
			radeon_bo_unreserve(rdev->wb.wb_obj);
			dev_warn(rdev->dev, "(%d) pin WB bo failed\n", r);
			radeon_wb_fini(rdev);
			return r;
		}
		r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
		radeon_bo_unreserve(rdev->wb.wb_obj);
		if (r) {
			dev_warn(rdev->dev, "(%d) map WB bo failed\n", r);
			radeon_wb_fini(rdev);
			return r;
		}
	}

	/* clear wb memory */
	memset((char *)rdev->wb.wb, 0, RADEON_GPU_PAGE_SIZE);
	/* disable event_write fences */
	rdev->wb.use_event = false;
	/* disabled via module param */
	if (radeon_no_wb == 1) {
		rdev->wb.enabled = false;
	} else {
		if (rdev->flags & RADEON_IS_AGP) {
			/* often unreliable on AGP */
			rdev->wb.enabled = false;
		} else if (rdev->family < CHIP_R300) {
			/* often unreliable on pre-r300 */
			rdev->wb.enabled = false;
		} else {
			rdev->wb.enabled = true;
			/* event_write fences are only available on r600+ */
			if (rdev->family >= CHIP_R600) {
				rdev->wb.use_event = true;
			}
		}
	}
	/* always use writeback/events on NI, APUs */
	if (rdev->family >= CHIP_PALM) {
		rdev->wb.enabled = true;
		rdev->wb.use_event = true;
	}

	dev_info(rdev->dev, "WB %sabled\n", rdev->wb.enabled ? "en" : "dis");

	return 0;
}
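/*
 * MC (memory controller) address space helpers.
 *
 * The GPU's internal MC address space holds both VRAM and the GTT
 * aperture; radeon_vram_location() and radeon_gtt_location() carve that
 * space up so the two ranges never overlap.
 */
/**
 * radeon_vram_location - place VRAM in the MC address space
 * @rdev: radeon device pointer
 * @mc: memory controller structure holding the MC parameters
 * @base: base address at which to put VRAM
 *
 * Puts VRAM at @base, shrinking it to the PCI aperture size when it would
 * not fit under the MC address mask or, on AGP, when it would collide
 * with the GTT range. Also honors the radeon_vram_limit module parameter.
 */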
void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 base)
{
	uint64_t limit = (uint64_t)radeon_vram_limit << 20;

	mc->vram_start = base;
	if (mc->mc_vram_size > (rdev->mc.mc_mask - base + 1)) {
		dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
		mc->real_vram_size = mc->aper_size;
		mc->mc_vram_size = mc->aper_size;
	}
	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
	if (rdev->flags & RADEON_IS_AGP && mc->vram_end > mc->gtt_start && mc->vram_start <= mc->gtt_end) {
		dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
		mc->real_vram_size = mc->aper_size;
		mc->mc_vram_size = mc->aper_size;
	}
	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
	if (limit && limit < mc->real_vram_size)
		mc->real_vram_size = limit;
	dev_info(rdev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
			mc->mc_vram_size >> 20, mc->vram_start,
			mc->vram_end, mc->real_vram_size >> 20);
}
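/**
 * radeon_gtt_location - place GTT in the MC address space
 * @rdev: radeon device pointer
 * @mc: memory controller structure holding the MC parameters
 *
 * Puts the GTT aperture in whichever hole (before or after VRAM) is
 * larger, shrinking the GTT if it does not fit, and aligns the start to
 * gtt_base_align.
 */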
void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
{
	u64 size_af, size_bf;

	/* space available after and before the VRAM range */
	size_af = ((rdev->mc.mc_mask - mc->vram_end) + mc->gtt_base_align) & ~mc->gtt_base_align;
	size_bf = mc->vram_start & ~mc->gtt_base_align;
	if (size_bf > size_af) {
		if (mc->gtt_size > size_bf) {
			dev_warn(rdev->dev, "limiting GTT\n");
			mc->gtt_size = size_bf;
		}
		mc->gtt_start = (mc->vram_start & ~mc->gtt_base_align) - mc->gtt_size;
	} else {
		if (mc->gtt_size > size_af) {
			dev_warn(rdev->dev, "limiting GTT\n");
			mc->gtt_size = size_af;
		}
		mc->gtt_start = (mc->vram_end + 1 + mc->gtt_base_align) & ~mc->gtt_base_align;
	}
	mc->gtt_end = mc->gtt_start + mc->gtt_size - 1;
	dev_info(rdev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n",
			mc->gtt_size >> 20, mc->gtt_start, mc->gtt_end);
}
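/*
 * GPU helpers.
 */
/**
 * radeon_card_posted - check whether the hw has already been initialized
 * @rdev: radeon device pointer
 *
 * Checks whether the vbios (or a previous driver) has already posted the
 * card by looking at the CRTC enable bits and, failing that, at the
 * memory size register. Returns true if the card is posted.
 */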
bool radeon_card_posted(struct radeon_device *rdev)
{
	uint32_t reg;

	/* Apple EFI systems do not post pre-r600 cards; force a post */
	if (efi_enabled(EFI_BOOT) &&
	    (rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE) &&
	    (rdev->family < CHIP_R600))
		return false;

	if (ASIC_IS_NODCE(rdev))
		goto check_memsize;

	/* first check CRTCs */
	if (ASIC_IS_DCE4(rdev)) {
		reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET);
		if (rdev->num_crtc >= 4) {
			reg |= RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) |
				RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET);
		}
		if (rdev->num_crtc >= 6) {
			reg |= RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) |
				RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
		}
		if (reg & EVERGREEN_CRTC_MASTER_EN)
			return true;
	} else if (ASIC_IS_AVIVO(rdev)) {
		reg = RREG32(AVIVO_D1CRTC_CONTROL) |
		      RREG32(AVIVO_D2CRTC_CONTROL);
		if (reg & AVIVO_CRTC_EN) {
			return true;
		}
	} else {
		reg = RREG32(RADEON_CRTC_GEN_CNTL) |
		      RREG32(RADEON_CRTC2_GEN_CNTL);
		if (reg & RADEON_CRTC_EN) {
			return true;
		}
	}

check_memsize:
	/* then check MEM_SIZE, in case the crtcs are off */
	if (rdev->family >= CHIP_R600)
		reg = RREG32(R600_CONFIG_MEMSIZE);
	else
		reg = RREG32(RADEON_CONFIG_MEMSIZE);

	if (reg)
		return true;

	return false;
}
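/**
 * radeon_update_bandwidth_info - update display bandwidth params
 * @rdev: radeon device pointer
 *
 * Refreshes the fixed-point sclk/mclk copies used by the display
 * watermark code whenever the power state changes.
 */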
void radeon_update_bandwidth_info(struct radeon_device *rdev)
{
	fixed20_12 a;
	u32 sclk = rdev->pm.current_sclk;
	u32 mclk = rdev->pm.current_mclk;

	/* sclk/mclk in Mhz */
	a.full = dfixed_const(100);
	rdev->pm.sclk.full = dfixed_const(sclk);
	rdev->pm.sclk.full = dfixed_div(rdev->pm.sclk, a);
	rdev->pm.mclk.full = dfixed_const(mclk);
	rdev->pm.mclk.full = dfixed_div(rdev->pm.mclk, a);

	if (rdev->flags & RADEON_IS_IGP) {
		a.full = dfixed_const(16);
		/* core_bandwidth = sclk(Mhz) * 16 */
		rdev->pm.core_bandwidth.full = dfixed_div(rdev->pm.sclk, a);
	}
}
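/**
 * radeon_boot_test_post_card - post the card if necessary
 * @rdev: radeon device pointer
 *
 * Posts the card using the vbios (ATOM or COM bios tables) if it has not
 * been posted already. Returns true on success or if the card is already
 * posted, false when there is no bios to post with.
 */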
bool radeon_boot_test_post_card(struct radeon_device *rdev)
{
	if (radeon_card_posted(rdev))
		return true;

	if (rdev->bios) {
		DRM_INFO("GPU not posted. posting now...\n");
		if (rdev->is_atom_bios)
			atom_asic_init(rdev->mode_info.atom_context);
		else
			radeon_combios_asic_init(rdev->ddev);
		return true;
	} else {
		dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
		return false;
	}
}
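/**
 * radeon_dummy_page_init - allocate and DMA-map the dummy page
 * @rdev: radeon device pointer
 *
 * Allocates the zeroed dummy page that unbound GART entries point at, so
 * stray GPU accesses hit a harmless page. Returns 0 on success or
 * -ENOMEM on failure.
 */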
int radeon_dummy_page_init(struct radeon_device *rdev)
{
	if (rdev->dummy_page.page)
		return 0;
	rdev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO);
	if (rdev->dummy_page.page == NULL)
		return -ENOMEM;
	rdev->dummy_page.addr = pci_map_page(rdev->pdev, rdev->dummy_page.page,
					     0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	if (pci_dma_mapping_error(rdev->pdev, rdev->dummy_page.addr)) {
		dev_err(&rdev->pdev->dev, "Failed to DMA MAP the dummy page\n");
		__free_page(rdev->dummy_page.page);
		rdev->dummy_page.page = NULL;
		return -ENOMEM;
	}
	rdev->dummy_page.entry = radeon_gart_get_page_entry(rdev->dummy_page.addr,
							    RADEON_GART_PAGE_DUMMY);
	return 0;
}

/**
 * radeon_dummy_page_fini - unmap and free the dummy page
 * @rdev: radeon device pointer
 */
void radeon_dummy_page_fini(struct radeon_device *rdev)
{
	if (rdev->dummy_page.page == NULL)
		return;
	pci_unmap_page(rdev->pdev, rdev->dummy_page.addr,
		       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	__free_page(rdev->dummy_page.page);
	rdev->dummy_page.page = NULL;
}
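/*
 * ATOM accessor methods (CAIL).
 *
 * The ATOM BIOS interpreter never touches the hardware directly; it calls
 * back through the struct card_info hooks below to read and write PLL,
 * MC, MMIO and (where available) PIO registers. Note that ATOM register
 * indices are in dwords, hence the reg * 4 when forming MMIO offsets.
 */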
/* read a PLL register via the asic-specific hook */
static uint32_t cail_pll_read(struct card_info *info, uint32_t reg)
{
	struct radeon_device *rdev = info->dev->dev_private;
	uint32_t r;

	r = rdev->pll_rreg(rdev, reg);
	return r;
}

/* write a PLL register via the asic-specific hook */
static void cail_pll_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct radeon_device *rdev = info->dev->dev_private;

	rdev->pll_wreg(rdev, reg, val);
}

/* read a memory controller register */
static uint32_t cail_mc_read(struct card_info *info, uint32_t reg)
{
	struct radeon_device *rdev = info->dev->dev_private;
	uint32_t r;

	r = rdev->mc_rreg(rdev, reg);
	return r;
}

/* write a memory controller register */
static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct radeon_device *rdev = info->dev->dev_private;

	rdev->mc_wreg(rdev, reg, val);
}

/* write an MMIO register (ATOM passes dword indices) */
static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct radeon_device *rdev = info->dev->dev_private;

	WREG32(reg * 4, val);
}

/* read an MMIO register (ATOM passes dword indices) */
static uint32_t cail_reg_read(struct card_info *info, uint32_t reg)
{
	struct radeon_device *rdev = info->dev->dev_private;
	uint32_t r;

	r = RREG32(reg * 4);
	return r;
}

/* write a PIO register */
static void cail_ioreg_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct radeon_device *rdev = info->dev->dev_private;

	WREG32_IO(reg * 4, val);
}

/* read a PIO register */
static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg)
{
	struct radeon_device *rdev = info->dev->dev_private;
	uint32_t r;

	r = RREG32_IO(reg * 4);
	return r;
}
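/**
 * radeon_atombios_init - set up the ATOM BIOS interpreter
 * @rdev: radeon device pointer
 *
 * Allocates the card_info with the CAIL callbacks above, parses the ATOM
 * bios and initializes the scratch register state. Returns 0 on success
 * or -ENOMEM on failure.
 */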
int radeon_atombios_init(struct radeon_device *rdev)
{
	struct card_info *atom_card_info =
	    kzalloc(sizeof(struct card_info), GFP_KERNEL);

	if (!atom_card_info)
		return -ENOMEM;

	rdev->mode_info.atom_card_info = atom_card_info;
	atom_card_info->dev = rdev->ddev;
	atom_card_info->reg_read = cail_reg_read;
	atom_card_info->reg_write = cail_reg_write;
	/* fall back to MMIO for the IIO ops if there is no PIO BAR */
	if (rdev->rio_mem) {
		atom_card_info->ioreg_read = cail_ioreg_read;
		atom_card_info->ioreg_write = cail_ioreg_write;
	} else {
		DRM_ERROR("Unable to find PCI I/O BAR; using MMIO for ATOM IIO\n");
		atom_card_info->ioreg_read = cail_reg_read;
		atom_card_info->ioreg_write = cail_reg_write;
	}
	atom_card_info->mc_read = cail_mc_read;
	atom_card_info->mc_write = cail_mc_write;
	atom_card_info->pll_read = cail_pll_read;
	atom_card_info->pll_write = cail_pll_write;

	rdev->mode_info.atom_context = atom_parse(atom_card_info, rdev->bios);
	if (!rdev->mode_info.atom_context) {
		radeon_atombios_fini(rdev);
		return -ENOMEM;
	}

	mutex_init(&rdev->mode_info.atom_context->mutex);
	mutex_init(&rdev->mode_info.atom_context->scratch_mutex);
	radeon_atom_initialize_bios_scratch_regs(rdev->ddev);
	atom_allocate_fb_scratch(rdev->mode_info.atom_context);
	return 0;
}

/**
 * radeon_atombios_fini - free the ATOM interpreter state
 * @rdev: radeon device pointer
 *
 * Safe to call even if radeon_atombios_init() failed part way through.
 */
void radeon_atombios_fini(struct radeon_device *rdev)
{
	if (rdev->mode_info.atom_context) {
		kfree(rdev->mode_info.atom_context->scratch);
	}
	kfree(rdev->mode_info.atom_context);
	rdev->mode_info.atom_context = NULL;
	kfree(rdev->mode_info.atom_card_info);
	rdev->mode_info.atom_card_info = NULL;
}
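/*
 * COM BIOS helpers. COMBIOS is the legacy, pre-ATOM vbios format used on
 * older asics; it is parsed on demand, so there is little to set up here.
 */
/**
 * radeon_combios_init - initialize COMBIOS support
 * @rdev: radeon device pointer
 *
 * Just initializes the bios scratch registers. Always returns 0.
 */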
int radeon_combios_init(struct radeon_device *rdev)
{
	radeon_combios_initialize_bios_scratch_regs(rdev->ddev);
	return 0;
}

/**
 * radeon_combios_fini - tear down COMBIOS support
 * @rdev: radeon device pointer
 *
 * Nothing to do; kept for symmetry with radeon_combios_init().
 */
void radeon_combios_fini(struct radeon_device *rdev)
{
}

/**
 * radeon_vga_set_decode - enable/disable vga decode
 * @cookie: radeon device pointer
 * @state: enable/disable vga decode
 *
 * vgaarb callback; returns the VGA resources this device decodes in the
 * requested state.
 */
static unsigned int radeon_vga_set_decode(void *cookie, bool state)
{
	struct radeon_device *rdev = cookie;
	radeon_vga_set_state(rdev, state);
	if (state)
		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
	else
		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}
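/**
 * radeon_check_pot_argument - check that an argument is a power of two
 * @arg: value to check
 *
 * Uses the classic bit trick: a power of two has exactly one bit set, so
 * arg & (arg - 1) is zero (this also accepts zero itself).
 */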
static bool radeon_check_pot_argument(int arg)
{
	return (arg & (arg - 1)) == 0;
}
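/**
 * radeon_check_arguments - validate module parameters
 * @rdev: radeon device pointer
 *
 * Validates the vram limit, gart size, agp mode, vm size and vm block
 * size module parameters, replacing invalid values with sane defaults.
 */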
static void radeon_check_arguments(struct radeon_device *rdev)
{
	/* vramlimit must be a power of two */
	if (!radeon_check_pot_argument(radeon_vram_limit)) {
		dev_warn(rdev->dev, "vram limit (%d) must be a power of 2\n",
				radeon_vram_limit);
		radeon_vram_limit = 0;
	}

	if (radeon_gart_size == -1) {
		/* default to a larger gart size on newer asics */
		if (rdev->family >= CHIP_RV770)
			radeon_gart_size = 1024;
		else
			radeon_gart_size = 512;
	}
	/* gart size must be a power of two and at least 32M */
	if (radeon_gart_size < 32) {
		dev_warn(rdev->dev, "gart size (%d) too small\n",
				radeon_gart_size);
		if (rdev->family >= CHIP_RV770)
			radeon_gart_size = 1024;
		else
			radeon_gart_size = 512;
	} else if (!radeon_check_pot_argument(radeon_gart_size)) {
		dev_warn(rdev->dev, "gart size (%d) must be a power of 2\n",
				radeon_gart_size);
		if (rdev->family >= CHIP_RV770)
			radeon_gart_size = 1024;
		else
			radeon_gart_size = 512;
	}
	rdev->mc.gtt_size = (uint64_t)radeon_gart_size << 20;

	/* AGP mode can only be -1, 0, 1, 2, 4, 8 */
	switch (radeon_agpmode) {
	case -1:
	case 0:
	case 1:
	case 2:
	case 4:
	case 8:
		break;
	default:
		dev_warn(rdev->dev, "invalid AGP mode %d (valid mode: "
				"-1, 0, 1, 2, 4, 8)\n", radeon_agpmode);
		radeon_agpmode = 0;
		break;
	}

	if (!radeon_check_pot_argument(radeon_vm_size)) {
		dev_warn(rdev->dev, "VM size (%d) must be a power of 2\n",
			 radeon_vm_size);
		radeon_vm_size = 4;
	}

	if (radeon_vm_size < 1) {
		dev_warn(rdev->dev, "VM size (%d) too small, min is 1GB\n",
			 radeon_vm_size);
		radeon_vm_size = 4;
	}

	/*
	 * Max GPUVM size for Cayman, SI and CI are 40 bits.
	 */
	if (radeon_vm_size > 1024) {
		dev_warn(rdev->dev, "VM size (%d) too large, max is 1TB\n",
			 radeon_vm_size);
		radeon_vm_size = 4;
	}

	/* defines number of bits in page table versus page directory,
	 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
	 * page table and the remaining bits are in the page directory
	 */
	if (radeon_vm_block_size == -1) {

		/* Total bits covered by PD + PTs */
		unsigned bits = ilog2(radeon_vm_size) + 18;

		/* Make sure the PD is 4K in size up to 8GB address space.
		 * Above that split equal between PD and PTs
		 */
		if (radeon_vm_size <= 8)
			radeon_vm_block_size = bits - 9;
		else
			radeon_vm_block_size = (bits + 3) / 2;

	} else if (radeon_vm_block_size < 9) {
		dev_warn(rdev->dev, "VM page table size (%d) too small\n",
			 radeon_vm_block_size);
		radeon_vm_block_size = 9;
	}

	if (radeon_vm_block_size > 24 ||
	    (radeon_vm_size * 1024) < (1ull << radeon_vm_block_size)) {
		dev_warn(rdev->dev, "VM page table size (%d) too large\n",
			 radeon_vm_block_size);
		radeon_vm_block_size = 9;
	}
}
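/**
 * radeon_switcheroo_set_state - power the dGPU up or down
 * @pdev: pci dev pointer
 * @state: vga_switcheroo state
 *
 * vga_switcheroo callback; brings the device out of (or puts it into)
 * D3 by driving the normal suspend/resume paths.
 */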
static void radeon_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct radeon_device *rdev = dev->dev_private;

	/* on PX systems runtime pm powers the card down for us */
	if (radeon_is_px(dev) && state == VGA_SWITCHEROO_OFF)
		return;

	if (state == VGA_SWITCHEROO_ON) {
		unsigned d3_delay = dev->pdev->d3_delay;

		printk(KERN_INFO "radeon: switched on\n");
		/* don't suspend or resume card normally */
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;

		if (d3_delay < 20 && (rdev->px_quirk_flags & RADEON_PX_QUIRK_LONG_WAKEUP))
			dev->pdev->d3_delay = 20;

		radeon_resume_kms(dev, true, true);

		dev->pdev->d3_delay = d3_delay;

		dev->switch_power_state = DRM_SWITCH_POWER_ON;
		drm_kms_helper_poll_enable(dev);
	} else {
		printk(KERN_INFO "radeon: switched off\n");
		drm_kms_helper_poll_disable(dev);
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		radeon_suspend_kms(dev, true, true);
		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
	}
}
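/**
 * radeon_switcheroo_can_switch - check whether we can switch GPUs
 * @pdev: pci dev pointer
 *
 * vga_switcheroo callback; switching is only allowed while no client has
 * the device open.
 */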
static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	/*
	 * The open_count read here is intentionally unlocked: taking
	 * drm_global_mutex would invert lock ordering with the driver load
	 * path, and the check is best-effort anyway.
	 */
	return dev->open_count == 0;
}

static const struct vga_switcheroo_client_ops radeon_switcheroo_ops = {
	.set_gpu_state = radeon_switcheroo_set_state,
	.reprobe = NULL,
	.can_switch = radeon_switcheroo_can_switch,
};
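/**
 * radeon_device_init - initialize the driver
 * @rdev: radeon device pointer
 * @ddev: drm dev pointer
 * @pdev: pci dev pointer
 * @flags: driver flags (family, AGP/PCI/PCIE, PX, ...)
 *
 * Initializes the driver info and hw: locks, MMIO and doorbell mappings,
 * DMA masks, vga arbitration and switcheroo, then calls the asic specific
 * init path. Called at driver startup.
 * Returns 0 for success or an error on failure.
 */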
int radeon_device_init(struct radeon_device *rdev,
		       struct drm_device *ddev,
		       struct pci_dev *pdev,
		       uint32_t flags)
{
	int r, i;
	int dma_bits;
	bool runtime = false;

	rdev->shutdown = false;
	rdev->dev = &pdev->dev;
	rdev->ddev = ddev;
	rdev->pdev = pdev;
	rdev->flags = flags;
	rdev->family = flags & RADEON_FAMILY_MASK;
	rdev->is_atom_bios = false;
	rdev->usec_timeout = RADEON_MAX_USEC_TIMEOUT;
	rdev->mc.gtt_size = 512 * 1024 * 1024;
	rdev->accel_working = false;
	/* set up ring ids */
	for (i = 0; i < RADEON_NUM_RINGS; i++) {
		rdev->ring[i].idx = i;
	}
	rdev->fence_context = fence_context_alloc(RADEON_NUM_RINGS);

	DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X).\n",
		 radeon_family_name[rdev->family], pdev->vendor, pdev->device,
		 pdev->subsystem_vendor, pdev->subsystem_device);

	/* mutex initialization are all done here so we
	 * can recall function without having locking issues
	 */
	mutex_init(&rdev->ring_lock);
	mutex_init(&rdev->dc_hw_i2c_mutex);
	atomic_set(&rdev->ih.lock, 0);
	mutex_init(&rdev->gem.mutex);
	mutex_init(&rdev->pm.mutex);
	mutex_init(&rdev->gpu_clock_mutex);
	mutex_init(&rdev->srbm_mutex);
	mutex_init(&rdev->grbm_idx_mutex);
	init_rwsem(&rdev->pm.mclk_lock);
	init_rwsem(&rdev->exclusive_lock);
	init_waitqueue_head(&rdev->irq.vblank_queue);
	mutex_init(&rdev->mn_lock);
	hash_init(rdev->mn_hash);
	r = radeon_gem_init(rdev);
	if (r)
		return r;

	radeon_check_arguments(rdev);
	/* Adjust VM size here.
	 * Max GPUVM size for cayman+ is 40 bits; radeon_vm_size is in GB,
	 * so shift by 18 to get the page count.
	 */
	rdev->vm_manager.max_pfn = radeon_vm_size << 18;

	/* Set asic functions */
	r = radeon_asic_init(rdev);
	if (r)
		return r;
	/* all of the newer IGP chips have an internal gart.
	 * However some rs4xx report as AGP, so remove that here.
	 */
	if ((rdev->family >= CHIP_RS400) &&
	    (rdev->flags & RADEON_IS_IGP)) {
		rdev->flags &= ~RADEON_IS_AGP;
	}

	if (rdev->flags & RADEON_IS_AGP && radeon_agpmode == -1) {
		radeon_agp_disable(rdev);
	}

	/* Set the internal MC address mask; this is the max address of the
	 * GPU's internal address space.
	 */
	if (rdev->family >= CHIP_CAYMAN)
		rdev->mc.mc_mask = 0xffffffffffULL; /* 40 bit MC */
	else if (rdev->family >= CHIP_CEDAR)
		rdev->mc.mc_mask = 0xfffffffffULL; /* 36 bit MC */
	else
		rdev->mc.mc_mask = 0xffffffffULL; /* 32 bit MC */

	/* set DMA mask + need_dma32 flags.
	 * PCIE - can handle 40-bits.
	 * IGP - can handle 40-bits
	 * AGP - generally dma32 is safest
	 * PCI - dma32 for legacy pci gart, 40 bits on newer asics
	 */
	rdev->need_dma32 = false;
	if (rdev->flags & RADEON_IS_AGP)
		rdev->need_dma32 = true;
	if ((rdev->flags & RADEON_IS_PCI) &&
	    (rdev->family <= CHIP_RS740))
		rdev->need_dma32 = true;

	dma_bits = rdev->need_dma32 ? 32 : 40;
	r = pci_set_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		rdev->need_dma32 = true;
		dma_bits = 32;
		printk(KERN_WARNING "radeon: No suitable DMA available.\n");
	}
	r = pci_set_consistent_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		pci_set_consistent_dma_mask(rdev->pdev, DMA_BIT_MASK(32));
		printk(KERN_WARNING "radeon: No coherent DMA available.\n");
	}
	/* Registers mapping */
	spin_lock_init(&rdev->mmio_idx_lock);
	spin_lock_init(&rdev->smc_idx_lock);
	spin_lock_init(&rdev->pll_idx_lock);
	spin_lock_init(&rdev->mc_idx_lock);
	spin_lock_init(&rdev->pcie_idx_lock);
	spin_lock_init(&rdev->pciep_idx_lock);
	spin_lock_init(&rdev->pif_idx_lock);
	spin_lock_init(&rdev->cg_idx_lock);
	spin_lock_init(&rdev->uvd_idx_lock);
	spin_lock_init(&rdev->rcu_idx_lock);
	spin_lock_init(&rdev->didt_idx_lock);
	spin_lock_init(&rdev->end_idx_lock);
	if (rdev->family >= CHIP_BONAIRE) {
		rdev->rmmio_base = pci_resource_start(rdev->pdev, 5);
		rdev->rmmio_size = pci_resource_len(rdev->pdev, 5);
	} else {
		rdev->rmmio_base = pci_resource_start(rdev->pdev, 2);
		rdev->rmmio_size = pci_resource_len(rdev->pdev, 2);
	}
	rdev->rmmio = ioremap(rdev->rmmio_base, rdev->rmmio_size);
	if (rdev->rmmio == NULL) {
		return -ENOMEM;
	}
	DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)rdev->rmmio_base);
	DRM_INFO("register mmio size: %u\n", (unsigned)rdev->rmmio_size);

	/* doorbell bar mapping */
	if (rdev->family >= CHIP_BONAIRE)
		radeon_doorbell_init(rdev);

	/* io port mapping */
	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
		if (pci_resource_flags(rdev->pdev, i) & IORESOURCE_IO) {
			rdev->rio_mem_size = pci_resource_len(rdev->pdev, i);
			rdev->rio_mem = pci_iomap(rdev->pdev, i, rdev->rio_mem_size);
			break;
		}
	}
	if (rdev->rio_mem == NULL)
		DRM_ERROR("Unable to find PCI I/O BAR\n");

	if (rdev->flags & RADEON_IS_PX)
		radeon_device_handle_px_quirks(rdev);

	/* if we have > 1 VGA cards, then disable the radeon VGA resources.
	 * This will fail for cards that aren't VGA class devices, just
	 * ignore it.
	 */
	vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode);

	if (rdev->flags & RADEON_IS_PX)
		runtime = true;
	vga_switcheroo_register_client(rdev->pdev, &radeon_switcheroo_ops, runtime);
	if (runtime)
		vga_switcheroo_init_domain_pm_ops(rdev->dev, &rdev->vga_pm_domain);
	r = radeon_init(rdev);
	if (r)
		goto failed;

	r = radeon_gem_debugfs_init(rdev);
	if (r) {
		DRM_ERROR("registering gem debugfs failed (%d).\n", r);
	}

	if (rdev->flags & RADEON_IS_AGP && !rdev->accel_working) {
		/* Acceleration not working on AGP card try again
		 * with fallback to PCI or PCIE GART
		 */
		radeon_asic_reset(rdev);
		radeon_fini(rdev);
		radeon_agp_disable(rdev);
		r = radeon_init(rdev);
		if (r)
			goto failed;
	}

	r = radeon_ib_ring_tests(rdev);
	if (r)
		DRM_ERROR("ib ring test failed (%d).\n", r);

	if ((radeon_testing & 1)) {
		if (rdev->accel_working)
			radeon_test_moves(rdev);
		else
			DRM_INFO("radeon: acceleration disabled, skipping move tests\n");
	}
	if ((radeon_testing & 2)) {
		if (rdev->accel_working)
			radeon_test_syncing(rdev);
		else
			DRM_INFO("radeon: acceleration disabled, skipping sync tests\n");
	}
	if (radeon_benchmarking) {
		if (rdev->accel_working)
			radeon_benchmark(rdev, radeon_benchmarking);
		else
			DRM_INFO("radeon: acceleration disabled, skipping benchmarks\n");
	}
	return 0;

failed:
	if (runtime)
		vga_switcheroo_fini_domain_pm_ops(rdev->dev);
	return r;
}

static void radeon_debugfs_remove_files(struct radeon_device *rdev);
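/**
 * radeon_device_fini - tear down the driver
 * @rdev: radeon device pointer
 *
 * Tear down the driver info (all asics). Called at driver shutdown.
 */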
void radeon_device_fini(struct radeon_device *rdev)
{
	DRM_INFO("radeon: finishing device.\n");
	rdev->shutdown = true;
	/* evict vram memory */
	radeon_bo_evict_vram(rdev);
	radeon_fini(rdev);
	vga_switcheroo_unregister_client(rdev->pdev);
	if (rdev->flags & RADEON_IS_PX)
		vga_switcheroo_fini_domain_pm_ops(rdev->dev);
	vga_client_register(rdev->pdev, NULL, NULL, NULL);
	if (rdev->rio_mem)
		pci_iounmap(rdev->pdev, rdev->rio_mem);
	rdev->rio_mem = NULL;
	iounmap(rdev->rmmio);
	rdev->rmmio = NULL;
	if (rdev->family >= CHIP_BONAIRE)
		radeon_doorbell_fini(rdev);
	radeon_debugfs_remove_files(rdev);
}
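/*
 * Suspend & resume.
 */
/**
 * radeon_suspend_kms - initiate device suspend
 * @dev: drm dev pointer
 * @suspend: true if we should put the hw into D3hot
 * @fbcon: true if we should suspend the fbdev console as well
 *
 * Puts the hw in a suspended state: turns off display hw, unpins the
 * front buffers, evicts vram, waits for the rings to drain and saves the
 * bios scratch regs (all asics).
 * Returns 0 for success or an error on failure.
 */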
int radeon_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon)
{
	struct radeon_device *rdev;
	struct drm_crtc *crtc;
	struct drm_connector *connector;
	int i, r;

	if (dev == NULL || dev->dev_private == NULL) {
		return -ENODEV;
	}

	rdev = dev->dev_private;

	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	drm_kms_helper_poll_disable(dev);

	/* turn off display hw */
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
	}

	/* unpin the front buffers */
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		struct radeon_framebuffer *rfb = to_radeon_framebuffer(crtc->primary->fb);
		struct radeon_bo *robj;

		if (rfb == NULL || rfb->obj == NULL) {
			continue;
		}
		robj = gem_to_radeon_bo(rfb->obj);
		/* don't unpin the kernel fb object */
		if (!radeon_fbdev_robj_is_fb(rdev, robj)) {
			r = radeon_bo_reserve(robj, false);
			if (r == 0) {
				radeon_bo_unpin(robj);
				radeon_bo_unreserve(robj);
			}
		}
	}
	/* evict vram memory */
	radeon_bo_evict_vram(rdev);

	/* wait for gpu to finish processing current batch */
	for (i = 0; i < RADEON_NUM_RINGS; i++) {
		r = radeon_fence_wait_empty(rdev, i);
		if (r) {
			/* delay GPU reset to resume */
			radeon_fence_driver_force_completion(rdev, i);
		}
	}

	radeon_save_bios_scratch_regs(rdev);

	radeon_suspend(rdev);
	radeon_hpd_fini(rdev);
	/* evict remaining vram memory */
	radeon_bo_evict_vram(rdev);

	radeon_agp_suspend(rdev);

	pci_save_state(dev->pdev);
	if (suspend) {
		/* Shut down the device */
		pci_disable_device(dev->pdev);
		pci_set_power_state(dev->pdev, PCI_D3hot);
	}

	if (fbcon) {
		console_lock();
		radeon_fbdev_set_suspend(rdev, 1);
		console_unlock();
	}
	return 0;
}
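/**
 * radeon_resume_kms - initiate device resume
 * @dev: drm dev pointer
 * @resume: true if the hw needs to be brought out of D3hot
 * @fbcon: true if we should resume the fbdev console as well
 *
 * Brings the hw back up: re-enables the PCI device, restores clocks and
 * power state, re-inits display hw and hotplug detection (all asics).
 * Returns 0 for success or an error on failure.
 */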
int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
{
	struct drm_connector *connector;
	struct radeon_device *rdev = dev->dev_private;
	int r;

	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	if (fbcon) {
		console_lock();
	}
	if (resume) {
		pci_set_power_state(dev->pdev, PCI_D0);
		pci_restore_state(dev->pdev);
		if (pci_enable_device(dev->pdev)) {
			if (fbcon)
				console_unlock();
			return -1;
		}
	}
	/* resume AGP if in use */
	radeon_agp_resume(rdev);
	radeon_resume(rdev);

	r = radeon_ib_ring_tests(rdev);
	if (r)
		DRM_ERROR("ib ring test failed (%d).\n", r);

	if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
		/* do dpm late init */
		r = radeon_pm_late_init(rdev);
		if (r) {
			rdev->pm.dpm_enabled = false;
			DRM_ERROR("radeon_pm_late_init failed, disabling dpm\n");
		}
	} else {
		/* resume old pm late */
		radeon_pm_resume(rdev);
	}

	radeon_restore_bios_scratch_regs(rdev);

	/* init dig PHYs, disp eng pll */
	if (rdev->is_atom_bios) {
		radeon_atom_encoder_init(rdev);
		radeon_atom_disp_eng_pll_init(rdev);
		/* turn on the BL */
		if (rdev->mode_info.bl_encoder) {
			u8 bl_level = radeon_get_backlight_level(rdev,
								 rdev->mode_info.bl_encoder);
			radeon_set_backlight_level(rdev, rdev->mode_info.bl_encoder,
						   bl_level);
		}
	}
	/* reset hpd state */
	radeon_hpd_init(rdev);

	if (fbcon) {
		drm_helper_resume_force_mode(dev);
		/* turn on display hw */
		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
			drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
		}
	}

	drm_kms_helper_poll_enable(dev);

	/* set the power state here in case we are a PX system or headless */
	if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled)
		radeon_pm_compute_clocks(rdev);

	if (fbcon) {
		radeon_fbdev_set_suspend(rdev, 0);
		console_unlock();
	}

	return 0;
}
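/**
 * radeon_gpu_reset - reset the asic
 * @rdev: radeon device pointer
 *
 * Attempts an asic reset while preserving the contents of the rings so
 * saved work can be resubmitted afterwards (all asics).
 * Returns 0 for success or an error on failure.
 */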
int radeon_gpu_reset(struct radeon_device *rdev)
{
	unsigned ring_sizes[RADEON_NUM_RINGS];
	uint32_t *ring_data[RADEON_NUM_RINGS];

	bool saved = false;

	int i, r;
	int resched;

	down_write(&rdev->exclusive_lock);

	if (!rdev->needs_reset) {
		up_write(&rdev->exclusive_lock);
		return 0;
	}

	radeon_save_bios_scratch_regs(rdev);
	/* block TTM */
	resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
	radeon_suspend(rdev);
	radeon_hpd_fini(rdev);

	/* save the unprocessed commands from each ring */
	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		ring_sizes[i] = radeon_ring_backup(rdev, &rdev->ring[i],
						   &ring_data[i]);
		if (ring_sizes[i]) {
			saved = true;
			dev_info(rdev->dev, "Saved %d dwords of commands "
				 "on ring %d.\n", ring_sizes[i], i);
		}
	}

	r = radeon_asic_reset(rdev);
	if (!r) {
		dev_info(rdev->dev, "GPU reset succeeded, trying to resume\n");
		radeon_resume(rdev);
	}

	radeon_restore_bios_scratch_regs(rdev);

	/* resubmit the saved commands, or force-complete their fences */
	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		if (!r && ring_data[i]) {
			radeon_ring_restore(rdev, &rdev->ring[i],
					    ring_sizes[i], ring_data[i]);
		} else {
			radeon_fence_driver_force_completion(rdev, i);
			kfree(ring_data[i]);
		}
	}

	if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
		/* do dpm late init */
		r = radeon_pm_late_init(rdev);
		if (r) {
			rdev->pm.dpm_enabled = false;
			DRM_ERROR("radeon_pm_late_init failed, disabling dpm\n");
		}
	} else {
		/* resume old pm late */
		radeon_pm_resume(rdev);
	}

	/* init dig PHYs, disp eng pll */
	if (rdev->is_atom_bios) {
		radeon_atom_encoder_init(rdev);
		radeon_atom_disp_eng_pll_init(rdev);
		/* turn on the BL */
		if (rdev->mode_info.bl_encoder) {
			u8 bl_level = radeon_get_backlight_level(rdev,
								 rdev->mode_info.bl_encoder);
			radeon_set_backlight_level(rdev, rdev->mode_info.bl_encoder,
						   bl_level);
		}
	}
	/* reset hpd state */
	radeon_hpd_init(rdev);

	ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);

	rdev->in_reset = true;
	rdev->needs_reset = false;

	downgrade_write(&rdev->exclusive_lock);

	drm_helper_resume_force_mode(rdev->ddev);

	/* set the power state here in case we are a PX system or headless */
	if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled)
		radeon_pm_compute_clocks(rdev);

	if (!r) {
		r = radeon_ib_ring_tests(rdev);
		if (r && saved)
			r = -EAGAIN;
	} else {
		/* bad news, how to tell it to userspace ? */
		dev_info(rdev->dev, "GPU reset failed\n");
	}

	rdev->needs_reset = r == -EAGAIN;
	rdev->in_reset = false;

	up_read(&rdev->exclusive_lock);
	return r;
}
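/*
 * Debugfs
 */
/**
 * radeon_debugfs_add_files - register driver debugfs files
 * @rdev: radeon device pointer
 * @files: drm_info_list entries to register
 * @nfiles: number of entries
 *
 * Registers the files with both the control and primary drm minors,
 * skipping lists that are already registered.
 * Returns 0 for success or -EINVAL if the component table is full.
 */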
int radeon_debugfs_add_files(struct radeon_device *rdev,
			     struct drm_info_list *files,
			     unsigned nfiles)
{
	unsigned i;

	for (i = 0; i < rdev->debugfs_count; i++) {
		if (rdev->debugfs[i].files == files) {
			/* Already registered */
			return 0;
		}
	}

	i = rdev->debugfs_count + 1;
	if (i > RADEON_DEBUGFS_MAX_COMPONENTS) {
		DRM_ERROR("Reached maximum number of debugfs components.\n");
		DRM_ERROR("Report so we increase "
			  "RADEON_DEBUGFS_MAX_COMPONENTS.\n");
		return -EINVAL;
	}
	rdev->debugfs[rdev->debugfs_count].files = files;
	rdev->debugfs[rdev->debugfs_count].num_files = nfiles;
	rdev->debugfs_count = i;
#if defined(CONFIG_DEBUG_FS)
	drm_debugfs_create_files(files, nfiles,
				 rdev->ddev->control->debugfs_root,
				 rdev->ddev->control);
	drm_debugfs_create_files(files, nfiles,
				 rdev->ddev->primary->debugfs_root,
				 rdev->ddev->primary);
#endif
	return 0;
}

static void radeon_debugfs_remove_files(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	unsigned i;

	for (i = 0; i < rdev->debugfs_count; i++) {
		drm_debugfs_remove_files(rdev->debugfs[i].files,
					 rdev->debugfs[i].num_files,
					 rdev->ddev->control);
		drm_debugfs_remove_files(rdev->debugfs[i].files,
					 rdev->debugfs[i].num_files,
					 rdev->ddev->primary);
	}
#endif
}

#if defined(CONFIG_DEBUG_FS)
int radeon_debugfs_init(struct drm_minor *minor)
{
	return 0;
}

void radeon_debugfs_cleanup(struct drm_minor *minor)
{
}
#endif