1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29#include <linux/console.h>
30#include <linux/efi.h>
31#include <linux/pm_runtime.h>
32#include <linux/slab.h>
33#include <linux/vga_switcheroo.h>
34#include <linux/vgaarb.h>
35
36#include <drm/drm_cache.h>
37#include <drm/drm_crtc_helper.h>
38#include <drm/drm_debugfs.h>
39#include <drm/drm_device.h>
40#include <drm/drm_file.h>
41#include <drm/drm_pci.h>
42#include <drm/drm_probe_helper.h>
43#include <drm/radeon_drm.h>
44
45#include "radeon_reg.h"
46#include "radeon.h"
47#include "atom.h"
48
/* Human-readable ASIC family names, indexed by enum radeon_family;
 * used for the init-time DRM_INFO banner in radeon_device_init().
 * Order must match the radeon_family enum exactly.
 */
static const char radeon_family_name[][16] = {
	"R100",
	"RV100",
	"RS100",
	"RV200",
	"RS200",
	"R200",
	"RV250",
	"RS300",
	"RV280",
	"R300",
	"R350",
	"RV350",
	"RV380",
	"R420",
	"R423",
	"RV410",
	"RS400",
	"RS480",
	"RS600",
	"RS690",
	"RS740",
	"RV515",
	"R520",
	"RV530",
	"RV560",
	"RV570",
	"R580",
	"R600",
	"RV610",
	"RV630",
	"RV670",
	"RV620",
	"RV635",
	"RS780",
	"RS880",
	"RV770",
	"RV730",
	"RV710",
	"RV740",
	"CEDAR",
	"REDWOOD",
	"JUNIPER",
	"CYPRESS",
	"HEMLOCK",
	"PALM",
	"SUMO",
	"SUMO2",
	"BARTS",
	"TURKS",
	"CAICOS",
	"CAYMAN",
	"ARUBA",
	"TAHITI",
	"PITCAIRN",
	"VERDE",
	"OLAND",
	"HAINAN",
	"BONAIRE",
	"KAVERI",
	"KABINI",
	"HAWAII",
	"MULLINS",
	"LAST",
};
114
#if defined(CONFIG_VGA_SWITCHEROO)
/* Implemented in radeon_atpx_handler.c when vga_switcheroo is built in. */
bool radeon_has_atpx_dgpu_power_cntl(void);
bool radeon_is_atpx_hybrid(void);
#else
/* No ATPX support without vga_switcheroo: report no power control / hybrid. */
static inline bool radeon_has_atpx_dgpu_power_cntl(void) { return false; }
static inline bool radeon_is_atpx_hybrid(void) { return false; }
#endif
122
/* Quirk flag: force-disable PowerXpress (PX) runtime PM on this system. */
#define RADEON_PX_QUIRK_DISABLE_PX  (1 << 0)

/* One PX quirk table entry: matched against the PCI device and
 * subsystem IDs of the dGPU; px_quirk_flags is applied on a match.
 */
struct radeon_px_quirk {
	u32 chip_vendor;
	u32 chip_device;
	u32 subsys_vendor;
	u32 subsys_device;
	u32 px_quirk_flags;
};
132
/* Systems where PX (hybrid graphics power management) is known broken;
 * each entry disables PX for one (device, subsystem) combination.
 * NOTE(review): specific laptop models per the original bug reports —
 * confirm against upstream commit logs before editing entries.
 */
static struct radeon_px_quirk radeon_px_quirk_list[] = {
	{ PCI_VENDOR_ID_ATI, 0x6760, 0x1025, 0x0672, RADEON_PX_QUIRK_DISABLE_PX },
	{ PCI_VENDOR_ID_ATI, 0x6741, 0x1043, 0x108c, RADEON_PX_QUIRK_DISABLE_PX },
	{ PCI_VENDOR_ID_ATI, 0x6840, 0x1043, 0x2122, RADEON_PX_QUIRK_DISABLE_PX },
	{ PCI_VENDOR_ID_ATI, 0x6741, 0x1043, 0x2122, RADEON_PX_QUIRK_DISABLE_PX },
	{ PCI_VENDOR_ID_ATI, 0x6840, 0x1043, 0x2123, RADEON_PX_QUIRK_DISABLE_PX },
	{ 0, 0, 0, 0, 0 },	/* sentinel: chip_device == 0 ends the scan */
};
156
157bool radeon_is_px(struct drm_device *dev)
158{
159 struct radeon_device *rdev = dev->dev_private;
160
161 if (rdev->flags & RADEON_IS_PX)
162 return true;
163 return false;
164}
165
166static void radeon_device_handle_px_quirks(struct radeon_device *rdev)
167{
168 struct radeon_px_quirk *p = radeon_px_quirk_list;
169
170
171 while (p && p->chip_device != 0) {
172 if (rdev->pdev->vendor == p->chip_vendor &&
173 rdev->pdev->device == p->chip_device &&
174 rdev->pdev->subsystem_vendor == p->subsys_vendor &&
175 rdev->pdev->subsystem_device == p->subsys_device) {
176 rdev->px_quirk_flags = p->px_quirk_flags;
177 break;
178 }
179 ++p;
180 }
181
182 if (rdev->px_quirk_flags & RADEON_PX_QUIRK_DISABLE_PX)
183 rdev->flags &= ~RADEON_IS_PX;
184
185
186 if (!radeon_is_atpx_hybrid() &&
187 !radeon_has_atpx_dgpu_power_cntl())
188 rdev->flags &= ~RADEON_IS_PX;
189}
190
191
192
193
194
195
196
197
198
199
200
201void radeon_program_register_sequence(struct radeon_device *rdev,
202 const u32 *registers,
203 const u32 array_size)
204{
205 u32 tmp, reg, and_mask, or_mask;
206 int i;
207
208 if (array_size % 3)
209 return;
210
211 for (i = 0; i < array_size; i +=3) {
212 reg = registers[i + 0];
213 and_mask = registers[i + 1];
214 or_mask = registers[i + 2];
215
216 if (and_mask == 0xffffffff) {
217 tmp = or_mask;
218 } else {
219 tmp = RREG32(reg);
220 tmp &= ~and_mask;
221 tmp |= or_mask;
222 }
223 WREG32(reg, tmp);
224 }
225}
226
/**
 * radeon_pci_config_reset - trigger an ASIC reset via PCI config space
 * @rdev: radeon device pointer
 *
 * Writes the reset magic to PCI config offset 0x7c to reset the GPU.
 */
void radeon_pci_config_reset(struct radeon_device *rdev)
{
	pci_write_config_dword(rdev->pdev, 0x7c, RADEON_ASIC_RESET_DATA);
}
231
232
233
234
235
236
237
238
239void radeon_surface_init(struct radeon_device *rdev)
240{
241
242 if (rdev->family < CHIP_R600) {
243 int i;
244
245 for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {
246 if (rdev->surface_regs[i].bo)
247 radeon_bo_get_surface_reg(rdev->surface_regs[i].bo);
248 else
249 radeon_clear_surface_reg(rdev, i);
250 }
251
252 WREG32(RADEON_SURFACE_CNTL, 0);
253 }
254}
255
256
257
258
259
260
261
262
263
264
265
266void radeon_scratch_init(struct radeon_device *rdev)
267{
268 int i;
269
270
271 if (rdev->family < CHIP_R300) {
272 rdev->scratch.num_reg = 5;
273 } else {
274 rdev->scratch.num_reg = 7;
275 }
276 rdev->scratch.reg_base = RADEON_SCRATCH_REG0;
277 for (i = 0; i < rdev->scratch.num_reg; i++) {
278 rdev->scratch.free[i] = true;
279 rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4);
280 }
281}
282
283
284
285
286
287
288
289
290
291
292int radeon_scratch_get(struct radeon_device *rdev, uint32_t *reg)
293{
294 int i;
295
296 for (i = 0; i < rdev->scratch.num_reg; i++) {
297 if (rdev->scratch.free[i]) {
298 rdev->scratch.free[i] = false;
299 *reg = rdev->scratch.reg[i];
300 return 0;
301 }
302 }
303 return -EINVAL;
304}
305
306
307
308
309
310
311
312
313
314void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg)
315{
316 int i;
317
318 for (i = 0; i < rdev->scratch.num_reg; i++) {
319 if (rdev->scratch.reg[i] == reg) {
320 rdev->scratch.free[i] = true;
321 return;
322 }
323 }
324}
325
326
327
328
329
330
331
332
333
334
335
336
/**
 * radeon_doorbell_init - map the doorbell BAR and set up the allocator
 * @rdev: radeon device pointer
 *
 * Reads the doorbell aperture from PCI BAR 2, caps the usable doorbell
 * count at RADEON_MAX_DOORBELLS, ioremaps only the used portion, and
 * clears the allocation bitmap.
 *
 * Returns 0 on success, -EINVAL if the BAR holds no doorbells,
 * -ENOMEM if the mapping fails.
 */
static int radeon_doorbell_init(struct radeon_device *rdev)
{
	/* doorbell bar mapping */
	rdev->doorbell.base = pci_resource_start(rdev->pdev, 2);
	rdev->doorbell.size = pci_resource_len(rdev->pdev, 2);

	rdev->doorbell.num_doorbells = min_t(u32, rdev->doorbell.size / sizeof(u32), RADEON_MAX_DOORBELLS);
	if (rdev->doorbell.num_doorbells == 0)
		return -EINVAL;

	/* map only what we actually use, not the whole BAR */
	rdev->doorbell.ptr = ioremap(rdev->doorbell.base, rdev->doorbell.num_doorbells * sizeof(u32));
	if (rdev->doorbell.ptr == NULL) {
		return -ENOMEM;
	}
	DRM_INFO("doorbell mmio base: 0x%08X\n", (uint32_t)rdev->doorbell.base);
	DRM_INFO("doorbell mmio size: %u\n", (unsigned)rdev->doorbell.size);

	/* all doorbells start out unallocated */
	memset(&rdev->doorbell.used, 0, sizeof(rdev->doorbell.used));

	return 0;
}
358
359
360
361
362
363
364
365
/**
 * radeon_doorbell_fini - unmap the doorbell aperture
 * @rdev: radeon device pointer
 */
static void radeon_doorbell_fini(struct radeon_device *rdev)
{
	iounmap(rdev->doorbell.ptr);
	rdev->doorbell.ptr = NULL;
}
371
372
373
374
375
376
377
378
379
380
381int radeon_doorbell_get(struct radeon_device *rdev, u32 *doorbell)
382{
383 unsigned long offset = find_first_zero_bit(rdev->doorbell.used, rdev->doorbell.num_doorbells);
384 if (offset < rdev->doorbell.num_doorbells) {
385 __set_bit(offset, rdev->doorbell.used);
386 *doorbell = offset;
387 return 0;
388 } else {
389 return -EINVAL;
390 }
391}
392
393
394
395
396
397
398
399
400
401void radeon_doorbell_free(struct radeon_device *rdev, u32 doorbell)
402{
403 if (doorbell < rdev->doorbell.num_doorbells)
404 __clear_bit(doorbell, rdev->doorbell.used);
405}
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
/**
 * radeon_wb_disable - stop using GPU writeback
 * @rdev: radeon device pointer
 *
 * Only flips the flag; the writeback buffer itself stays allocated
 * (see radeon_wb_fini() for teardown).
 */
void radeon_wb_disable(struct radeon_device *rdev)
{
	rdev->wb.enabled = false;
}
425
426
427
428
429
430
431
432
433
/**
 * radeon_wb_fini - disable writeback and free its buffer object
 * @rdev: radeon device pointer
 *
 * The BO must be reserved before it can be unmapped/unpinned; if the
 * reserve fails the unref still runs so the reference is not leaked.
 */
void radeon_wb_fini(struct radeon_device *rdev)
{
	radeon_wb_disable(rdev);
	if (rdev->wb.wb_obj) {
		if (!radeon_bo_reserve(rdev->wb.wb_obj, false)) {
			radeon_bo_kunmap(rdev->wb.wb_obj);
			radeon_bo_unpin(rdev->wb.wb_obj);
			radeon_bo_unreserve(rdev->wb.wb_obj);
		}
		radeon_bo_unref(&rdev->wb.wb_obj);
		rdev->wb.wb = NULL;
		rdev->wb.wb_obj = NULL;
	}
}
448
449
450
451
452
453
454
455
456
457
/**
 * radeon_wb_init - allocate the writeback buffer and decide the WB policy
 * @rdev: radeon device pointer
 *
 * Creates (once) a GPU-page-sized GTT buffer object, pins it, kmaps it
 * into rdev->wb.wb, then decides whether writeback is used:
 *  - disabled by the radeon_no_wb module parameter,
 *  - disabled on AGP and pre-R300 parts,
 *  - enabled otherwise, with event-based writeback from R600 on,
 *  - always forced on (with events) from PALM/APUs onward.
 *
 * Returns 0 on success, negative error code on allocation failure.
 */
int radeon_wb_init(struct radeon_device *rdev)
{
	int r;

	if (rdev->wb.wb_obj == NULL) {
		r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true,
				     RADEON_GEM_DOMAIN_GTT, 0, NULL, NULL,
				     &rdev->wb.wb_obj);
		if (r) {
			dev_warn(rdev->dev, "(%d) create WB bo failed\n", r);
			return r;
		}
		r = radeon_bo_reserve(rdev->wb.wb_obj, false);
		if (unlikely(r != 0)) {
			/* tear down the half-built state before bailing */
			radeon_wb_fini(rdev);
			return r;
		}
		r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
				  &rdev->wb.gpu_addr);
		if (r) {
			radeon_bo_unreserve(rdev->wb.wb_obj);
			dev_warn(rdev->dev, "(%d) pin WB bo failed\n", r);
			radeon_wb_fini(rdev);
			return r;
		}
		r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
		radeon_bo_unreserve(rdev->wb.wb_obj);
		if (r) {
			dev_warn(rdev->dev, "(%d) map WB bo failed\n", r);
			radeon_wb_fini(rdev);
			return r;
		}
	}

	/* clear wb memory */
	memset((char *)rdev->wb.wb, 0, RADEON_GPU_PAGE_SIZE);
	/* disable event_write fences by default */
	rdev->wb.use_event = false;
	/* disabled via module param */
	if (radeon_no_wb == 1) {
		rdev->wb.enabled = false;
	} else {
		if (rdev->flags & RADEON_IS_AGP) {
			/* AGP writeback is unreliable */
			rdev->wb.enabled = false;
		} else if (rdev->family < CHIP_R300) {
			/* pre-R300 parts lack usable writeback */
			rdev->wb.enabled = false;
		} else {
			rdev->wb.enabled = true;
			/* event-based fences from R600 on */
			if (rdev->family >= CHIP_R600) {
				rdev->wb.use_event = true;
			}
		}
	}
	/* APUs and newer always use writeback with events */
	if (rdev->family >= CHIP_PALM) {
		rdev->wb.enabled = true;
		rdev->wb.use_event = true;
	}

	dev_info(rdev->dev, "WB %sabled\n", rdev->wb.enabled ? "en" : "dis");

	return 0;
}
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
/**
 * radeon_vram_location - place VRAM in the GPU's MC address space
 * @rdev: radeon device pointer
 * @mc: memory controller structure holding the MC parameters
 * @base: MC address at which VRAM should start
 *
 * Clamps the reported VRAM size to the PCI aperture when it would
 * overflow the MC address mask, and again when (on AGP) it would
 * overlap the GTT range. Finally applies the user's radeon_vram_limit
 * module parameter (in MiB) to real_vram_size.
 */
void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 base)
{
	uint64_t limit = (uint64_t)radeon_vram_limit << 20;

	mc->vram_start = base;
	if (mc->mc_vram_size > (rdev->mc.mc_mask - base + 1)) {
		dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
		mc->real_vram_size = mc->aper_size;
		mc->mc_vram_size = mc->aper_size;
	}
	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
	/* on AGP, VRAM must not overlap the GTT window */
	if (rdev->flags & RADEON_IS_AGP && mc->vram_end > mc->gtt_start && mc->vram_start <= mc->gtt_end) {
		dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
		mc->real_vram_size = mc->aper_size;
		mc->mc_vram_size = mc->aper_size;
	}
	/* recompute the end after any clamping above */
	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
	if (limit && limit < mc->real_vram_size)
		mc->real_vram_size = limit;
	dev_info(rdev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
		 mc->mc_vram_size >> 20, mc->vram_start,
		 mc->vram_end, mc->real_vram_size >> 20);
}
589
590
591
592
593
594
595
596
597
598
599
600
601
/**
 * radeon_gtt_location - place GTT in the GPU's MC address space
 * @rdev: radeon device pointer
 * @mc: memory controller structure holding the MC parameters
 *
 * Computes the aligned free space after (size_af) and before (size_bf)
 * the VRAM range, places GTT in the larger gap, and shrinks gtt_size
 * if the chosen gap cannot hold it.
 */
void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
{
	u64 size_af, size_bf;

	/* aligned space above VRAM vs. space below VRAM */
	size_af = ((rdev->mc.mc_mask - mc->vram_end) + mc->gtt_base_align) & ~mc->gtt_base_align;
	size_bf = mc->vram_start & ~mc->gtt_base_align;
	if (size_bf > size_af) {
		if (mc->gtt_size > size_bf) {
			dev_warn(rdev->dev, "limiting GTT\n");
			mc->gtt_size = size_bf;
		}
		mc->gtt_start = (mc->vram_start & ~mc->gtt_base_align) - mc->gtt_size;
	} else {
		if (mc->gtt_size > size_af) {
			dev_warn(rdev->dev, "limiting GTT\n");
			mc->gtt_size = size_af;
		}
		mc->gtt_start = (mc->vram_end + 1 + mc->gtt_base_align) & ~mc->gtt_base_align;
	}
	mc->gtt_end = mc->gtt_start + mc->gtt_size - 1;
	dev_info(rdev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n",
		 mc->gtt_size >> 20, mc->gtt_start, mc->gtt_end);
}
625
626
627
628
629
630
631
632
633
634
635
636
/**
 * radeon_device_is_virtual - check whether we run under a hypervisor
 *
 * Uses the x86 HYPERVISOR CPU feature bit; on other architectures we
 * have no detection and assume bare metal.
 */
bool radeon_device_is_virtual(void)
{
#ifdef CONFIG_X86
	return boot_cpu_has(X86_FEATURE_HYPERVISOR);
#else
	return false;
#endif
}
645
646
647
648
649
650
651
652
653
654
/**
 * radeon_card_posted - check whether the VBIOS has already posted the card
 * @rdev: radeon device pointer
 *
 * Heuristic: a posted card has at least one CRTC enabled, or a non-zero
 * CONFIG_MEMSIZE. Virtualized BONAIRE+ and Apple EFI pre-R600 systems
 * always report "not posted" so the driver re-posts the ASIC itself.
 *
 * Returns true if the card appears posted, false otherwise.
 */
bool radeon_card_posted(struct radeon_device *rdev)
{
	uint32_t reg;

	/* in a VM the VBIOS post can't be trusted — force a re-post */
	if (rdev->family >= CHIP_BONAIRE &&
	    radeon_device_is_virtual())
		return false;

	/* Apple EFI machines don't post older cards via the VBIOS */
	if (efi_enabled(EFI_BOOT) &&
	    (rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE) &&
	    (rdev->family < CHIP_R600))
		return false;

	/* no display engine — only the memsize check applies */
	if (ASIC_IS_NODCE(rdev))
		goto check_memsize;

	/* first check CRTCs */
	if (ASIC_IS_DCE4(rdev)) {
		reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET);
		if (rdev->num_crtc >= 4) {
			reg |= RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) |
				RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET);
		}
		if (rdev->num_crtc >= 6) {
			reg |= RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) |
				RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
		}
		if (reg & EVERGREEN_CRTC_MASTER_EN)
			return true;
	} else if (ASIC_IS_AVIVO(rdev)) {
		reg = RREG32(AVIVO_D1CRTC_CONTROL) |
		      RREG32(AVIVO_D2CRTC_CONTROL);
		if (reg & AVIVO_CRTC_EN) {
			return true;
		}
	} else {
		reg = RREG32(RADEON_CRTC_GEN_CNTL) |
		      RREG32(RADEON_CRTC2_GEN_CNTL);
		if (reg & RADEON_CRTC_EN) {
			return true;
		}
	}

check_memsize:
	/* then check MEM_SIZE, in case the asics don't have CRTCs enabled */
	if (rdev->family >= CHIP_R600)
		reg = RREG32(R600_CONFIG_MEMSIZE);
	else
		reg = RREG32(RADEON_CONFIG_MEMSIZE);

	if (reg)
		return true;

	return false;

}
714
715
716
717
718
719
720
721
722
/**
 * radeon_update_bandwidth_info - refresh cached clock values for display
 * @rdev: radeon device pointer
 *
 * Converts the current engine/memory clocks (reported in 10 kHz units)
 * into fixed-point MHz for the display bandwidth code; on IGPs also
 * derives the core bandwidth as sclk/16.
 */
void radeon_update_bandwidth_info(struct radeon_device *rdev)
{
	fixed20_12 a;
	u32 sclk = rdev->pm.current_sclk;
	u32 mclk = rdev->pm.current_mclk;

	/* sclk/mclk in 10 khz units — divide by 100 to get MHz */
	a.full = dfixed_const(100);
	rdev->pm.sclk.full = dfixed_const(sclk);
	rdev->pm.sclk.full = dfixed_div(rdev->pm.sclk, a);
	rdev->pm.mclk.full = dfixed_const(mclk);
	rdev->pm.mclk.full = dfixed_div(rdev->pm.mclk, a);

	if (rdev->flags & RADEON_IS_IGP) {
		a.full = dfixed_const(16);
		/* core_bandwidth = sclk(Mhz) * 16 */
		rdev->pm.core_bandwidth.full = dfixed_div(rdev->pm.sclk, a);
	}
}
742
743
744
745
746
747
748
749
750
751
752bool radeon_boot_test_post_card(struct radeon_device *rdev)
753{
754 if (radeon_card_posted(rdev))
755 return true;
756
757 if (rdev->bios) {
758 DRM_INFO("GPU not posted. posting now...\n");
759 if (rdev->is_atom_bios)
760 atom_asic_init(rdev->mode_info.atom_context);
761 else
762 radeon_combios_asic_init(rdev->ddev);
763 return true;
764 } else {
765 dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
766 return false;
767 }
768}
769
770
771
772
773
774
775
776
777
778
779
/**
 * radeon_dummy_page_init - allocate and DMA-map the GART dummy page
 * @rdev: radeon device pointer
 *
 * Allocates a zeroed DMA32 page, maps it bidirectionally, and caches a
 * pre-built GART entry for it. Unbound GART slots point at this page.
 * Idempotent: returns 0 immediately if the page already exists.
 *
 * Returns 0 on success, -ENOMEM on allocation or mapping failure.
 */
int radeon_dummy_page_init(struct radeon_device *rdev)
{
	if (rdev->dummy_page.page)
		return 0;
	rdev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO);
	if (rdev->dummy_page.page == NULL)
		return -ENOMEM;
	rdev->dummy_page.addr = pci_map_page(rdev->pdev, rdev->dummy_page.page,
					     0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	if (pci_dma_mapping_error(rdev->pdev, rdev->dummy_page.addr)) {
		dev_err(&rdev->pdev->dev, "Failed to DMA MAP the dummy page\n");
		/* mapping failed — release the page so a retry starts clean */
		__free_page(rdev->dummy_page.page);
		rdev->dummy_page.page = NULL;
		return -ENOMEM;
	}
	rdev->dummy_page.entry = radeon_gart_get_page_entry(rdev->dummy_page.addr,
							    RADEON_GART_PAGE_DUMMY);
	return 0;
}
799
800
801
802
803
804
805
806
/**
 * radeon_dummy_page_fini - unmap and free the GART dummy page
 * @rdev: radeon device pointer
 *
 * Safe to call when no dummy page was ever allocated.
 */
void radeon_dummy_page_fini(struct radeon_device *rdev)
{
	if (rdev->dummy_page.page == NULL)
		return;
	pci_unmap_page(rdev->pdev, rdev->dummy_page.addr,
		       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	__free_page(rdev->dummy_page.page);
	rdev->dummy_page.page = NULL;
}
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836static uint32_t cail_pll_read(struct card_info *info, uint32_t reg)
837{
838 struct radeon_device *rdev = info->dev->dev_private;
839 uint32_t r;
840
841 r = rdev->pll_rreg(rdev, reg);
842 return r;
843}
844
845
846
847
848
849
850
851
852
853
/**
 * cail_pll_write - ATOM interpreter callback: write a PLL register
 * @info: atom card_info pointer
 * @reg: PLL register offset
 * @val: value to write
 */
static void cail_pll_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct radeon_device *rdev = info->dev->dev_private;

	rdev->pll_wreg(rdev, reg, val);
}
860
861
862
863
864
865
866
867
868
869
870static uint32_t cail_mc_read(struct card_info *info, uint32_t reg)
871{
872 struct radeon_device *rdev = info->dev->dev_private;
873 uint32_t r;
874
875 r = rdev->mc_rreg(rdev, reg);
876 return r;
877}
878
879
880
881
882
883
884
885
886
887
/**
 * cail_mc_write - ATOM interpreter callback: write an MC register
 * @info: atom card_info pointer
 * @reg: MC register offset
 * @val: value to write
 */
static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct radeon_device *rdev = info->dev->dev_private;

	rdev->mc_wreg(rdev, reg, val);
}
894
895
896
897
898
899
900
901
902
903
/**
 * cail_reg_write - ATOM interpreter callback: write an MMIO register
 * @info: atom card_info pointer
 * @reg: MMIO register offset in dword units (hence the *4)
 * @val: value to write
 */
static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct radeon_device *rdev = info->dev->dev_private;

	WREG32(reg*4, val);
}
910
911
912
913
914
915
916
917
918
919
920static uint32_t cail_reg_read(struct card_info *info, uint32_t reg)
921{
922 struct radeon_device *rdev = info->dev->dev_private;
923 uint32_t r;
924
925 r = RREG32(reg*4);
926 return r;
927}
928
929
930
931
932
933
934
935
936
937
/**
 * cail_ioreg_write - ATOM interpreter callback: write a PCI I/O register
 * @info: atom card_info pointer
 * @reg: I/O register offset in dword units (hence the *4)
 * @val: value to write
 */
static void cail_ioreg_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct radeon_device *rdev = info->dev->dev_private;

	WREG32_IO(reg*4, val);
}
944
945
946
947
948
949
950
951
952
953
954static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg)
955{
956 struct radeon_device *rdev = info->dev->dev_private;
957 uint32_t r;
958
959 r = RREG32_IO(reg*4);
960 return r;
961}
962
963
964
965
966
967
968
969
970
971
972
/**
 * radeon_atombios_init - set up the ATOM BIOS interpreter
 * @rdev: radeon device pointer
 *
 * Allocates the card_info callback table, wires the register-access
 * callbacks (falling back to MMIO for I/O accesses when no PCI I/O BAR
 * was mapped), parses the BIOS image, and initializes scratch state.
 *
 * Returns 0 on success, -ENOMEM on allocation or parse failure.
 */
int radeon_atombios_init(struct radeon_device *rdev)
{
	struct card_info *atom_card_info =
	    kzalloc(sizeof(struct card_info), GFP_KERNEL);

	if (!atom_card_info)
		return -ENOMEM;

	rdev->mode_info.atom_card_info = atom_card_info;
	atom_card_info->dev = rdev->ddev;
	atom_card_info->reg_read = cail_reg_read;
	atom_card_info->reg_write = cail_reg_write;
	/* needed for iio ops */
	if (rdev->rio_mem) {
		atom_card_info->ioreg_read = cail_ioreg_read;
		atom_card_info->ioreg_write = cail_ioreg_write;
	} else {
		DRM_ERROR("Unable to find PCI I/O BAR; using MMIO for ATOM IIO\n");
		atom_card_info->ioreg_read = cail_reg_read;
		atom_card_info->ioreg_write = cail_reg_write;
	}
	atom_card_info->mc_read = cail_mc_read;
	atom_card_info->mc_write = cail_mc_write;
	atom_card_info->pll_read = cail_pll_read;
	atom_card_info->pll_write = cail_pll_write;

	rdev->mode_info.atom_context = atom_parse(atom_card_info, rdev->bios);
	if (!rdev->mode_info.atom_context) {
		/* frees atom_card_info too and NULLs the pointers */
		radeon_atombios_fini(rdev);
		return -ENOMEM;
	}

	mutex_init(&rdev->mode_info.atom_context->mutex);
	mutex_init(&rdev->mode_info.atom_context->scratch_mutex);
	radeon_atom_initialize_bios_scratch_regs(rdev->ddev);
	atom_allocate_fb_scratch(rdev->mode_info.atom_context);
	return 0;
}
1011
1012
1013
1014
1015
1016
1017
1018
1019
1020
1021void radeon_atombios_fini(struct radeon_device *rdev)
1022{
1023 if (rdev->mode_info.atom_context) {
1024 kfree(rdev->mode_info.atom_context->scratch);
1025 }
1026 kfree(rdev->mode_info.atom_context);
1027 rdev->mode_info.atom_context = NULL;
1028 kfree(rdev->mode_info.atom_card_info);
1029 rdev->mode_info.atom_card_info = NULL;
1030}
1031
1032
1033
1034
1035
1036
1037
1038
1039
1040
1041
1042
1043
1044
1045
1046
1047
/**
 * radeon_combios_init - set up scratch state for legacy (COM)BIOS parts
 * @rdev: radeon device pointer
 *
 * Legacy BIOS needs no interpreter; only the scratch regs are primed.
 * Always returns 0.
 */
int radeon_combios_init(struct radeon_device *rdev)
{
	radeon_combios_initialize_bios_scratch_regs(rdev->ddev);
	return 0;
}
1053
1054
1055
1056
1057
1058
1059
1060
1061
/**
 * radeon_combios_fini - tear down legacy BIOS state (nothing to do)
 * @rdev: radeon device pointer
 */
void radeon_combios_fini(struct radeon_device *rdev)
{
}
1065
1066
1067
1068
1069
1070
1071
1072
1073
1074
1075
1076static unsigned int radeon_vga_set_decode(void *cookie, bool state)
1077{
1078 struct radeon_device *rdev = cookie;
1079 radeon_vga_set_state(rdev, state);
1080 if (state)
1081 return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
1082 VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1083 else
1084 return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1085}
1086
1087
1088
1089
1090
1091
1092
1093
1094
/**
 * radeon_check_pot_argument - validate a power-of-two module parameter
 * @arg: value to test
 *
 * Returns true when @arg is a power of two (0 also passes, matching
 * the "parameter disabled" convention used by the callers).
 */
static bool radeon_check_pot_argument(int arg)
{
	/* clearing the lowest set bit leaves zero iff at most one bit is set */
	return !(arg & (arg - 1));
}
1099
1100
1101
1102
1103
1104
1105static int radeon_gart_size_auto(enum radeon_family family)
1106{
1107
1108 if (family >= CHIP_TAHITI)
1109 return 2048;
1110 else if (family >= CHIP_RV770)
1111 return 1024;
1112 else
1113 return 512;
1114}
1115
1116
1117
1118
1119
1120
1121
1122
1123
/**
 * radeon_check_arguments - validate and normalize module parameters
 * @rdev: radeon device pointer
 *
 * Sanitizes radeon_vram_limit, radeon_gart_size, radeon_agpmode,
 * radeon_vm_size and radeon_vm_block_size, resetting invalid values to
 * safe defaults with a warning, and derives rdev->mc.gtt_size.
 */
static void radeon_check_arguments(struct radeon_device *rdev)
{
	/* vramlimit must be a power of two */
	if (!radeon_check_pot_argument(radeon_vram_limit)) {
		dev_warn(rdev->dev, "vram limit (%d) must be a power of 2\n",
			 radeon_vram_limit);
		radeon_vram_limit = 0;
	}

	/* -1 means "pick automatically by family" */
	if (radeon_gart_size == -1) {
		radeon_gart_size = radeon_gart_size_auto(rdev->family);
	}
	/* gtt size must be power of two and greater or equal to 32M */
	if (radeon_gart_size < 32) {
		dev_warn(rdev->dev, "gart size (%d) too small\n",
			 radeon_gart_size);
		radeon_gart_size = radeon_gart_size_auto(rdev->family);
	} else if (!radeon_check_pot_argument(radeon_gart_size)) {
		dev_warn(rdev->dev, "gart size (%d) must be a power of 2\n",
			 radeon_gart_size);
		radeon_gart_size = radeon_gart_size_auto(rdev->family);
	}
	rdev->mc.gtt_size = (uint64_t)radeon_gart_size << 20;

	/* AGP mode can only be -1, 1, 2, 4, 8 */
	switch (radeon_agpmode) {
	case -1:
	case 0:
	case 1:
	case 2:
	case 4:
	case 8:
		break;
	default:
		dev_warn(rdev->dev, "invalid AGP mode %d (valid mode: "
			 "-1, 0, 1, 2, 4, 8)\n", radeon_agpmode);
		radeon_agpmode = 0;
		break;
	}

	if (!radeon_check_pot_argument(radeon_vm_size)) {
		dev_warn(rdev->dev, "VM size (%d) must be a power of 2\n",
			 radeon_vm_size);
		radeon_vm_size = 4;
	}

	if (radeon_vm_size < 1) {
		dev_warn(rdev->dev, "VM size (%d) too small, min is 1GB\n",
			 radeon_vm_size);
		radeon_vm_size = 4;
	}

	/* max is 1TB of VM address space */
	if (radeon_vm_size > 1024) {
		dev_warn(rdev->dev, "VM size (%d) too large, max is 1TB\n",
			 radeon_vm_size);
		radeon_vm_size = 4;
	}

	/* defines number of bits in page table versus page directory,
	 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
	 * page table and the remaining bits are in the page directory */
	if (radeon_vm_block_size == -1) {

		/* Total bits covered by PD + PTs */
		unsigned bits = ilog2(radeon_vm_size) + 18;

		/* Make sure the PD is 4K in size up to 8GB address space.
		   Above that split equal between PD and PTs */
		if (radeon_vm_size <= 8)
			radeon_vm_block_size = bits - 9;
		else
			radeon_vm_block_size = (bits + 3) / 2;

	} else if (radeon_vm_block_size < 9) {
		dev_warn(rdev->dev, "VM page table size (%d) too small\n",
			 radeon_vm_block_size);
		radeon_vm_block_size = 9;
	}

	if (radeon_vm_block_size > 24 ||
	    (radeon_vm_size * 1024) < (1ull << radeon_vm_block_size)) {
		dev_warn(rdev->dev, "VM page table size (%d) too large\n",
			 radeon_vm_block_size);
		radeon_vm_block_size = 9;
	}
}
1213
1214
1215
1216
1217
1218
1219
1220
1221
1222
/**
 * radeon_switcheroo_set_state - vga_switcheroo power-state callback
 * @pdev: pci device pointer
 * @state: the requested switch state
 *
 * Resumes or suspends the KMS device as the switcheroo toggles us on
 * or off. For PX devices the OFF request is ignored here — PX power
 * transitions are driven by runtime PM instead.
 */
static void radeon_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	if (radeon_is_px(dev) && state == VGA_SWITCHEROO_OFF)
		return;

	if (state == VGA_SWITCHEROO_ON) {
		pr_info("radeon: switched on\n");
		/* don't suspend or resume card normally */
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;

		radeon_resume_kms(dev, true, true);

		dev->switch_power_state = DRM_SWITCH_POWER_ON;
		drm_kms_helper_poll_enable(dev);
	} else {
		pr_info("radeon: switched off\n");
		drm_kms_helper_poll_disable(dev);
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		radeon_suspend_kms(dev, true, true, false);
		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
	}
}
1247
1248
1249
1250
1251
1252
1253
1254
1255
1256
1257static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
1258{
1259 struct drm_device *dev = pci_get_drvdata(pdev);
1260
1261
1262
1263
1264
1265
1266 return dev->open_count == 0;
1267}
1268
/* vga_switcheroo client callbacks; no reprobe hook is needed. */
static const struct vga_switcheroo_client_ops radeon_switcheroo_ops = {
	.set_gpu_state = radeon_switcheroo_set_state,
	.reprobe = NULL,
	.can_switch = radeon_switcheroo_can_switch,
};
1274
1275
1276
1277
1278
1279
1280
1281
1282
1283
1284
1285
1286
/**
 * radeon_device_init - initialize the driver for a device
 * @rdev: radeon device pointer
 * @ddev: drm device pointer
 * @pdev: pci device pointer
 * @flags: driver flags (family + feature bits)
 *
 * Initializes the radeon device: driver state, locks, DMA masks, MMIO
 * and doorbell mappings, VGA arbitration/switcheroo registration, and
 * finally the asic-specific init (radeon_init). Called at driver load.
 *
 * Returns 0 on success, negative error code on failure.
 */
int radeon_device_init(struct radeon_device *rdev,
		       struct drm_device *ddev,
		       struct pci_dev *pdev,
		       uint32_t flags)
{
	int r, i;
	int dma_bits;
	bool runtime = false;

	rdev->shutdown = false;
	rdev->dev = &pdev->dev;
	rdev->ddev = ddev;
	rdev->pdev = pdev;
	rdev->flags = flags;
	rdev->family = flags & RADEON_FAMILY_MASK;
	rdev->is_atom_bios = false;
	rdev->usec_timeout = RADEON_MAX_USEC_TIMEOUT;
	rdev->mc.gtt_size = 512 * 1024 * 1024;	/* default; radeon_check_arguments() overrides */
	rdev->accel_working = false;
	/* set up ring ids */
	for (i = 0; i < RADEON_NUM_RINGS; i++) {
		rdev->ring[i].idx = i;
	}
	rdev->fence_context = dma_fence_context_alloc(RADEON_NUM_RINGS);

	DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
		 radeon_family_name[rdev->family], pdev->vendor, pdev->device,
		 pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);

	/* mutex initialization are all done here so we
	 * can recall function without having locking issues */
	mutex_init(&rdev->ring_lock);
	mutex_init(&rdev->dc_hw_i2c_mutex);
	atomic_set(&rdev->ih.lock, 0);
	mutex_init(&rdev->gem.mutex);
	mutex_init(&rdev->pm.mutex);
	mutex_init(&rdev->gpu_clock_mutex);
	mutex_init(&rdev->srbm_mutex);
	init_rwsem(&rdev->pm.mclk_lock);
	init_rwsem(&rdev->exclusive_lock);
	init_waitqueue_head(&rdev->irq.vblank_queue);
	mutex_init(&rdev->mn_lock);
	hash_init(rdev->mn_hash);
	r = radeon_gem_init(rdev);
	if (r)
		return r;

	radeon_check_arguments(rdev);
	/* Adjust VM size here.
	 * Max GPUVM size for cayman+ is 40 bits. */
	rdev->vm_manager.max_pfn = radeon_vm_size << 18;

	/* Set asic functions */
	r = radeon_asic_init(rdev);
	if (r)
		return r;

	/* all of the newer IGP chips have an internal gart;
	 * however some RS400s report being AGP so remove that. */
	if ((rdev->family >= CHIP_RS400) &&
	    (rdev->flags & RADEON_IS_IGP)) {
		rdev->flags &= ~RADEON_IS_AGP;
	}

	if (rdev->flags & RADEON_IS_AGP && radeon_agpmode == -1) {
		radeon_agp_disable(rdev);
	}

	/* Set the internal MC address mask: this is the max address of the
	 * GPU's internal address space. */
	if (rdev->family >= CHIP_CAYMAN)
		rdev->mc.mc_mask = 0xffffffffffULL;	/* 40 bit MC */
	else if (rdev->family >= CHIP_CEDAR)
		rdev->mc.mc_mask = 0xfffffffffULL;	/* 36 bit MC */
	else
		rdev->mc.mc_mask = 0xffffffffULL;	/* 32 bit MC */

	/* set DMA mask.
	 * PCIE - can handle 40-bits.
	 * IGP - can handle 40-bits
	 * AGP - generally dma32 is safest
	 * PCI - dma32 for legacy pci gart, 40 bits on newer asics */
	rdev->need_dma32 = false;
	if (rdev->flags & RADEON_IS_AGP)
		rdev->need_dma32 = true;
	if ((rdev->flags & RADEON_IS_PCI) &&
	    (rdev->family <= CHIP_RS740))
		rdev->need_dma32 = true;
#ifdef CONFIG_PPC64
	/* NOTE(review): quirk for CEDAR on PPC64 — confirm the original
	 * bug report before touching this. */
	if (rdev->family == CHIP_CEDAR)
		rdev->need_dma32 = true;
#endif

	dma_bits = rdev->need_dma32 ? 32 : 40;
	r = pci_set_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		/* fall back to 32-bit addressing */
		rdev->need_dma32 = true;
		dma_bits = 32;
		pr_warn("radeon: No suitable DMA available\n");
	}
	r = pci_set_consistent_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		pci_set_consistent_dma_mask(rdev->pdev, DMA_BIT_MASK(32));
		pr_warn("radeon: No coherent DMA available\n");
	}
	rdev->need_swiotlb = drm_need_swiotlb(dma_bits);

	/* Registers mapping */
	/* TODO: block userspace mapping of io register */
	spin_lock_init(&rdev->mmio_idx_lock);
	spin_lock_init(&rdev->smc_idx_lock);
	spin_lock_init(&rdev->pll_idx_lock);
	spin_lock_init(&rdev->mc_idx_lock);
	spin_lock_init(&rdev->pcie_idx_lock);
	spin_lock_init(&rdev->pciep_idx_lock);
	spin_lock_init(&rdev->pif_idx_lock);
	spin_lock_init(&rdev->cg_idx_lock);
	spin_lock_init(&rdev->uvd_idx_lock);
	spin_lock_init(&rdev->rcu_idx_lock);
	spin_lock_init(&rdev->didt_idx_lock);
	spin_lock_init(&rdev->end_idx_lock);
	/* BONAIRE+ moved the MMIO register BAR to 5 (2 is doorbells) */
	if (rdev->family >= CHIP_BONAIRE) {
		rdev->rmmio_base = pci_resource_start(rdev->pdev, 5);
		rdev->rmmio_size = pci_resource_len(rdev->pdev, 5);
	} else {
		rdev->rmmio_base = pci_resource_start(rdev->pdev, 2);
		rdev->rmmio_size = pci_resource_len(rdev->pdev, 2);
	}
	rdev->rmmio = ioremap(rdev->rmmio_base, rdev->rmmio_size);
	if (rdev->rmmio == NULL)
		return -ENOMEM;

	/* doorbell bar mapping */
	if (rdev->family >= CHIP_BONAIRE)
		radeon_doorbell_init(rdev);

	/* io port mapping */
	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
		if (pci_resource_flags(rdev->pdev, i) & IORESOURCE_IO) {
			rdev->rio_mem_size = pci_resource_len(rdev->pdev, i);
			rdev->rio_mem = pci_iomap(rdev->pdev, i, rdev->rio_mem_size);
			break;
		}
	}
	if (rdev->rio_mem == NULL)
		DRM_ERROR("Unable to find PCI I/O BAR\n");

	if (rdev->flags & RADEON_IS_PX)
		radeon_device_handle_px_quirks(rdev);

	/* if we have > 1 VGA cards, then disable the radeon VGA resources */
	/* this will fail for cards that aren't VGA class devices, just
	 * ignore it */
	vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode);

	if (rdev->flags & RADEON_IS_PX)
		runtime = true;
	if (!pci_is_thunderbolt_attached(rdev->pdev))
		vga_switcheroo_register_client(rdev->pdev,
					       &radeon_switcheroo_ops, runtime);
	if (runtime)
		vga_switcheroo_init_domain_pm_ops(rdev->dev, &rdev->vga_pm_domain);

	r = radeon_init(rdev);
	if (r)
		goto failed;

	r = radeon_gem_debugfs_init(rdev);
	if (r) {
		DRM_ERROR("registering gem debugfs failed (%d).\n", r);
	}

	r = radeon_mst_debugfs_init(rdev);
	if (r) {
		DRM_ERROR("registering mst debugfs failed (%d).\n", r);
	}

	if (rdev->flags & RADEON_IS_AGP && !rdev->accel_working) {
		/* Acceleration not working on AGP card try again
		 * with fallback to PCI or PCIE GART */
		radeon_asic_reset(rdev);
		radeon_fini(rdev);
		radeon_agp_disable(rdev);
		r = radeon_init(rdev);
		if (r)
			goto failed;
	}

	r = radeon_ib_ring_tests(rdev);
	if (r)
		DRM_ERROR("ib ring test failed (%d).\n", r);

	/* Turks/Thames GPU will freeze whole laptop if DPM is not
	 * restarted after resetting the engine; cycle it here.
	 * NOTE(review): workaround inherited from the original code —
	 * confirm against the upstream bug before changing. */
	if (rdev->pm.dpm_enabled &&
	    (rdev->pm.pm_method == PM_METHOD_DPM) &&
	    (rdev->family == CHIP_TURKS) &&
	    (rdev->flags & RADEON_IS_MOBILITY)) {
		mutex_lock(&rdev->pm.mutex);
		radeon_dpm_disable(rdev);
		radeon_dpm_enable(rdev);
		mutex_unlock(&rdev->pm.mutex);
	}

	if ((radeon_testing & 1)) {
		if (rdev->accel_working)
			radeon_test_moves(rdev);
		else
			DRM_INFO("radeon: acceleration disabled, skipping move tests\n");
	}
	if ((radeon_testing & 2)) {
		if (rdev->accel_working)
			radeon_test_syncing(rdev);
		else
			DRM_INFO("radeon: acceleration disabled, skipping sync tests\n");
	}
	if (radeon_benchmarking) {
		if (rdev->accel_working)
			radeon_benchmark(rdev, radeon_benchmarking);
		else
			DRM_INFO("radeon: acceleration disabled, skipping benchmarks\n");
	}
	return 0;

failed:
	/* balance pm_runtime_get_sync() in radeon_driver_unload_kms() */
	if (radeon_is_px(ddev))
		pm_runtime_put_noidle(ddev->dev);
	if (runtime)
		vga_switcheroo_fini_domain_pm_ops(rdev->dev);
	return r;
}
1528
1529
1530
1531
1532
1533
1534
1535
1536
/**
 * radeon_device_fini - tear down the driver for a device
 * @rdev: radeon device pointer
 *
 * Reverse of radeon_device_init(): evicts VRAM, runs asic fini,
 * unregisters switcheroo/vgaarb clients, and unmaps the I/O, MMIO
 * and doorbell apertures. Called at driver unload.
 */
void radeon_device_fini(struct radeon_device *rdev)
{
	DRM_INFO("radeon: finishing device.\n");
	rdev->shutdown = true;
	/* evict vram memory */
	radeon_bo_evict_vram(rdev);
	radeon_fini(rdev);
	if (!pci_is_thunderbolt_attached(rdev->pdev))
		vga_switcheroo_unregister_client(rdev->pdev);
	if (rdev->flags & RADEON_IS_PX)
		vga_switcheroo_fini_domain_pm_ops(rdev->dev);
	/* unregister by passing NULL callbacks */
	vga_client_register(rdev->pdev, NULL, NULL, NULL);
	if (rdev->rio_mem)
		pci_iounmap(rdev->pdev, rdev->rio_mem);
	rdev->rio_mem = NULL;
	iounmap(rdev->rmmio);
	rdev->rmmio = NULL;
	if (rdev->family >= CHIP_BONAIRE)
		radeon_doorbell_fini(rdev);
}
1557
1558
1559
1560
1561
1562
1563
1564
1565
1566
1567
1568
1569
1570
1571
/**
 * radeon_suspend_kms - initiate device suspend
 * @dev: drm device pointer
 * @suspend: put the PCI device into D3hot
 * @fbcon: suspend the fbdev console (takes the console lock)
 * @freeze: hibernation path — reset the asic instead of powering down
 *
 * Turns off displays, unpins cursor/framebuffer BOs, evicts VRAM,
 * waits for all ring fences, saves BIOS scratch regs, and suspends the
 * hw. Called at driver suspend.
 *
 * Returns 0 on success, -ENODEV if the device is gone.
 */
int radeon_suspend_kms(struct drm_device *dev, bool suspend,
		       bool fbcon, bool freeze)
{
	struct radeon_device *rdev;
	struct drm_crtc *crtc;
	struct drm_connector *connector;
	int i, r;

	if (dev == NULL || dev->dev_private == NULL) {
		return -ENODEV;
	}

	rdev = dev->dev_private;

	/* switcheroo already powered us down — nothing to do */
	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	drm_kms_helper_poll_disable(dev);

	drm_modeset_lock_all(dev);
	/* turn off display hw */
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
	}
	drm_modeset_unlock_all(dev);

	/* unpin the front buffers and cursors */
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
		struct drm_framebuffer *fb = crtc->primary->fb;
		struct radeon_bo *robj;

		if (radeon_crtc->cursor_bo) {
			struct radeon_bo *robj = gem_to_radeon_bo(radeon_crtc->cursor_bo);
			r = radeon_bo_reserve(robj, false);
			if (r == 0) {
				radeon_bo_unpin(robj);
				radeon_bo_unreserve(robj);
			}
		}

		if (fb == NULL || fb->obj[0] == NULL) {
			continue;
		}
		robj = gem_to_radeon_bo(fb->obj[0]);
		/* don't unpin kernel fb objects — fbdev still needs them */
		if (!radeon_fbdev_robj_is_fb(rdev, robj)) {
			r = radeon_bo_reserve(robj, false);
			if (r == 0) {
				radeon_bo_unpin(robj);
				radeon_bo_unreserve(robj);
			}
		}
	}
	/* evict vram memory */
	radeon_bo_evict_vram(rdev);

	/* wait for gpu to finish processing current batch */
	for (i = 0; i < RADEON_NUM_RINGS; i++) {
		r = radeon_fence_wait_empty(rdev, i);
		if (r) {
			/* delay GPU reset to resume */
			radeon_fence_driver_force_completion(rdev, i);
		}
	}

	radeon_save_bios_scratch_regs(rdev);

	radeon_suspend(rdev);
	radeon_hpd_fini(rdev);
	/* evict remaining vram memory
	 * This second call to evict vram is to evict the gart page table
	 * using the CPU.
	 */
	radeon_bo_evict_vram(rdev);

	radeon_agp_suspend(rdev);

	pci_save_state(dev->pdev);
	if (freeze && rdev->family >= CHIP_CEDAR && !(rdev->flags & RADEON_IS_IGP)) {
		rdev->asic->asic_reset(rdev, true);
		pci_restore_state(dev->pdev);
	} else if (suspend) {
		/* Shut down the device */
		pci_disable_device(dev->pdev);
		pci_set_power_state(dev->pdev, PCI_D3hot);
	}

	if (fbcon) {
		console_lock();
		radeon_fbdev_set_suspend(rdev, 1);
		console_unlock();
	}
	return 0;
}
1667
1668
1669
1670
1671
1672
1673
1674
1675
1676
/**
 * radeon_resume_kms - initiate device resume
 *
 * @dev: drm dev pointer
 * @resume: bring the PCI device back to D0 and re-enable it
 * @fbcon: also resume the fbdev console
 *
 * Brings the hw back up: restores PCI state, resumes the asic, re-pins
 * cursor BOs, re-inits encoders/backlight/hpd, and turns displays back on.
 * Returns 0 for success or a negative error on failure.
 */
int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
{
	struct drm_connector *connector;
	struct radeon_device *rdev = dev->dev_private;
	struct drm_crtc *crtc;
	int r;

	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	if (fbcon) {
		console_lock();
	}
	if (resume) {
		/* power the device back up before touching any hw */
		pci_set_power_state(dev->pdev, PCI_D0);
		pci_restore_state(dev->pdev);
		if (pci_enable_device(dev->pdev)) {
			if (fbcon)
				console_unlock();
			return -1;
		}
	}
	/* resume AGP if in use */
	radeon_agp_resume(rdev);
	radeon_resume(rdev);

	r = radeon_ib_ring_tests(rdev);
	if (r)
		DRM_ERROR("ib ring test failed (%d).\n", r);

	if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
		/* do dpm late init */
		r = radeon_pm_late_init(rdev);
		if (r) {
			/* fall back with dpm off rather than failing resume */
			rdev->pm.dpm_enabled = false;
			DRM_ERROR("radeon_pm_late_init failed, disabling dpm\n");
		}
	} else {
		/* resume old pm late */
		radeon_pm_resume(rdev);
	}

	radeon_restore_bios_scratch_regs(rdev);

	/* pin cursors */
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);

		if (radeon_crtc->cursor_bo) {
			struct radeon_bo *robj = gem_to_radeon_bo(radeon_crtc->cursor_bo);
			r = radeon_bo_reserve(robj, false);
			if (r == 0) {
				/* pre-AVIVO parts need the cursor below 128MB */
				r = radeon_bo_pin_restricted(robj,
							     RADEON_GEM_DOMAIN_VRAM,
							     ASIC_IS_AVIVO(rdev) ?
							     0 : 1 << 27,
							     &radeon_crtc->cursor_addr);
				if (r != 0)
					DRM_ERROR("Failed to pin cursor BO (%d)\n", r);
				radeon_bo_unreserve(robj);
			}
		}
	}

	/* init dig PHYs, disp eng pll */
	if (rdev->is_atom_bios) {
		radeon_atom_encoder_init(rdev);
		radeon_atom_disp_eng_pll_init(rdev);
		/* turn on the BL */
		if (rdev->mode_info.bl_encoder) {
			u8 bl_level = radeon_get_backlight_level(rdev,
								 rdev->mode_info.bl_encoder);
			radeon_set_backlight_level(rdev, rdev->mode_info.bl_encoder,
						   bl_level);
		}
	}
	/* reset hpd state */
	radeon_hpd_init(rdev);
	/* blat the mode back in */
	if (fbcon) {
		drm_helper_resume_force_mode(dev);
		/* turn on display hw */
		drm_modeset_lock_all(dev);
		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
			drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
		}
		drm_modeset_unlock_all(dev);
	}

	drm_kms_helper_poll_enable(dev);

	/* set the power state here in case we are a PX system or headless */
	if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled)
		radeon_pm_compute_clocks(rdev);

	if (fbcon) {
		radeon_fbdev_set_suspend(rdev, 0);
		console_unlock();
	}

	return 0;
}
1780
1781
1782
1783
1784
1785
1786
1787
1788
/**
 * radeon_gpu_reset - reset the asic
 *
 * @rdev: radeon device pointer
 *
 * Attempts to reset the GPU if it has hung (all asics): backs up the
 * ring contents, resets the asic, resumes it, then restores the saved
 * ring commands. Returns 0 for success or an error on failure.
 */
int radeon_gpu_reset(struct radeon_device *rdev)
{
	unsigned ring_sizes[RADEON_NUM_RINGS];
	uint32_t *ring_data[RADEON_NUM_RINGS];

	bool saved = false;

	int i, r;
	int resched;

	down_write(&rdev->exclusive_lock);

	/* another path may have already handled (or cleared) the reset */
	if (!rdev->needs_reset) {
		up_write(&rdev->exclusive_lock);
		return 0;
	}

	atomic_inc(&rdev->gpu_reset_counter);

	radeon_save_bios_scratch_regs(rdev);
	/* block TTM's delayed workqueue while the hw is down */
	resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
	radeon_suspend(rdev);
	radeon_hpd_fini(rdev);

	/* save the unprocessed commands so they can be replayed after reset */
	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		ring_sizes[i] = radeon_ring_backup(rdev, &rdev->ring[i],
						   &ring_data[i]);
		if (ring_sizes[i]) {
			saved = true;
			dev_info(rdev->dev, "Saved %d dwords of commands "
				 "on ring %d.\n", ring_sizes[i], i);
		}
	}

	r = radeon_asic_reset(rdev);
	if (!r) {
		dev_info(rdev->dev, "GPU reset succeeded, trying to resume\n");
		radeon_resume(rdev);
	}

	radeon_restore_bios_scratch_regs(rdev);

	/* replay saved commands, or force-complete fences if reset failed */
	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		if (!r && ring_data[i]) {
			radeon_ring_restore(rdev, &rdev->ring[i],
					    ring_sizes[i], ring_data[i]);
		} else {
			/* radeon_ring_restore frees ring_data on success;
			 * on this path we must free the backup ourselves */
			radeon_fence_driver_force_completion(rdev, i);
			kfree(ring_data[i]);
		}
	}

	if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
		/* do dpm late init */
		r = radeon_pm_late_init(rdev);
		if (r) {
			rdev->pm.dpm_enabled = false;
			DRM_ERROR("radeon_pm_late_init failed, disabling dpm\n");
		}
	} else {
		/* resume old pm late */
		radeon_pm_resume(rdev);
	}

	/* init dig PHYs, disp eng pll */
	if (rdev->is_atom_bios) {
		radeon_atom_encoder_init(rdev);
		radeon_atom_disp_eng_pll_init(rdev);
		/* turn on the BL */
		if (rdev->mode_info.bl_encoder) {
			u8 bl_level = radeon_get_backlight_level(rdev,
								 rdev->mode_info.bl_encoder);
			radeon_set_backlight_level(rdev, rdev->mode_info.bl_encoder,
						   bl_level);
		}
	}

	radeon_hpd_init(rdev);

	ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);

	rdev->in_reset = true;
	rdev->needs_reset = false;

	/* drop to a read lock: the IB tests below need the hw but readers
	 * (command submission) may proceed concurrently again */
	downgrade_write(&rdev->exclusive_lock);

	drm_helper_resume_force_mode(rdev->ddev);

	/* set the power state here in case we are a PX system or headless */
	if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled)
		radeon_pm_compute_clocks(rdev);

	if (!r) {
		r = radeon_ib_ring_tests(rdev);
		if (r && saved)
			/* saved work was lost: signal callers to retry */
			r = -EAGAIN;
	} else {
		/* bad news, how to tell it to userspace ? */
		dev_info(rdev->dev, "GPU reset failed\n");
	}

	rdev->needs_reset = r == -EAGAIN;
	rdev->in_reset = false;

	up_read(&rdev->exclusive_lock);
	return r;
}
1897
1898
1899
1900
1901
1902int radeon_debugfs_add_files(struct radeon_device *rdev,
1903 struct drm_info_list *files,
1904 unsigned nfiles)
1905{
1906 unsigned i;
1907
1908 for (i = 0; i < rdev->debugfs_count; i++) {
1909 if (rdev->debugfs[i].files == files) {
1910
1911 return 0;
1912 }
1913 }
1914
1915 i = rdev->debugfs_count + 1;
1916 if (i > RADEON_DEBUGFS_MAX_COMPONENTS) {
1917 DRM_ERROR("Reached maximum number of debugfs components.\n");
1918 DRM_ERROR("Report so we increase "
1919 "RADEON_DEBUGFS_MAX_COMPONENTS.\n");
1920 return -EINVAL;
1921 }
1922 rdev->debugfs[rdev->debugfs_count].files = files;
1923 rdev->debugfs[rdev->debugfs_count].num_files = nfiles;
1924 rdev->debugfs_count = i;
1925#if defined(CONFIG_DEBUG_FS)
1926 drm_debugfs_create_files(files, nfiles,
1927 rdev->ddev->primary->debugfs_root,
1928 rdev->ddev->primary);
1929#endif
1930 return 0;
1931}
1932