#include <linux/console.h>
#include <linux/efi.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/vga_switcheroo.h>
#include <linux/vgaarb.h>

#include <drm/drm_cache.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_device.h>
#include <drm/drm_file.h>
#include <drm/drm_pci.h>
#include <drm/drm_probe_helper.h>
#include <drm/radeon_drm.h>

#include "radeon_reg.h"
#include "radeon.h"
#include "atom.h"

static const char radeon_family_name[][16] = {
	"R100",
	"RV100",
	"RS100",
	"RV200",
	"RS200",
	"R200",
	"RV250",
	"RS300",
	"RV280",
	"R300",
	"R350",
	"RV350",
	"RV380",
	"R420",
	"R423",
	"RV410",
	"RS400",
	"RS480",
	"RS600",
	"RS690",
	"RS740",
	"RV515",
	"R520",
	"RV530",
	"RV560",
	"RV570",
	"R580",
	"R600",
	"RV610",
	"RV630",
	"RV670",
	"RV620",
	"RV635",
	"RS780",
	"RS880",
	"RV770",
	"RV730",
	"RV710",
	"RV740",
	"CEDAR",
	"REDWOOD",
	"JUNIPER",
	"CYPRESS",
	"HEMLOCK",
	"PALM",
	"SUMO",
	"SUMO2",
	"BARTS",
	"TURKS",
	"CAICOS",
	"CAYMAN",
	"ARUBA",
	"TAHITI",
	"PITCAIRN",
	"VERDE",
	"OLAND",
	"HAINAN",
	"BONAIRE",
	"KAVERI",
	"KABINI",
	"HAWAII",
	"MULLINS",
	"LAST",
};

#if defined(CONFIG_VGA_SWITCHEROO)
bool radeon_has_atpx_dgpu_power_cntl(void);
bool radeon_is_atpx_hybrid(void);
#else
static inline bool radeon_has_atpx_dgpu_power_cntl(void) { return false; }
static inline bool radeon_is_atpx_hybrid(void) { return false; }
#endif

#define RADEON_PX_QUIRK_DISABLE_PX  (1 << 0)

struct radeon_px_quirk {
	u32 chip_vendor;
	u32 chip_device;
	u32 subsys_vendor;
	u32 subsys_device;
	u32 px_quirk_flags;
};

static struct radeon_px_quirk radeon_px_quirk_list[] = {
	/* Entries disable PX (PowerXpress runtime dGPU power switching)
	 * on systems where it is known to be broken, matched by
	 * chip vendor/device and subsystem vendor/device ids.
	 */
	{ PCI_VENDOR_ID_ATI, 0x6760, 0x1025, 0x0672, RADEON_PX_QUIRK_DISABLE_PX },
	{ PCI_VENDOR_ID_ATI, 0x6741, 0x1043, 0x108c, RADEON_PX_QUIRK_DISABLE_PX },
	{ PCI_VENDOR_ID_ATI, 0x6840, 0x1043, 0x2122, RADEON_PX_QUIRK_DISABLE_PX },
	{ PCI_VENDOR_ID_ATI, 0x6741, 0x1043, 0x2122, RADEON_PX_QUIRK_DISABLE_PX },
	{ PCI_VENDOR_ID_ATI, 0x6840, 0x1043, 0x2123, RADEON_PX_QUIRK_DISABLE_PX },
	{ 0, 0, 0, 0, 0 },
};

/**
 * radeon_is_px - report whether the device is a PX dGPU
 *
 * @dev: drm device pointer
 *
 * Returns true if the device is the dGPU of a PX/hybrid graphics laptop.
 */
bool radeon_is_px(struct drm_device *dev)
{
	struct radeon_device *rdev = dev->dev_private;

	if (rdev->flags & RADEON_IS_PX)
		return true;
	return false;
}

static void radeon_device_handle_px_quirks(struct radeon_device *rdev)
{
	struct radeon_px_quirk *p = radeon_px_quirk_list;

	/* Apply PX quirks */
	while (p && p->chip_device != 0) {
		if (rdev->pdev->vendor == p->chip_vendor &&
		    rdev->pdev->device == p->chip_device &&
		    rdev->pdev->subsystem_vendor == p->subsys_vendor &&
		    rdev->pdev->subsystem_device == p->subsys_device) {
			rdev->px_quirk_flags = p->px_quirk_flags;
			break;
		}
		++p;
	}

	if (rdev->px_quirk_flags & RADEON_PX_QUIRK_DISABLE_PX)
		rdev->flags &= ~RADEON_IS_PX;

	/* disable PX if the system doesn't support dGPU power control
	 * or hybrid graphics
	 */
	if (!radeon_is_atpx_hybrid() &&
	    !radeon_has_atpx_dgpu_power_cntl())
		rdev->flags &= ~RADEON_IS_PX;
}

/**
 * radeon_program_register_sequence - program an array of registers
 *
 * @rdev: radeon_device pointer
 * @registers: pointer to the register array
 * @array_size: size of the register array
 *
 * Programs an array of register (offset, and_mask, or_mask) triples:
 * each register is read, the bits in the AND mask are cleared, the
 * bits in the OR mask are set and the result is written back.  An AND
 * mask of 0xffffffff writes the OR value directly without a read.
 */
void radeon_program_register_sequence(struct radeon_device *rdev,
				      const u32 *registers,
				      const u32 array_size)
{
	u32 tmp, reg, and_mask, or_mask;
	int i;

	if (array_size % 3)
		return;

	for (i = 0; i < array_size; i += 3) {
		reg = registers[i + 0];
		and_mask = registers[i + 1];
		or_mask = registers[i + 2];

		if (and_mask == 0xffffffff) {
			tmp = or_mask;
		} else {
			tmp = RREG32(reg);
			tmp &= ~and_mask;
			tmp |= or_mask;
		}
		WREG32(reg, tmp);
	}
}
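
/*
 * Illustrative only: a register sequence is a flat array of
 * (offset, and_mask, or_mask) triples.  The offsets and values below
 * are made up for the example; the real tables are the per-ASIC
 * golden register lists.
 *
 *	static const u32 example_golden_registers[] = {
 *		0x9a10, 0x00010000, 0x00018208,  (read/modify/write)
 *		0x3f90, 0xffffffff, 0x00000100,  (direct write)
 *	};
 *	radeon_program_register_sequence(rdev, example_golden_registers,
 *					 ARRAY_SIZE(example_golden_registers));
 */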

/**
 * radeon_pci_config_reset - reset the GPU
 *
 * @rdev: radeon_device pointer
 *
 * Resets the GPU via the pci config reset sequence.
 */
void radeon_pci_config_reset(struct radeon_device *rdev)
{
	pci_write_config_dword(rdev->pdev, 0x7c, RADEON_ASIC_RESET_DATA);
}

/**
 * radeon_surface_init - Clear GPU surface registers.
 *
 * @rdev: radeon_device pointer
 *
 * Clear GPU surface registers (r1xx-r5xx).
 */
void radeon_surface_init(struct radeon_device *rdev)
{
	/* surface registers only exist on pre-r600 asics */
	if (rdev->family < CHIP_R600) {
		int i;

		for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {
			if (rdev->surface_regs[i].bo)
				radeon_bo_get_surface_reg(rdev->surface_regs[i].bo);
			else
				radeon_clear_surface_reg(rdev, i);
		}
		/* enable surfaces */
		WREG32(RADEON_SURFACE_CNTL, 0);
	}
}

/*
 * GPU scratch registers helpers function.
 */
/**
 * radeon_scratch_init - Init scratch register driver information.
 *
 * @rdev: radeon_device pointer
 *
 * Init CP scratch register driver information (r1xx-r5xx):
 * pre-r300 asics have 5 scratch registers, newer ones have 7.
 */
void radeon_scratch_init(struct radeon_device *rdev)
{
	int i;

	if (rdev->family < CHIP_R300) {
		rdev->scratch.num_reg = 5;
	} else {
		rdev->scratch.num_reg = 7;
	}
	rdev->scratch.reg_base = RADEON_SCRATCH_REG0;
	for (i = 0; i < rdev->scratch.num_reg; i++) {
		rdev->scratch.free[i] = true;
		rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4);
	}
}

/**
 * radeon_scratch_get - Allocate a scratch register
 *
 * @rdev: radeon_device pointer
 * @reg: scratch register mmio offset
 *
 * Allocate a CP scratch register for use by the driver (all asics).
 * Returns 0 on success or -EINVAL if no registers are free.
 */
int radeon_scratch_get(struct radeon_device *rdev, uint32_t *reg)
{
	int i;

	for (i = 0; i < rdev->scratch.num_reg; i++) {
		if (rdev->scratch.free[i]) {
			rdev->scratch.free[i] = false;
			*reg = rdev->scratch.reg[i];
			return 0;
		}
	}
	return -EINVAL;
}

/**
 * radeon_scratch_free - Free a scratch register
 *
 * @rdev: radeon_device pointer
 * @reg: scratch register mmio offset
 *
 * Free a CP scratch register allocated for use by the driver (all asics).
 */
void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg)
{
	int i;

	for (i = 0; i < rdev->scratch.num_reg; i++) {
		if (rdev->scratch.reg[i] == reg) {
			rdev->scratch.free[i] = true;
			return;
		}
	}
}
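
/*
 * Usage sketch (illustrative, not called anywhere): a scratch register
 * is typically grabbed, initialized, written by the GPU via a ring
 * packet, then polled and released, as in the driver's ring tests.
 * The polling loop below is an assumption based on that pattern.
 *
 *	uint32_t scratch, tmp;
 *	unsigned i;
 *	int r = radeon_scratch_get(rdev, &scratch);
 *	if (r)
 *		return r;
 *	WREG32(scratch, 0xCAFEDEAD);
 *	... emit a packet that writes 0xDEADBEEF to the register ...
 *	for (i = 0; i < rdev->usec_timeout; i++) {
 *		tmp = RREG32(scratch);
 *		if (tmp == 0xDEADBEEF)
 *			break;
 *		udelay(1);
 *	}
 *	radeon_scratch_free(rdev, scratch);
 */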

/*
 * GPU doorbell aperture helpers function.
 */
/**
 * radeon_doorbell_init - Init doorbell driver information.
 *
 * @rdev: radeon_device pointer
 *
 * Init doorbell driver information (CIK).
 * Returns 0 on success, error on failure.
 */
static int radeon_doorbell_init(struct radeon_device *rdev)
{
	/* doorbell bar mapping */
	rdev->doorbell.base = pci_resource_start(rdev->pdev, 2);
	rdev->doorbell.size = pci_resource_len(rdev->pdev, 2);

	rdev->doorbell.num_doorbells = min_t(u32, rdev->doorbell.size / sizeof(u32), RADEON_MAX_DOORBELLS);
	if (rdev->doorbell.num_doorbells == 0)
		return -EINVAL;

	rdev->doorbell.ptr = ioremap(rdev->doorbell.base, rdev->doorbell.num_doorbells * sizeof(u32));
	if (rdev->doorbell.ptr == NULL) {
		return -ENOMEM;
	}
	DRM_INFO("doorbell mmio base: 0x%08X\n", (uint32_t)rdev->doorbell.base);
	DRM_INFO("doorbell mmio size: %u\n", (unsigned)rdev->doorbell.size);

	memset(&rdev->doorbell.used, 0, sizeof(rdev->doorbell.used));

	return 0;
}

/**
 * radeon_doorbell_fini - Tear down doorbell driver information.
 *
 * @rdev: radeon_device pointer
 *
 * Tear down doorbell driver information (CIK).
 */
static void radeon_doorbell_fini(struct radeon_device *rdev)
{
	iounmap(rdev->doorbell.ptr);
	rdev->doorbell.ptr = NULL;
}

/**
 * radeon_doorbell_get - Allocate a doorbell entry
 *
 * @rdev: radeon_device pointer
 * @doorbell: doorbell index
 *
 * Allocate a doorbell for use by the driver (all asics).
 * Returns 0 on success or -EINVAL on failure.
 */
int radeon_doorbell_get(struct radeon_device *rdev, u32 *doorbell)
{
	unsigned long offset = find_first_zero_bit(rdev->doorbell.used, rdev->doorbell.num_doorbells);
	if (offset < rdev->doorbell.num_doorbells) {
		__set_bit(offset, rdev->doorbell.used);
		*doorbell = offset;
		return 0;
	} else {
		return -EINVAL;
	}
}

/**
 * radeon_doorbell_free - Free a doorbell entry
 *
 * @rdev: radeon_device pointer
 * @doorbell: doorbell index
 *
 * Free a doorbell allocated for use by the driver (all asics).
 */
void radeon_doorbell_free(struct radeon_device *rdev, u32 doorbell)
{
	if (doorbell < rdev->doorbell.num_doorbells)
		__clear_bit(doorbell, rdev->doorbell.used);
}
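
/*
 * Usage sketch (illustrative): a ring that signals the GPU through a
 * doorbell allocates an index and writes its write pointer to the
 * mapped aperture; doorbell.ptr is a u32 aperture mapping, so adding
 * the index addresses the right 32-bit slot.  The wptr variable is an
 * assumption for the example.
 *
 *	u32 idx;
 *	if (radeon_doorbell_get(rdev, &idx) == 0) {
 *		writel(wptr, rdev->doorbell.ptr + idx);
 *		...
 *		radeon_doorbell_free(rdev, idx);
 *	}
 */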

/*
 * radeon_wb_*()
 * Writeback is the method by which the GPU updates special pages in
 * memory with the status of certain GPU events (fences, ring pointers,
 * etc.) instead of the driver polling registers.
 */

/**
 * radeon_wb_disable - Disable Writeback
 *
 * @rdev: radeon_device pointer
 *
 * Disables Writeback (all asics).  Used for suspend.
 */
void radeon_wb_disable(struct radeon_device *rdev)
{
	rdev->wb.enabled = false;
}

/**
 * radeon_wb_fini - Disable Writeback and free memory
 *
 * @rdev: radeon_device pointer
 *
 * Disables Writeback and frees the Writeback memory (all asics).
 * Used at driver shutdown.
 */
void radeon_wb_fini(struct radeon_device *rdev)
{
	radeon_wb_disable(rdev);
	if (rdev->wb.wb_obj) {
		if (!radeon_bo_reserve(rdev->wb.wb_obj, false)) {
			radeon_bo_kunmap(rdev->wb.wb_obj);
			radeon_bo_unpin(rdev->wb.wb_obj);
			radeon_bo_unreserve(rdev->wb.wb_obj);
		}
		radeon_bo_unref(&rdev->wb.wb_obj);
		rdev->wb.wb = NULL;
		rdev->wb.wb_obj = NULL;
	}
}

/**
 * radeon_wb_init - Init Writeback driver info and allocate memory
 *
 * @rdev: radeon_device pointer
 *
 * Initializes writeback and allocates writeback memory (all asics).
 * Used at driver startup.
 * Returns 0 on success or an error on failure.
 */
int radeon_wb_init(struct radeon_device *rdev)
{
	int r;

	if (rdev->wb.wb_obj == NULL) {
		r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true,
				     RADEON_GEM_DOMAIN_GTT, 0, NULL, NULL,
				     &rdev->wb.wb_obj);
		if (r) {
			dev_warn(rdev->dev, "(%d) create WB bo failed\n", r);
			return r;
		}
		r = radeon_bo_reserve(rdev->wb.wb_obj, false);
		if (unlikely(r != 0)) {
			radeon_wb_fini(rdev);
			return r;
		}
		r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
				  &rdev->wb.gpu_addr);
		if (r) {
			radeon_bo_unreserve(rdev->wb.wb_obj);
			dev_warn(rdev->dev, "(%d) pin WB bo failed\n", r);
			radeon_wb_fini(rdev);
			return r;
		}
		r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
		radeon_bo_unreserve(rdev->wb.wb_obj);
		if (r) {
			dev_warn(rdev->dev, "(%d) map WB bo failed\n", r);
			radeon_wb_fini(rdev);
			return r;
		}
	}

	/* clear wb memory */
	memset((char *)rdev->wb.wb, 0, RADEON_GPU_PAGE_SIZE);
	/* disable event_write fences */
	rdev->wb.use_event = false;
	/* disabled via module param */
	if (radeon_no_wb == 1) {
		rdev->wb.enabled = false;
	} else {
		if (rdev->flags & RADEON_IS_AGP) {
			/* often unreliable on AGP */
			rdev->wb.enabled = false;
		} else if (rdev->family < CHIP_R300) {
			/* often unreliable on pre-r300 */
			rdev->wb.enabled = false;
		} else {
			rdev->wb.enabled = true;
			/* event_write fences are only available on r600+ */
			if (rdev->family >= CHIP_R600) {
				rdev->wb.use_event = true;
			}
		}
	}
	/* always use writeback/events on NI, APUs */
	if (rdev->family >= CHIP_PALM) {
		rdev->wb.enabled = true;
		rdev->wb.use_event = true;
	}

	dev_info(rdev->dev, "WB %sabled\n", rdev->wb.enabled ? "en" : "dis");

	return 0;
}
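
/*
 * Usage sketch (illustrative): with writeback enabled, ring code reads
 * GPU-updated values from the CPU mapping of the writeback page instead
 * of an MMIO register, e.g. a CP read pointer.  The fallback logic below
 * is an assumption modeled on how the driver's get_rptr helpers work.
 *
 *	u32 rptr;
 *	if (rdev->wb.enabled)
 *		rptr = le32_to_cpu(rdev->wb.wb[RADEON_WB_CP_RPTR_OFFSET / 4]);
 *	else
 *		rptr = RREG32(RADEON_CP_RB_RPTR);
 */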

/*
 * MC (memory controller) address space layout:
 *
 * The driver places the VRAM and GTT apertures inside the GPU's
 * internal address space.  VRAM is normally put at the base address
 * provided by the asic (the PCI aperture address or, for IGPs, the
 * top-of-memory base), and the GTT is placed in the larger remaining
 * hole before or after VRAM.  If the MC address space cannot cover all
 * of VRAM, or VRAM would overlap the AGP aperture, VRAM is limited to
 * the PCI aperture size.
 */
/**
 * radeon_vram_location - try to find VRAM location
 *
 * @rdev: radeon device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 * @base: base address at which to put VRAM
 *
 * Places VRAM at the base address provided, limiting its size to the
 * PCI aperture where needed.  The optional radeon_vram_limit module
 * parameter (in MB) further caps the usable VRAM size.
 */
void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 base)
{
	uint64_t limit = (uint64_t)radeon_vram_limit << 20;

	mc->vram_start = base;
	if (mc->mc_vram_size > (rdev->mc.mc_mask - base + 1)) {
		dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
		mc->real_vram_size = mc->aper_size;
		mc->mc_vram_size = mc->aper_size;
	}
	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
	if (rdev->flags & RADEON_IS_AGP && mc->vram_end > mc->gtt_start && mc->vram_start <= mc->gtt_end) {
		dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
		mc->real_vram_size = mc->aper_size;
		mc->mc_vram_size = mc->aper_size;
	}
	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
	if (limit && limit < mc->real_vram_size)
		mc->real_vram_size = limit;
	dev_info(rdev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
		 mc->mc_vram_size >> 20, mc->vram_start,
		 mc->vram_end, mc->real_vram_size >> 20);
}

/**
 * radeon_gtt_location - try to find GTT location
 *
 * @rdev: radeon device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 *
 * Places the GTT in the larger of the two holes in the MC address
 * space (before or after VRAM), respecting the GTT base alignment.
 * The requested GTT size is shrunk if the hole is too small.
 */
void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
{
	u64 size_af, size_bf;

	/* size of the address space after and before VRAM */
	size_af = ((rdev->mc.mc_mask - mc->vram_end) + mc->gtt_base_align) & ~mc->gtt_base_align;
	size_bf = mc->vram_start & ~mc->gtt_base_align;
	if (size_bf > size_af) {
		if (mc->gtt_size > size_bf) {
			dev_warn(rdev->dev, "limiting GTT\n");
			mc->gtt_size = size_bf;
		}
		mc->gtt_start = (mc->vram_start & ~mc->gtt_base_align) - mc->gtt_size;
	} else {
		if (mc->gtt_size > size_af) {
			dev_warn(rdev->dev, "limiting GTT\n");
			mc->gtt_size = size_af;
		}
		mc->gtt_start = (mc->vram_end + 1 + mc->gtt_base_align) & ~mc->gtt_base_align;
	}
	mc->gtt_end = mc->gtt_start + mc->gtt_size - 1;
	dev_info(rdev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n",
		 mc->gtt_size >> 20, mc->gtt_start, mc->gtt_end);
}
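
/*
 * Worked example (assumed numbers): with a 32-bit MC (mc_mask
 * 0xffffffff), 1024M of VRAM at base 0 and a 512M GTT request:
 * vram_start = 0x0 and vram_end = 0x3fffffff; the hole before VRAM is
 * empty while the hole after it is ~3G, so the GTT goes after VRAM:
 * gtt_start = 0x40000000, gtt_end = 0x5fffffff.
 */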

/*
 * GPU helpers function.
 */

/**
 * radeon_device_is_virtual - check if we are running in a VM
 *
 * Check if the asic has been passed through to a VM (all asics).
 * Used at driver startup.
 * Returns true if virtual or false if not.
 */
bool radeon_device_is_virtual(void)
{
#ifdef CONFIG_X86
	return boot_cpu_has(X86_FEATURE_HYPERVISOR);
#else
	return false;
#endif
}

/**
 * radeon_card_posted - check if the hw has already been initialized
 *
 * @rdev: radeon_device pointer
 *
 * Check if the asic has been initialized (all asics) at driver startup
 * or if a post is needed because a hw reset was performed.
 * Returns true if initialized or false if not.
 */
bool radeon_card_posted(struct radeon_device *rdev)
{
	uint32_t reg;

	/* for pass through, always force asic_init for CI */
	if (rdev->family >= CHIP_BONAIRE &&
	    radeon_device_is_virtual())
		return false;

	/* required for EFI mode on macbook2,1 which uses an r5xx asic */
	if (efi_enabled(EFI_BOOT) &&
	    (rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE) &&
	    (rdev->family < CHIP_R600))
		return false;

	if (ASIC_IS_NODCE(rdev))
		goto check_memsize;

	/* first check CRTCs */
	if (ASIC_IS_DCE4(rdev)) {
		reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET);
		if (rdev->num_crtc >= 4) {
			reg |= RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) |
				RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET);
		}
		if (rdev->num_crtc >= 6) {
			reg |= RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) |
				RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
		}
		if (reg & EVERGREEN_CRTC_MASTER_EN)
			return true;
	} else if (ASIC_IS_AVIVO(rdev)) {
		reg = RREG32(AVIVO_D1CRTC_CONTROL) |
		      RREG32(AVIVO_D2CRTC_CONTROL);
		if (reg & AVIVO_CRTC_EN) {
			return true;
		}
	} else {
		reg = RREG32(RADEON_CRTC_GEN_CNTL) |
		      RREG32(RADEON_CRTC2_GEN_CNTL);
		if (reg & RADEON_CRTC_EN) {
			return true;
		}
	}

check_memsize:
	/* then check MEM_SIZE, in case the crtcs are off */
	if (rdev->family >= CHIP_R600)
		reg = RREG32(R600_CONFIG_MEMSIZE);
	else
		reg = RREG32(RADEON_CONFIG_MEMSIZE);

	if (reg)
		return true;

	return false;
}

/**
 * radeon_update_bandwidth_info - update display bandwidth params
 *
 * @rdev: radeon_device pointer
 *
 * Used when sclk/mclk are switched or display modes are set (all asics).
 */
void radeon_update_bandwidth_info(struct radeon_device *rdev)
{
	fixed20_12 a;
	u32 sclk = rdev->pm.current_sclk;
	u32 mclk = rdev->pm.current_mclk;

	/* sclk/mclk in Mhz */
	a.full = dfixed_const(100);
	rdev->pm.sclk.full = dfixed_const(sclk);
	rdev->pm.sclk.full = dfixed_div(rdev->pm.sclk, a);
	rdev->pm.mclk.full = dfixed_const(mclk);
	rdev->pm.mclk.full = dfixed_div(rdev->pm.mclk, a);

	if (rdev->flags & RADEON_IS_IGP) {
		a.full = dfixed_const(16);
		/* core_bandwidth = sclk(Mhz) * 16 */
		rdev->pm.core_bandwidth.full = dfixed_div(rdev->pm.sclk, a);
	}
}

/**
 * radeon_boot_test_post_card - check and possibly initialize the hw
 *
 * @rdev: radeon_device pointer
 *
 * Check if the asic is initialized and if not, attempt to initialize
 * it (all asics).
 * Returns true if initialized or false if not.
 */
bool radeon_boot_test_post_card(struct radeon_device *rdev)
{
	if (radeon_card_posted(rdev))
		return true;

	if (rdev->bios) {
		DRM_INFO("GPU not posted. posting now...\n");
		if (rdev->is_atom_bios)
			atom_asic_init(rdev->mode_info.atom_context);
		else
			radeon_combios_asic_init(rdev->ddev);
		return true;
	} else {
		dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
		return false;
	}
}

/**
 * radeon_dummy_page_init - init dummy page used by the driver
 *
 * @rdev: radeon_device pointer
 *
 * Allocate the dummy page used by the driver (all asics).
 * This dummy page is used by the driver as a filler for GART entries
 * when pages are taken out of the GART.
 * Returns 0 on success, -ENOMEM on failure.
 */
int radeon_dummy_page_init(struct radeon_device *rdev)
{
	if (rdev->dummy_page.page)
		return 0;
	rdev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO);
	if (rdev->dummy_page.page == NULL)
		return -ENOMEM;
	rdev->dummy_page.addr = pci_map_page(rdev->pdev, rdev->dummy_page.page,
					     0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	if (pci_dma_mapping_error(rdev->pdev, rdev->dummy_page.addr)) {
		dev_err(&rdev->pdev->dev, "Failed to DMA MAP the dummy page\n");
		__free_page(rdev->dummy_page.page);
		rdev->dummy_page.page = NULL;
		return -ENOMEM;
	}
	rdev->dummy_page.entry = radeon_gart_get_page_entry(rdev->dummy_page.addr,
							    RADEON_GART_PAGE_DUMMY);
	return 0;
}

/**
 * radeon_dummy_page_fini - free dummy page used by the driver
 *
 * @rdev: radeon_device pointer
 *
 * Frees the dummy page used by the driver (all asics).
 */
void radeon_dummy_page_fini(struct radeon_device *rdev)
{
	if (rdev->dummy_page.page == NULL)
		return;
	pci_unmap_page(rdev->pdev, rdev->dummy_page.addr,
		       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	__free_page(rdev->dummy_page.page);
	rdev->dummy_page.page = NULL;
}

/* ATOM accessor methods */
/*
 * ATOM is an interpreted byte code stored in tables in the vbios.  The
 * interpreter in the driver (CAIL) parses and executes the tables
 * through a set of register read/write callbacks provided below.
 */

/**
 * cail_pll_read - read PLL register
 *
 * @info: atom card_info pointer
 * @reg: PLL register offset
 *
 * Provides a PLL register accessor for the atom interpreter (r4xx+).
 * Returns the value of the PLL register.
 */
static uint32_t cail_pll_read(struct card_info *info, uint32_t reg)
{
	struct radeon_device *rdev = info->dev->dev_private;
	uint32_t r;

	r = rdev->pll_rreg(rdev, reg);
	return r;
}

/**
 * cail_pll_write - write PLL register
 *
 * @info: atom card_info pointer
 * @reg: PLL register offset
 * @val: value to write to the pll register
 *
 * Provides a PLL register accessor for the atom interpreter (r4xx+).
 */
static void cail_pll_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct radeon_device *rdev = info->dev->dev_private;

	rdev->pll_wreg(rdev, reg, val);
}

/**
 * cail_mc_read - read MC (Memory Controller) register
 *
 * @info: atom card_info pointer
 * @reg: MC register offset
 *
 * Provides an MC register accessor for the atom interpreter (r4xx+).
 * Returns the value of the MC register.
 */
static uint32_t cail_mc_read(struct card_info *info, uint32_t reg)
{
	struct radeon_device *rdev = info->dev->dev_private;
	uint32_t r;

	r = rdev->mc_rreg(rdev, reg);
	return r;
}

/**
 * cail_mc_write - write MC (Memory Controller) register
 *
 * @info: atom card_info pointer
 * @reg: MC register offset
 * @val: value to write to the MC register
 *
 * Provides an MC register accessor for the atom interpreter (r4xx+).
 */
static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct radeon_device *rdev = info->dev->dev_private;

	rdev->mc_wreg(rdev, reg, val);
}

/**
 * cail_reg_write - write MMIO register
 *
 * @info: atom card_info pointer
 * @reg: MMIO register offset
 * @val: value to write to the register
 *
 * Provides an MMIO register accessor for the atom interpreter (r4xx+).
 * Atom register offsets are dword-indexed while mmio offsets are
 * byte-indexed, hence the multiply by four.
 */
static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct radeon_device *rdev = info->dev->dev_private;

	WREG32(reg*4, val);
}

/**
 * cail_reg_read - read MMIO register
 *
 * @info: atom card_info pointer
 * @reg: MMIO register offset
 *
 * Provides an MMIO register accessor for the atom interpreter (r4xx+).
 * Returns the value of the register.
 */
static uint32_t cail_reg_read(struct card_info *info, uint32_t reg)
{
	struct radeon_device *rdev = info->dev->dev_private;
	uint32_t r;

	r = RREG32(reg*4);
	return r;
}

/**
 * cail_ioreg_write - write IO register
 *
 * @info: atom card_info pointer
 * @reg: IO register offset
 * @val: value to write to the register
 *
 * Provides an IO register accessor for the atom interpreter (r4xx+).
 */
static void cail_ioreg_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct radeon_device *rdev = info->dev->dev_private;

	WREG32_IO(reg*4, val);
}

/**
 * cail_ioreg_read - read IO register
 *
 * @info: atom card_info pointer
 * @reg: IO register offset
 *
 * Provides an IO register accessor for the atom interpreter (r4xx+).
 * Returns the value of the register.
 */
static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg)
{
	struct radeon_device *rdev = info->dev->dev_private;
	uint32_t r;

	r = RREG32_IO(reg*4);
	return r;
}

/**
 * radeon_atombios_init - init the driver info and callbacks for atombios
 *
 * @rdev: radeon_device pointer
 *
 * Initializes the driver info and register access callbacks for the
 * ATOM interpreter (r4xx+).
 * Returns 0 on success, -ENOMEM on failure.
 * Called at driver startup.
 */
int radeon_atombios_init(struct radeon_device *rdev)
{
	struct card_info *atom_card_info =
	    kzalloc(sizeof(struct card_info), GFP_KERNEL);

	if (!atom_card_info)
		return -ENOMEM;

	rdev->mode_info.atom_card_info = atom_card_info;
	atom_card_info->dev = rdev->ddev;
	atom_card_info->reg_read = cail_reg_read;
	atom_card_info->reg_write = cail_reg_write;
	/* needed for iio ops */
	if (rdev->rio_mem) {
		atom_card_info->ioreg_read = cail_ioreg_read;
		atom_card_info->ioreg_write = cail_ioreg_write;
	} else {
		DRM_ERROR("Unable to find PCI I/O BAR; using MMIO for ATOM IIO\n");
		atom_card_info->ioreg_read = cail_reg_read;
		atom_card_info->ioreg_write = cail_reg_write;
	}
	atom_card_info->mc_read = cail_mc_read;
	atom_card_info->mc_write = cail_mc_write;
	atom_card_info->pll_read = cail_pll_read;
	atom_card_info->pll_write = cail_pll_write;

	rdev->mode_info.atom_context = atom_parse(atom_card_info, rdev->bios);
	if (!rdev->mode_info.atom_context) {
		radeon_atombios_fini(rdev);
		return -ENOMEM;
	}

	mutex_init(&rdev->mode_info.atom_context->mutex);
	mutex_init(&rdev->mode_info.atom_context->scratch_mutex);
	radeon_atom_initialize_bios_scratch_regs(rdev->ddev);
	atom_allocate_fb_scratch(rdev->mode_info.atom_context);
	return 0;
}

/**
 * radeon_atombios_fini - free the driver info and callbacks for atombios
 *
 * @rdev: radeon_device pointer
 *
 * Frees the driver info and register access callbacks for the ATOM
 * interpreter (r4xx+).
 * Called at driver shutdown.
 */
void radeon_atombios_fini(struct radeon_device *rdev)
{
	if (rdev->mode_info.atom_context) {
		kfree(rdev->mode_info.atom_context->scratch);
	}
	kfree(rdev->mode_info.atom_context);
	rdev->mode_info.atom_context = NULL;
	kfree(rdev->mode_info.atom_card_info);
	rdev->mode_info.atom_card_info = NULL;
}

/* COMBIOS */
/*
 * COMBIOS is the bios format prior to ATOM.  It is a simple binary
 * format with fixed tables rather than interpreted byte code, so no
 * register access callbacks are required.
 */

/**
 * radeon_combios_init - init the driver info for combios
 *
 * @rdev: radeon_device pointer
 *
 * Initializes the driver info for combios (r1xx-r3xx).
 * Returns 0 on success.
 * Called at driver startup.
 */
int radeon_combios_init(struct radeon_device *rdev)
{
	radeon_combios_initialize_bios_scratch_regs(rdev->ddev);
	return 0;
}

/**
 * radeon_combios_fini - free the driver info for combios
 *
 * @rdev: radeon_device pointer
 *
 * Frees the driver info for combios (r1xx-r3xx).
 * Called at driver shutdown.
 */
void radeon_combios_fini(struct radeon_device *rdev)
{
}

/* if we get transitioned to only one device, take VGA back */
/**
 * radeon_vga_set_decode - enable/disable vga decode
 *
 * @cookie: radeon_device pointer
 * @state: enable/disable vga decode
 *
 * Enable/disable vga decode (all asics).
 * Returns VGA resource flags.
 */
static unsigned int radeon_vga_set_decode(void *cookie, bool state)
{
	struct radeon_device *rdev = cookie;
	radeon_vga_set_state(rdev, state);
	if (state)
		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
	else
		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}

/**
 * radeon_check_pot_argument - check that argument is a power of two
 *
 * @arg: value to check
 *
 * Validates that a certain argument is a power of two (all asics).
 * Returns true if argument is a power of two.
 */
static bool radeon_check_pot_argument(int arg)
{
	return (arg & (arg - 1)) == 0;
}
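
/*
 * Illustrative check of the bit trick above: a power of two has a
 * single bit set, so ANDing with one less than itself clears it,
 * e.g. 64 & 63 == 0, while 48 & 47 == 32 (not a power of two).
 */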

/**
 * radeon_gart_size_auto - determine a sensible default GART size
 *
 * @family: asic family name
 *
 * Returns the default GART size in MB for the given asic family.
 */
static int radeon_gart_size_auto(enum radeon_family family)
{
	/* default to a larger gart size on newer asics */
	if (family >= CHIP_TAHITI)
		return 2048;
	else if (family >= CHIP_RV770)
		return 1024;
	else
		return 512;
}

/**
 * radeon_check_arguments - validate module params
 *
 * @rdev: radeon_device pointer
 *
 * Validates certain module parameters and updates the associated
 * values used by the driver (all asics).
 */
static void radeon_check_arguments(struct radeon_device *rdev)
{
	/* vramlimit must be a power of two */
	if (!radeon_check_pot_argument(radeon_vram_limit)) {
		dev_warn(rdev->dev, "vram limit (%d) must be a power of 2\n",
			 radeon_vram_limit);
		radeon_vram_limit = 0;
	}

	if (radeon_gart_size == -1) {
		radeon_gart_size = radeon_gart_size_auto(rdev->family);
	}

	/* gtt size must be a power of two and greater or equal to 32M */
	if (radeon_gart_size < 32) {
		dev_warn(rdev->dev, "gart size (%d) too small\n",
			 radeon_gart_size);
		radeon_gart_size = radeon_gart_size_auto(rdev->family);
	} else if (!radeon_check_pot_argument(radeon_gart_size)) {
		dev_warn(rdev->dev, "gart size (%d) must be a power of 2\n",
			 radeon_gart_size);
		radeon_gart_size = radeon_gart_size_auto(rdev->family);
	}
	rdev->mc.gtt_size = (uint64_t)radeon_gart_size << 20;

	/* AGP mode can only be -1, 0, 1, 2, 4, 8 */
	switch (radeon_agpmode) {
	case -1:
	case 0:
	case 1:
	case 2:
	case 4:
	case 8:
		break;
	default:
		dev_warn(rdev->dev, "invalid AGP mode %d (valid mode: "
			 "-1, 0, 1, 2, 4, 8)\n", radeon_agpmode);
		radeon_agpmode = 0;
		break;
	}

	if (!radeon_check_pot_argument(radeon_vm_size)) {
		dev_warn(rdev->dev, "VM size (%d) must be a power of 2\n",
			 radeon_vm_size);
		radeon_vm_size = 4;
	}

	if (radeon_vm_size < 1) {
		dev_warn(rdev->dev, "VM size (%d) too small, min is 1GB\n",
			 radeon_vm_size);
		radeon_vm_size = 4;
	}

	/*
	 * Max GPUVM size for Cayman, SI and CI are 40 bits.
	 */
	if (radeon_vm_size > 1024) {
		dev_warn(rdev->dev, "VM size (%d) too large, max is 1TB\n",
			 radeon_vm_size);
		radeon_vm_size = 4;
	}

	/* defines number of bits in page table versus page directory,
	 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
	 * page table and the remaining bits are in the page directory
	 */
	if (radeon_vm_block_size == -1) {

		/* Total bits covered by PD + PTs */
		unsigned bits = ilog2(radeon_vm_size) + 18;

		/* Make sure the PD is 4K in size up to 8GB address space.
		   Above that split equal between PD and PTs */
		if (radeon_vm_size <= 8)
			radeon_vm_block_size = bits - 9;
		else
			radeon_vm_block_size = (bits + 3) / 2;

	} else if (radeon_vm_block_size < 9) {
		dev_warn(rdev->dev, "VM page table size (%d) too small\n",
			 radeon_vm_block_size);
		radeon_vm_block_size = 9;
	}

	if (radeon_vm_block_size > 24 ||
	    (radeon_vm_size * 1024) < (1ull << radeon_vm_block_size)) {
		dev_warn(rdev->dev, "VM page table size (%d) too large\n",
			 radeon_vm_block_size);
		radeon_vm_block_size = 9;
	}
}
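
/*
 * Worked example of the default block-size computation above: with
 * radeon_vm_size = 4 (GB), bits = ilog2(4) + 18 = 20; since the VM is
 * at most 8GB, radeon_vm_block_size = 20 - 9 = 11, i.e. an 11-bit page
 * table index with the remaining 9 bits in the page directory (plus
 * the 12-bit in-page offset, covering the full 32-bit address space).
 */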

/**
 * radeon_switcheroo_set_state - set switcheroo state
 *
 * @pdev: pci dev pointer
 * @state: vga_switcheroo state
 *
 * Callback for the switcheroo driver.  Suspends or resumes
 * the asic before or after it is powered up using ACPI methods.
 */
static void radeon_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	if (radeon_is_px(dev) && state == VGA_SWITCHEROO_OFF)
		return;

	if (state == VGA_SWITCHEROO_ON) {
		pr_info("radeon: switched on\n");
		/* don't suspend or resume card normally */
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;

		radeon_resume_kms(dev, true, true);

		dev->switch_power_state = DRM_SWITCH_POWER_ON;
		drm_kms_helper_poll_enable(dev);
	} else {
		pr_info("radeon: switched off\n");
		drm_kms_helper_poll_disable(dev);
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		radeon_suspend_kms(dev, true, true, false);
		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
	}
}

/**
 * radeon_switcheroo_can_switch - see if switcheroo state can change
 *
 * @pdev: pci dev pointer
 *
 * Callback for the switcheroo driver.  Checks whether the switcheroo
 * state can be changed.
 * Returns true if the state can be changed, false if not.
 */
static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	/*
	 * FIXME: open_count is protected by drm_global_mutex but that would lead to
	 * locking inversion with the driver load path. And the access here is
	 * completely racy anyway. So don't bother with locking for now.
	 */
	return dev->open_count == 0;
}

static const struct vga_switcheroo_client_ops radeon_switcheroo_ops = {
	.set_gpu_state = radeon_switcheroo_set_state,
	.reprobe = NULL,
	.can_switch = radeon_switcheroo_can_switch,
};

/**
 * radeon_device_init - initialize the driver
 *
 * @rdev: radeon_device pointer
 * @ddev: drm dev pointer
 * @pdev: pci dev pointer
 * @flags: driver flags
 *
 * Initializes the driver info and hw (all asics).
 * Returns 0 for success or an error on failure.
 * Called at driver startup.
 */
int radeon_device_init(struct radeon_device *rdev,
		       struct drm_device *ddev,
		       struct pci_dev *pdev,
		       uint32_t flags)
{
	int r, i;
	int dma_bits;
	bool runtime = false;

	rdev->shutdown = false;
	rdev->dev = &pdev->dev;
	rdev->ddev = ddev;
	rdev->pdev = pdev;
	rdev->flags = flags;
	rdev->family = flags & RADEON_FAMILY_MASK;
	rdev->is_atom_bios = false;
	rdev->usec_timeout = RADEON_MAX_USEC_TIMEOUT;
	rdev->mc.gtt_size = 512 * 1024 * 1024;
	rdev->accel_working = false;
	/* set up ring ids */
	for (i = 0; i < RADEON_NUM_RINGS; i++) {
		rdev->ring[i].idx = i;
	}
	rdev->fence_context = dma_fence_context_alloc(RADEON_NUM_RINGS);

	DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
		 radeon_family_name[rdev->family], pdev->vendor, pdev->device,
		 pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);

	/* mutex initialization is all done here so we
	 * can recover on error
	 */
	mutex_init(&rdev->ring_lock);
	mutex_init(&rdev->dc_hw_i2c_mutex);
	atomic_set(&rdev->ih.lock, 0);
	mutex_init(&rdev->gem.mutex);
	mutex_init(&rdev->pm.mutex);
	mutex_init(&rdev->gpu_clock_mutex);
	mutex_init(&rdev->srbm_mutex);
	init_rwsem(&rdev->pm.mclk_lock);
	init_rwsem(&rdev->exclusive_lock);
	init_waitqueue_head(&rdev->irq.vblank_queue);
	r = radeon_gem_init(rdev);
	if (r)
		return r;

	radeon_check_arguments(rdev);
	/* Adjust VM size here.
	 * Max GPUVM size for cayman+ is 40 bits.
	 */
	rdev->vm_manager.max_pfn = radeon_vm_size << 18;

	/* Set asic functions */
	r = radeon_asic_init(rdev);
	if (r)
		return r;

	/* all of the newer IGP chips have an internal gart
	 * However some rs4xx report as AGP, so remove that here.
	 */
	if ((rdev->family >= CHIP_RS400) &&
	    (rdev->flags & RADEON_IS_IGP)) {
		rdev->flags &= ~RADEON_IS_AGP;
	}

	if (rdev->flags & RADEON_IS_AGP && radeon_agpmode == -1) {
		radeon_agp_disable(rdev);
	}

	/* Set the internal MC address mask.
	 * This is the max address of the GPU's
	 * internal address space.
	 */
	if (rdev->family >= CHIP_CAYMAN)
		rdev->mc.mc_mask = 0xffffffffffULL; /* 40 bit MC */
	else if (rdev->family >= CHIP_CEDAR)
		rdev->mc.mc_mask = 0xfffffffffULL; /* 36 bit MC */
	else
		rdev->mc.mc_mask = 0xffffffffULL; /* 32 bit MC */

	/* set DMA mask.
	 * PCIE - can handle 40-bits.
	 * IGP - can handle 40-bits
	 * AGP - generally dma32 is safest
	 * PCI - dma32 for legacy pci gart, 40 bits on newer asics
	 */
	dma_bits = 40;
	if (rdev->flags & RADEON_IS_AGP)
		dma_bits = 32;
	if ((rdev->flags & RADEON_IS_PCI) &&
	    (rdev->family <= CHIP_RS740))
		dma_bits = 32;
#ifdef CONFIG_PPC64
	if (rdev->family == CHIP_CEDAR)
		dma_bits = 32;
#endif

	r = dma_set_mask_and_coherent(&rdev->pdev->dev, DMA_BIT_MASK(dma_bits));
	if (r) {
		pr_warn("radeon: No suitable DMA available\n");
		return r;
	}
	rdev->need_swiotlb = drm_need_swiotlb(dma_bits);

	/* Registers mapping */
	/* TODO: block userspace mapping of io register */
	spin_lock_init(&rdev->mmio_idx_lock);
	spin_lock_init(&rdev->smc_idx_lock);
	spin_lock_init(&rdev->pll_idx_lock);
	spin_lock_init(&rdev->mc_idx_lock);
	spin_lock_init(&rdev->pcie_idx_lock);
	spin_lock_init(&rdev->pciep_idx_lock);
	spin_lock_init(&rdev->pif_idx_lock);
	spin_lock_init(&rdev->cg_idx_lock);
	spin_lock_init(&rdev->uvd_idx_lock);
	spin_lock_init(&rdev->rcu_idx_lock);
	spin_lock_init(&rdev->didt_idx_lock);
	spin_lock_init(&rdev->end_idx_lock);
	if (rdev->family >= CHIP_BONAIRE) {
		rdev->rmmio_base = pci_resource_start(rdev->pdev, 5);
		rdev->rmmio_size = pci_resource_len(rdev->pdev, 5);
	} else {
		rdev->rmmio_base = pci_resource_start(rdev->pdev, 2);
		rdev->rmmio_size = pci_resource_len(rdev->pdev, 2);
	}
	rdev->rmmio = ioremap(rdev->rmmio_base, rdev->rmmio_size);
	if (rdev->rmmio == NULL)
		return -ENOMEM;

	/* doorbell bar mapping */
	if (rdev->family >= CHIP_BONAIRE)
		radeon_doorbell_init(rdev);

	/* io port mapping */
	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
		if (pci_resource_flags(rdev->pdev, i) & IORESOURCE_IO) {
			rdev->rio_mem_size = pci_resource_len(rdev->pdev, i);
			rdev->rio_mem = pci_iomap(rdev->pdev, i, rdev->rio_mem_size);
			break;
		}
	}
	if (rdev->rio_mem == NULL)
		DRM_ERROR("Unable to find PCI I/O BAR\n");

	if (rdev->flags & RADEON_IS_PX)
		radeon_device_handle_px_quirks(rdev);

	/* if we have > 1 VGA cards, then disable the radeon VGA resources */
	/* this will fail for cards that aren't VGA class devices, just
	 * ignore it */
	vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode);

	if (rdev->flags & RADEON_IS_PX)
		runtime = true;
	if (!pci_is_thunderbolt_attached(rdev->pdev))
		vga_switcheroo_register_client(rdev->pdev,
					       &radeon_switcheroo_ops, runtime);
	if (runtime)
		vga_switcheroo_init_domain_pm_ops(rdev->dev, &rdev->vga_pm_domain);

	r = radeon_init(rdev);
	if (r)
		goto failed;

	r = radeon_gem_debugfs_init(rdev);
	if (r) {
		DRM_ERROR("registering gem debugfs failed (%d).\n", r);
	}

	r = radeon_mst_debugfs_init(rdev);
	if (r) {
		DRM_ERROR("registering mst debugfs failed (%d).\n", r);
	}

	if (rdev->flags & RADEON_IS_AGP && !rdev->accel_working) {
		/* Acceleration not working on AGP card try again
		 * with fallback to PCI or PCIE GART
		 */
		radeon_asic_reset(rdev);
		radeon_fini(rdev);
		radeon_agp_disable(rdev);
		r = radeon_init(rdev);
		if (r)
			goto failed;
	}

	r = radeon_ib_ring_tests(rdev);
	if (r)
		DRM_ERROR("ib ring test failed (%d).\n", r);

	/*
	 * Turks/Thames GPU will freeze whole laptop if DPM is not restarted
	 * after the CP ring has chewed at least one packet.  Hence here we
	 * stop and restart DPM after the radeon_ib_ring_tests().
	 */
	if (rdev->pm.dpm_enabled &&
	    (rdev->pm.pm_method == PM_METHOD_DPM) &&
	    (rdev->family == CHIP_TURKS) &&
	    (rdev->flags & RADEON_IS_MOBILITY)) {
		mutex_lock(&rdev->pm.mutex);
		radeon_dpm_disable(rdev);
		radeon_dpm_enable(rdev);
		mutex_unlock(&rdev->pm.mutex);
	}

	if ((radeon_testing & 1)) {
		if (rdev->accel_working)
			radeon_test_moves(rdev);
		else
			DRM_INFO("radeon: acceleration disabled, skipping move tests\n");
	}
	if ((radeon_testing & 2)) {
		if (rdev->accel_working)
			radeon_test_syncing(rdev);
		else
			DRM_INFO("radeon: acceleration disabled, skipping sync tests\n");
	}
	if (radeon_benchmarking) {
		if (rdev->accel_working)
			radeon_benchmark(rdev, radeon_benchmarking);
		else
			DRM_INFO("radeon: acceleration disabled, skipping benchmarks\n");
	}
	return 0;

failed:
	/* balance pm_runtime_get_sync() in radeon_driver_unload_kms() */
	if (radeon_is_px(ddev))
		pm_runtime_put_noidle(ddev->dev);
	if (runtime)
		vga_switcheroo_fini_domain_pm_ops(rdev->dev);
	return r;
}

/**
 * radeon_device_fini - tear down the driver
 *
 * @rdev: radeon_device pointer
 *
 * Tear down the driver info (all asics).
 * Called at driver shutdown.
 */
void radeon_device_fini(struct radeon_device *rdev)
{
	DRM_INFO("radeon: finishing device.\n");
	rdev->shutdown = true;
	/* evict vram memory */
	radeon_bo_evict_vram(rdev);
	radeon_fini(rdev);
	if (!pci_is_thunderbolt_attached(rdev->pdev))
		vga_switcheroo_unregister_client(rdev->pdev);
	if (rdev->flags & RADEON_IS_PX)
		vga_switcheroo_fini_domain_pm_ops(rdev->dev);
	vga_client_register(rdev->pdev, NULL, NULL, NULL);
	if (rdev->rio_mem)
		pci_iounmap(rdev->pdev, rdev->rio_mem);
	rdev->rio_mem = NULL;
	iounmap(rdev->rmmio);
	rdev->rmmio = NULL;
	if (rdev->family >= CHIP_BONAIRE)
		radeon_doorbell_fini(rdev);
}

/*
 * Suspend & resume.
 */
/**
 * radeon_suspend_kms - initiate device suspend
 *
 * @dev: drm dev pointer
 * @suspend: suspend state
 * @fbcon: console suspend state
 * @freeze: hibernation freeze state
 *
 * Puts the hw in the suspend state (all asics): unpins the front
 * buffers and cursors, evicts vram memory, waits for the rings to
 * idle, saves the bios scratch regs and shuts down the hw.
 * Returns 0 for success or an error on failure.
 * Called at driver suspend.
 */
int radeon_suspend_kms(struct drm_device *dev, bool suspend,
		       bool fbcon, bool freeze)
{
	struct radeon_device *rdev;
	struct drm_crtc *crtc;
	struct drm_connector *connector;
	int i, r;

	if (dev == NULL || dev->dev_private == NULL) {
		return -ENODEV;
	}

	rdev = dev->dev_private;

	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	drm_kms_helper_poll_disable(dev);

	drm_modeset_lock_all(dev);
	/* turn off display hw */
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
	}
	drm_modeset_unlock_all(dev);

	/* unpin the front buffers and cursors */
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
		struct drm_framebuffer *fb = crtc->primary->fb;
		struct radeon_bo *robj;

		if (radeon_crtc->cursor_bo) {
			struct radeon_bo *robj = gem_to_radeon_bo(radeon_crtc->cursor_bo);
			r = radeon_bo_reserve(robj, false);
			if (r == 0) {
				radeon_bo_unpin(robj);
				radeon_bo_unreserve(robj);
			}
		}

		if (fb == NULL || fb->obj[0] == NULL) {
			continue;
		}
		robj = gem_to_radeon_bo(fb->obj[0]);
		/* don't unpin kernel fb objects */
		if (!radeon_fbdev_robj_is_fb(rdev, robj)) {
			r = radeon_bo_reserve(robj, false);
			if (r == 0) {
				radeon_bo_unpin(robj);
				radeon_bo_unreserve(robj);
			}
		}
	}
	/* evict vram memory */
	radeon_bo_evict_vram(rdev);

	/* wait for gpu to finish processing current batch */
	for (i = 0; i < RADEON_NUM_RINGS; i++) {
		r = radeon_fence_wait_empty(rdev, i);
		if (r) {
			/* delay GPU reset to resume */
			radeon_fence_driver_force_completion(rdev, i);
		}
	}

	radeon_save_bios_scratch_regs(rdev);

	radeon_suspend(rdev);
	radeon_hpd_fini(rdev);
	/* evict remaining vram memory.
	 * This second call to evict vram is to evict the gart page table
	 * using the CPU.
	 */
	radeon_bo_evict_vram(rdev);

	radeon_agp_suspend(rdev);

	pci_save_state(dev->pdev);
	if (freeze && rdev->family >= CHIP_CEDAR && !(rdev->flags & RADEON_IS_IGP)) {
		rdev->asic->asic_reset(rdev, true);
		pci_restore_state(dev->pdev);
	} else if (suspend) {
		/* Shut down the device */
		pci_disable_device(dev->pdev);
		pci_set_power_state(dev->pdev, PCI_D3hot);
	}

	if (fbcon) {
		console_lock();
		radeon_fbdev_set_suspend(rdev, 1);
		console_unlock();
	}
	return 0;
}

/**
 * radeon_resume_kms - initiate device resume
 *
 * @dev: drm dev pointer
 * @resume: resume state
 * @fbcon: console resume state
 *
 * Brings the hw back to operating state (all asics): resumes the hw,
 * restores the bios scratch regs, re-pins the cursors and re-inits
 * the display hardware.
 * Returns 0 for success or an error on failure.
 * Called at driver resume.
 */
int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
{
	struct drm_connector *connector;
	struct radeon_device *rdev = dev->dev_private;
	struct drm_crtc *crtc;
	int r;

	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	if (fbcon) {
		console_lock();
	}
	if (resume) {
		pci_set_power_state(dev->pdev, PCI_D0);
		pci_restore_state(dev->pdev);
		if (pci_enable_device(dev->pdev)) {
			if (fbcon)
				console_unlock();
			return -1;
		}
	}
	/* resume AGP if in use */
	radeon_agp_resume(rdev);
	radeon_resume(rdev);

	r = radeon_ib_ring_tests(rdev);
	if (r)
		DRM_ERROR("ib ring test failed (%d).\n", r);

	if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
		/* do dpm late init */
		r = radeon_pm_late_init(rdev);
		if (r) {
			rdev->pm.dpm_enabled = false;
			DRM_ERROR("radeon_pm_late_init failed, disabling dpm\n");
		}
	} else {
		/* resume old pm late */
		radeon_pm_resume(rdev);
	}

	radeon_restore_bios_scratch_regs(rdev);

	/* pin cursors */
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);

		if (radeon_crtc->cursor_bo) {
			struct radeon_bo *robj = gem_to_radeon_bo(radeon_crtc->cursor_bo);
			r = radeon_bo_reserve(robj, false);
			if (r == 0) {
				/* Only 27 bit offset for legacy cursor */
				r = radeon_bo_pin_restricted(robj,
							     RADEON_GEM_DOMAIN_VRAM,
							     ASIC_IS_AVIVO(rdev) ?
							     0 : 1 << 27,
							     &radeon_crtc->cursor_addr);
				if (r != 0)
					DRM_ERROR("Failed to pin cursor BO (%d)\n", r);
				radeon_bo_unreserve(robj);
			}
		}
	}

	/* init dig PHYs, disp eng pll */
	if (rdev->is_atom_bios) {
		radeon_atom_encoder_init(rdev);
		radeon_atom_disp_eng_pll_init(rdev);
		/* turn on the BL */
		if (rdev->mode_info.bl_encoder) {
			u8 bl_level = radeon_get_backlight_level(rdev,
								 rdev->mode_info.bl_encoder);
			radeon_set_backlight_level(rdev, rdev->mode_info.bl_encoder,
						   bl_level);
		}
	}
	/* reset hpd state */
	radeon_hpd_init(rdev);

	if (fbcon) {
		drm_helper_resume_force_mode(dev);
		/* turn on display hw */
		drm_modeset_lock_all(dev);
		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
			drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
		}
		drm_modeset_unlock_all(dev);
	}

	drm_kms_helper_poll_enable(dev);

	/* set the power state here in case we are a PX system or headless */
	if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled)
		radeon_pm_compute_clocks(rdev);

	if (fbcon) {
		radeon_fbdev_set_suspend(rdev, 0);
		console_unlock();
	}

	return 0;
}

/**
 * radeon_gpu_reset - reset the asic
 *
 * @rdev: radeon device pointer
 *
 * Attempt the reset of the GPU: saves the unprocessed ring commands,
 * resets the asic, restores the saved commands and re-inits power
 * management and display state.
 * Returns 0 for success, -EAGAIN if the reset must be retried, or
 * another error on failure.
 */
int radeon_gpu_reset(struct radeon_device *rdev)
{
	unsigned ring_sizes[RADEON_NUM_RINGS];
	uint32_t *ring_data[RADEON_NUM_RINGS];

	bool saved = false;

	int i, r;
	int resched;

	down_write(&rdev->exclusive_lock);

	if (!rdev->needs_reset) {
		up_write(&rdev->exclusive_lock);
		return 0;
	}

	atomic_inc(&rdev->gpu_reset_counter);

	radeon_save_bios_scratch_regs(rdev);
	/* block TTM */
	resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
	radeon_suspend(rdev);
	radeon_hpd_fini(rdev);

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		ring_sizes[i] = radeon_ring_backup(rdev, &rdev->ring[i],
						   &ring_data[i]);
		if (ring_sizes[i]) {
			saved = true;
			dev_info(rdev->dev, "Saved %d dwords of commands "
				 "on ring %d.\n", ring_sizes[i], i);
		}
	}

	r = radeon_asic_reset(rdev);
	if (!r) {
		dev_info(rdev->dev, "GPU reset succeeded, trying to resume\n");
		radeon_resume(rdev);
	}

	radeon_restore_bios_scratch_regs(rdev);

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		if (!r && ring_data[i]) {
			radeon_ring_restore(rdev, &rdev->ring[i],
					    ring_sizes[i], ring_data[i]);
		} else {
			radeon_fence_driver_force_completion(rdev, i);
			kfree(ring_data[i]);
		}
	}

	if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
		/* do dpm late init */
		r = radeon_pm_late_init(rdev);
		if (r) {
			rdev->pm.dpm_enabled = false;
			DRM_ERROR("radeon_pm_late_init failed, disabling dpm\n");
		}
	} else {
		/* resume old pm late */
		radeon_pm_resume(rdev);
	}

	/* init dig PHYs, disp eng pll */
	if (rdev->is_atom_bios) {
		radeon_atom_encoder_init(rdev);
		radeon_atom_disp_eng_pll_init(rdev);
		/* turn on the BL */
		if (rdev->mode_info.bl_encoder) {
			u8 bl_level = radeon_get_backlight_level(rdev,
								 rdev->mode_info.bl_encoder);
			radeon_set_backlight_level(rdev, rdev->mode_info.bl_encoder,
						   bl_level);
		}
	}
	/* reset hpd state */
	radeon_hpd_init(rdev);

	ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);

	rdev->in_reset = true;
	rdev->needs_reset = false;

	downgrade_write(&rdev->exclusive_lock);

	drm_helper_resume_force_mode(rdev->ddev);

	/* set the power state here in case we are a PX system or headless */
	if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled)
		radeon_pm_compute_clocks(rdev);

	if (!r) {
		r = radeon_ib_ring_tests(rdev);
		if (r && saved)
			r = -EAGAIN;
	} else {
		/* bad news, how to tell it to userspace ? */
		dev_info(rdev->dev, "GPU reset failed\n");
	}

	rdev->needs_reset = r == -EAGAIN;
	rdev->in_reset = false;

	up_read(&rdev->exclusive_lock);
	return r;
}

/*
 * Debugfs
 */
int radeon_debugfs_add_files(struct radeon_device *rdev,
			     struct drm_info_list *files,
			     unsigned nfiles)
{
	unsigned i;

	for (i = 0; i < rdev->debugfs_count; i++) {
		if (rdev->debugfs[i].files == files) {
			/* Already registered */
			return 0;
		}
	}

	i = rdev->debugfs_count + 1;
	if (i > RADEON_DEBUGFS_MAX_COMPONENTS) {
		DRM_ERROR("Reached maximum number of debugfs components.\n");
		DRM_ERROR("Report so we increase "
			  "RADEON_DEBUGFS_MAX_COMPONENTS.\n");
		return -EINVAL;
	}
	rdev->debugfs[rdev->debugfs_count].files = files;
	rdev->debugfs[rdev->debugfs_count].num_files = nfiles;
	rdev->debugfs_count = i;
#if defined(CONFIG_DEBUG_FS)
	drm_debugfs_create_files(files, nfiles,
				 rdev->ddev->primary->debugfs_root,
				 rdev->ddev->primary);
#endif
	return 0;
}
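
/*
 * Usage sketch (illustrative): a driver component registers its
 * debugfs entries by passing a static drm_info_list array.  The names
 * example_debugfs_list and example_debugfs_show are made up for the
 * example.
 *
 *	static struct drm_info_list example_debugfs_list[] = {
 *		{ "radeon_example_info", example_debugfs_show, 0, NULL },
 *	};
 *	radeon_debugfs_add_files(rdev, example_debugfs_list,
 *				 ARRAY_SIZE(example_debugfs_list));
 */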