#include <linux/console.h>
#include <linux/efi.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/vga_switcheroo.h>
#include <linux/vgaarb.h>

#include <drm/drm_cache.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_device.h>
#include <drm/drm_file.h>
#include <drm/drm_probe_helper.h>
#include <drm/radeon_drm.h>

#include "radeon_device.h"
#include "radeon_reg.h"
#include "radeon.h"
#include "atom.h"

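/* ASIC family names, indexed by enum radeon_family; used in the init log message */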
static const char radeon_family_name[][16] = {
	"R100",
	"RV100",
	"RS100",
	"RV200",
	"RS200",
	"R200",
	"RV250",
	"RS300",
	"RV280",
	"R300",
	"R350",
	"RV350",
	"RV380",
	"R420",
	"R423",
	"RV410",
	"RS400",
	"RS480",
	"RS600",
	"RS690",
	"RS740",
	"RV515",
	"R520",
	"RV530",
	"RV560",
	"RV570",
	"R580",
	"R600",
	"RV610",
	"RV630",
	"RV670",
	"RV620",
	"RV635",
	"RS780",
	"RS880",
	"RV770",
	"RV730",
	"RV710",
	"RV740",
	"CEDAR",
	"REDWOOD",
	"JUNIPER",
	"CYPRESS",
	"HEMLOCK",
	"PALM",
	"SUMO",
	"SUMO2",
	"BARTS",
	"TURKS",
	"CAICOS",
	"CAYMAN",
	"ARUBA",
	"TAHITI",
	"PITCAIRN",
	"VERDE",
	"OLAND",
	"HAINAN",
	"BONAIRE",
	"KAVERI",
	"KABINI",
	"HAWAII",
	"MULLINS",
	"LAST",
};

#if defined(CONFIG_VGA_SWITCHEROO)
bool radeon_has_atpx_dgpu_power_cntl(void);
bool radeon_is_atpx_hybrid(void);
#else
static inline bool radeon_has_atpx_dgpu_power_cntl(void) { return false; }
static inline bool radeon_is_atpx_hybrid(void) { return false; }
#endif

#define RADEON_PX_QUIRK_DISABLE_PX	(1 << 0)

struct radeon_px_quirk {
	u32 chip_vendor;
	u32 chip_device;
	u32 subsys_vendor;
	u32 subsys_device;
	u32 px_quirk_flags;
};

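/*
 * Systems where PowerXpress (PX) switching is known to be broken; matching
 * entries disable PX via RADEON_PX_QUIRK_DISABLE_PX.
 */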
static struct radeon_px_quirk radeon_px_quirk_list[] = {
	{ PCI_VENDOR_ID_ATI, 0x6760, 0x1025, 0x0672, RADEON_PX_QUIRK_DISABLE_PX },
	{ PCI_VENDOR_ID_ATI, 0x6741, 0x1043, 0x108c, RADEON_PX_QUIRK_DISABLE_PX },
	{ PCI_VENDOR_ID_ATI, 0x6840, 0x1043, 0x2122, RADEON_PX_QUIRK_DISABLE_PX },
	{ PCI_VENDOR_ID_ATI, 0x6741, 0x1043, 0x2122, RADEON_PX_QUIRK_DISABLE_PX },
	{ PCI_VENDOR_ID_ATI, 0x6840, 0x1043, 0x2123, RADEON_PX_QUIRK_DISABLE_PX },
	{ 0, 0, 0, 0, 0 },
};

bool radeon_is_px(struct drm_device *dev)
{
	struct radeon_device *rdev = dev->dev_private;

	if (rdev->flags & RADEON_IS_PX)
		return true;
	return false;
}

static void radeon_device_handle_px_quirks(struct radeon_device *rdev)
{
	struct radeon_px_quirk *p = radeon_px_quirk_list;

	/* Apply PX quirks */
	while (p && p->chip_device != 0) {
		if (rdev->pdev->vendor == p->chip_vendor &&
		    rdev->pdev->device == p->chip_device &&
		    rdev->pdev->subsystem_vendor == p->subsys_vendor &&
		    rdev->pdev->subsystem_device == p->subsys_device) {
			rdev->px_quirk_flags = p->px_quirk_flags;
			break;
		}
		++p;
	}

	if (rdev->px_quirk_flags & RADEON_PX_QUIRK_DISABLE_PX)
		rdev->flags &= ~RADEON_IS_PX;

	/* disable PX if the platform supports neither hybrid graphics nor dGPU power control */
	if (!radeon_is_atpx_hybrid() &&
	    !radeon_has_atpx_dgpu_power_cntl())
		rdev->flags &= ~RADEON_IS_PX;
}

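/**
 * radeon_program_register_sequence - program an array of registers.
 *
 * @rdev: radeon_device pointer
 * @registers: pointer to the register array
 * @array_size: size of the register array
 *
 * Programs an array of (reg, and_mask, or_mask) triples (all asics).
 */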
void radeon_program_register_sequence(struct radeon_device *rdev,
				      const u32 *registers,
				      const u32 array_size)
{
	u32 tmp, reg, and_mask, or_mask;
	int i;

	if (array_size % 3)
		return;

	for (i = 0; i < array_size; i += 3) {
		reg = registers[i + 0];
		and_mask = registers[i + 1];
		or_mask = registers[i + 2];

		if (and_mask == 0xffffffff) {
			tmp = or_mask;
		} else {
			tmp = RREG32(reg);
			tmp &= ~and_mask;
			tmp |= or_mask;
		}
		WREG32(reg, tmp);
	}
}

/* radeon_pci_config_reset - reset the GPU via the PCI config space reset register */
void radeon_pci_config_reset(struct radeon_device *rdev)
{
	pci_write_config_dword(rdev->pdev, 0x7c, RADEON_ASIC_RESET_DATA);
}

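/**
 * radeon_surface_init - Clear GPU surface registers.
 *
 * @rdev: radeon_device pointer
 *
 * Clears the GPU surface registers on asics that have them (r1xx-r5xx).
 */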
void radeon_surface_init(struct radeon_device *rdev)
{
	if (rdev->family < CHIP_R600) {
		int i;

		for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {
			if (rdev->surface_regs[i].bo)
				radeon_bo_get_surface_reg(rdev->surface_regs[i].bo);
			else
				radeon_clear_surface_reg(rdev, i);
		}

		WREG32(RADEON_SURFACE_CNTL, 0);
	}
}

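/**
 * radeon_scratch_init - setup driver info for CP scratch regs
 *
 * @rdev: radeon_device pointer
 *
 * Sets up the scratch register bookkeeping (all asics).
 */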
void radeon_scratch_init(struct radeon_device *rdev)
{
	int i;

	if (rdev->family < CHIP_R300) {
		rdev->scratch.num_reg = 5;
	} else {
		rdev->scratch.num_reg = 7;
	}
	rdev->scratch.reg_base = RADEON_SCRATCH_REG0;
	for (i = 0; i < rdev->scratch.num_reg; i++) {
		rdev->scratch.free[i] = true;
		rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4);
	}
}

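/**
 * radeon_scratch_get - Allocate a scratch register
 *
 * @rdev: radeon_device pointer
 * @reg: scratch register MMIO offset
 *
 * Returns 0 on success or -EINVAL if no free scratch register is available.
 */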
int radeon_scratch_get(struct radeon_device *rdev, uint32_t *reg)
{
	int i;

	for (i = 0; i < rdev->scratch.num_reg; i++) {
		if (rdev->scratch.free[i]) {
			rdev->scratch.free[i] = false;
			*reg = rdev->scratch.reg[i];
			return 0;
		}
	}
	return -EINVAL;
}

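/**
 * radeon_scratch_free - Free a scratch register
 *
 * @rdev: radeon_device pointer
 * @reg: scratch register MMIO offset
 *
 * Marks the scratch register as free again.
 */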
void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg)
{
	int i;

	for (i = 0; i < rdev->scratch.num_reg; i++) {
		if (rdev->scratch.reg[i] == reg) {
			rdev->scratch.free[i] = true;
			return;
		}
	}
}

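/**
 * radeon_doorbell_init - Init doorbell driver information.
 *
 * @rdev: radeon_device pointer
 *
 * Maps the doorbell aperture and initializes the doorbell bookkeeping
 * (CIK and newer). Returns 0 on success or an error code on failure.
 */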
static int radeon_doorbell_init(struct radeon_device *rdev)
{
	/* doorbell bar mapping */
	rdev->doorbell.base = pci_resource_start(rdev->pdev, 2);
	rdev->doorbell.size = pci_resource_len(rdev->pdev, 2);

	rdev->doorbell.num_doorbells = min_t(u32, rdev->doorbell.size / sizeof(u32), RADEON_MAX_DOORBELLS);
	if (rdev->doorbell.num_doorbells == 0)
		return -EINVAL;

	rdev->doorbell.ptr = ioremap(rdev->doorbell.base, rdev->doorbell.num_doorbells * sizeof(u32));
	if (rdev->doorbell.ptr == NULL) {
		return -ENOMEM;
	}
	DRM_INFO("doorbell mmio base: 0x%08X\n", (uint32_t)rdev->doorbell.base);
	DRM_INFO("doorbell mmio size: %u\n", (unsigned)rdev->doorbell.size);

	memset(&rdev->doorbell.used, 0, sizeof(rdev->doorbell.used));

	return 0;
}

/* radeon_doorbell_fini - unmap the doorbell aperture (CIK and newer) */
static void radeon_doorbell_fini(struct radeon_device *rdev)
{
	iounmap(rdev->doorbell.ptr);
	rdev->doorbell.ptr = NULL;
}

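/**
 * radeon_doorbell_get - Allocate a doorbell entry
 *
 * @rdev: radeon_device pointer
 * @doorbell: doorbell index
 *
 * Allocates a doorbell index for use by the driver (CIK and newer).
 * Returns 0 on success or -EINVAL if none are free.
 */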
int radeon_doorbell_get(struct radeon_device *rdev, u32 *doorbell)
{
	unsigned long offset = find_first_zero_bit(rdev->doorbell.used, rdev->doorbell.num_doorbells);

	if (offset < rdev->doorbell.num_doorbells) {
		__set_bit(offset, rdev->doorbell.used);
		*doorbell = offset;
		return 0;
	} else {
		return -EINVAL;
	}
}

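/**
 * radeon_doorbell_free - Free a doorbell entry
 *
 * @rdev: radeon_device pointer
 * @doorbell: doorbell index
 *
 * Returns the doorbell index to the pool (CIK and newer).
 */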
void radeon_doorbell_free(struct radeon_device *rdev, u32 doorbell)
{
	if (doorbell < rdev->doorbell.num_doorbells)
		__clear_bit(doorbell, rdev->doorbell.used);
}

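/**
 * radeon_wb_disable - Disable Writeback
 *
 * @rdev: radeon_device pointer
 *
 * Disables Writeback (all asics). Used on suspend.
 */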
void radeon_wb_disable(struct radeon_device *rdev)
{
	rdev->wb.enabled = false;
}

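/**
 * radeon_wb_fini - Disable Writeback and free memory
 *
 * @rdev: radeon_device pointer
 *
 * Disables Writeback and frees the Writeback memory (all asics).
 * Used at driver shutdown.
 */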
void radeon_wb_fini(struct radeon_device *rdev)
{
	radeon_wb_disable(rdev);
	if (rdev->wb.wb_obj) {
		if (!radeon_bo_reserve(rdev->wb.wb_obj, false)) {
			radeon_bo_kunmap(rdev->wb.wb_obj);
			radeon_bo_unpin(rdev->wb.wb_obj);
			radeon_bo_unreserve(rdev->wb.wb_obj);
		}
		radeon_bo_unref(&rdev->wb.wb_obj);
		rdev->wb.wb = NULL;
		rdev->wb.wb_obj = NULL;
	}
}

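/**
 * radeon_wb_init - Init Writeback driver info and allocate memory
 *
 * @rdev: radeon_device pointer
 *
 * Allocates and pins the Writeback BO in GTT and decides whether writeback
 * and event fences can be used (all asics).
 * Returns 0 on success or an error code on failure.
 */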
int radeon_wb_init(struct radeon_device *rdev)
{
	int r;

	if (rdev->wb.wb_obj == NULL) {
		r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true,
				     RADEON_GEM_DOMAIN_GTT, 0, NULL, NULL,
				     &rdev->wb.wb_obj);
		if (r) {
			dev_warn(rdev->dev, "(%d) create WB bo failed\n", r);
			return r;
		}
		r = radeon_bo_reserve(rdev->wb.wb_obj, false);
		if (unlikely(r != 0)) {
			radeon_wb_fini(rdev);
			return r;
		}
		r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
				  &rdev->wb.gpu_addr);
		if (r) {
			radeon_bo_unreserve(rdev->wb.wb_obj);
			dev_warn(rdev->dev, "(%d) pin WB bo failed\n", r);
			radeon_wb_fini(rdev);
			return r;
		}
		r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
		radeon_bo_unreserve(rdev->wb.wb_obj);
		if (r) {
			dev_warn(rdev->dev, "(%d) map WB bo failed\n", r);
			radeon_wb_fini(rdev);
			return r;
		}
	}

	/* clear wb memory */
	memset((char *)rdev->wb.wb, 0, RADEON_GPU_PAGE_SIZE);
	/* disable event_write fences by default */
	rdev->wb.use_event = false;
	/* writeback disabled via module parameter */
	if (radeon_no_wb == 1) {
		rdev->wb.enabled = false;
	} else {
		if (rdev->flags & RADEON_IS_AGP) {
			/* often unreliable on AGP */
			rdev->wb.enabled = false;
		} else if (rdev->family < CHIP_R300) {
			/* often unreliable on pre-r300 */
			rdev->wb.enabled = false;
		} else {
			rdev->wb.enabled = true;
			/* event_write fences are only available on r600+ */
			if (rdev->family >= CHIP_R600) {
				rdev->wb.use_event = true;
			}
		}
	}
	/* always use writeback/events on NI and APUs */
	if (rdev->family >= CHIP_PALM) {
		rdev->wb.enabled = true;
		rdev->wb.use_event = true;
	}

	dev_info(rdev->dev, "WB %sabled\n", rdev->wb.enabled ? "en" : "dis");

	return 0;
}

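/**
 * radeon_vram_location - try to find VRAM location
 *
 * @rdev: radeon device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 * @base: base address at which to put VRAM
 *
 * Places VRAM at @base in the GPU's internal (MC) address space, clamping the
 * size to what the MC address mask and the PCI aperture allow, and honouring
 * the radeon_vram_limit module parameter.
 */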
void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 base)
{
	uint64_t limit = (uint64_t)radeon_vram_limit << 20;

	mc->vram_start = base;
	if (mc->mc_vram_size > (rdev->mc.mc_mask - base + 1)) {
		dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
		mc->real_vram_size = mc->aper_size;
		mc->mc_vram_size = mc->aper_size;
	}
	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
	if (rdev->flags & RADEON_IS_AGP && mc->vram_end > mc->gtt_start && mc->vram_start <= mc->gtt_end) {
		dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
		mc->real_vram_size = mc->aper_size;
		mc->mc_vram_size = mc->aper_size;
	}
	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
	if (limit && limit < mc->real_vram_size)
		mc->real_vram_size = limit;
	dev_info(rdev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
			mc->mc_vram_size >> 20, mc->vram_start,
			mc->vram_end, mc->real_vram_size >> 20);
}

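/**
 * radeon_gtt_location - try to find GTT location
 *
 * @rdev: radeon device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 *
 * Places the GTT either before or after VRAM in the MC address space,
 * picking whichever gap is larger and shrinking the GTT if it does not fit.
 */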
void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
{
	u64 size_af, size_bf;

	size_af = ((rdev->mc.mc_mask - mc->vram_end) + mc->gtt_base_align) & ~mc->gtt_base_align;
	size_bf = mc->vram_start & ~mc->gtt_base_align;
	if (size_bf > size_af) {
		if (mc->gtt_size > size_bf) {
			dev_warn(rdev->dev, "limiting GTT\n");
			mc->gtt_size = size_bf;
		}
		mc->gtt_start = (mc->vram_start & ~mc->gtt_base_align) - mc->gtt_size;
	} else {
		if (mc->gtt_size > size_af) {
			dev_warn(rdev->dev, "limiting GTT\n");
			mc->gtt_size = size_af;
		}
		mc->gtt_start = (mc->vram_end + 1 + mc->gtt_base_align) & ~mc->gtt_base_align;
	}
	mc->gtt_end = mc->gtt_start + mc->gtt_size - 1;
	dev_info(rdev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n",
			mc->gtt_size >> 20, mc->gtt_start, mc->gtt_end);
}

/* radeon_device_is_virtual - check whether we are running inside a hypervisor */
bool radeon_device_is_virtual(void)
{
#ifdef CONFIG_X86
	return boot_cpu_has(X86_FEATURE_HYPERVISOR);
#else
	return false;
#endif
}

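/**
 * radeon_card_posted - check if the hw has already been initialized
 *
 * @rdev: radeon_device pointer
 *
 * Check if the asic has been initialized (all asics) by checking whether any
 * CRTC is enabled or the memory size register has been programmed.
 * Returns true if initialized or false if not.
 */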
bool radeon_card_posted(struct radeon_device *rdev)
{
	uint32_t reg;

	/* when running virtualized, always force asic init on CIK parts */
	if (rdev->family >= CHIP_BONAIRE &&
	    radeon_device_is_virtual())
		return false;

	/* required for EFI mode on Apple hardware with pre-r600 asics */
	if (efi_enabled(EFI_BOOT) &&
	    (rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE) &&
	    (rdev->family < CHIP_R600))
		return false;

	if (ASIC_IS_NODCE(rdev))
		goto check_memsize;

	/* first check CRTCs */
	if (ASIC_IS_DCE4(rdev)) {
		reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET);
		if (rdev->num_crtc >= 4) {
			reg |= RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) |
				RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET);
		}
		if (rdev->num_crtc >= 6) {
			reg |= RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) |
				RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
		}
		if (reg & EVERGREEN_CRTC_MASTER_EN)
			return true;
	} else if (ASIC_IS_AVIVO(rdev)) {
		reg = RREG32(AVIVO_D1CRTC_CONTROL) |
		      RREG32(AVIVO_D2CRTC_CONTROL);
		if (reg & AVIVO_CRTC_EN) {
			return true;
		}
	} else {
		reg = RREG32(RADEON_CRTC_GEN_CNTL) |
		      RREG32(RADEON_CRTC2_GEN_CNTL);
		if (reg & RADEON_CRTC_EN) {
			return true;
		}
	}

check_memsize:
	/* then check MEM_SIZE, in case the crtcs are off */
	if (rdev->family >= CHIP_R600)
		reg = RREG32(R600_CONFIG_MEMSIZE);
	else
		reg = RREG32(RADEON_CONFIG_MEMSIZE);

	if (reg)
		return true;

	return false;
}

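/**
 * radeon_update_bandwidth_info - update display bandwidth params
 *
 * @rdev: radeon_device pointer
 *
 * Used when sclk/mclk are switched or display modes are set (all asics).
 */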
void radeon_update_bandwidth_info(struct radeon_device *rdev)
{
	fixed20_12 a;
	u32 sclk = rdev->pm.current_sclk;
	u32 mclk = rdev->pm.current_mclk;

	/* sclk/mclk in Mhz */
	a.full = dfixed_const(100);
	rdev->pm.sclk.full = dfixed_const(sclk);
	rdev->pm.sclk.full = dfixed_div(rdev->pm.sclk, a);
	rdev->pm.mclk.full = dfixed_const(mclk);
	rdev->pm.mclk.full = dfixed_div(rdev->pm.mclk, a);

	if (rdev->flags & RADEON_IS_IGP) {
		a.full = dfixed_const(16);
		rdev->pm.core_bandwidth.full = dfixed_div(rdev->pm.sclk, a);
	}
}

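/**
 * radeon_boot_test_post_card - check and possibly initialize the hw
 *
 * @rdev: radeon_device pointer
 *
 * Check if the asic is initialized and if not, attempt to initialize it
 * (all asics). Returns true if initialized or false if not.
 */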
bool radeon_boot_test_post_card(struct radeon_device *rdev)
{
	if (radeon_card_posted(rdev))
		return true;

	if (rdev->bios) {
		DRM_INFO("GPU not posted. posting now...\n");
		if (rdev->is_atom_bios)
			atom_asic_init(rdev->mode_info.atom_context);
		else
			radeon_combios_asic_init(rdev->ddev);
		return true;
	} else {
		dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
		return false;
	}
}

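/**
 * radeon_dummy_page_init - init dummy page used by the driver
 *
 * @rdev: radeon_device pointer
 *
 * Allocates and DMA-maps the dummy page used to back unbound GART pages
 * (all asics). Returns 0 on success or -ENOMEM on failure.
 */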
int radeon_dummy_page_init(struct radeon_device *rdev)
{
	if (rdev->dummy_page.page)
		return 0;
	rdev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO);
	if (rdev->dummy_page.page == NULL)
		return -ENOMEM;
	rdev->dummy_page.addr = dma_map_page(&rdev->pdev->dev, rdev->dummy_page.page,
					     0, PAGE_SIZE, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(&rdev->pdev->dev, rdev->dummy_page.addr)) {
		dev_err(&rdev->pdev->dev, "Failed to DMA MAP the dummy page\n");
		__free_page(rdev->dummy_page.page);
		rdev->dummy_page.page = NULL;
		return -ENOMEM;
	}
	rdev->dummy_page.entry = radeon_gart_get_page_entry(rdev->dummy_page.addr,
							    RADEON_GART_PAGE_DUMMY);
	return 0;
}

/* radeon_dummy_page_fini - unmap and free the dummy page (all asics) */
void radeon_dummy_page_fini(struct radeon_device *rdev)
{
	if (rdev->dummy_page.page == NULL)
		return;
	dma_unmap_page(&rdev->pdev->dev, rdev->dummy_page.addr, PAGE_SIZE,
		       DMA_BIDIRECTIONAL);
	__free_page(rdev->dummy_page.page);
	rdev->dummy_page.page = NULL;
}

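/*
 * ATOM accessor methods: register access callbacks used by the ATOM BIOS
 * interpreter (see struct card_info below).
 */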
/* cail_pll_read - read a PLL register */
static uint32_t cail_pll_read(struct card_info *info, uint32_t reg)
{
	struct radeon_device *rdev = info->dev->dev_private;
	uint32_t r;

	r = rdev->pll_rreg(rdev, reg);
	return r;
}

/* cail_pll_write - write a PLL register */
static void cail_pll_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct radeon_device *rdev = info->dev->dev_private;

	rdev->pll_wreg(rdev, reg, val);
}

/* cail_mc_read - read a memory controller register */
static uint32_t cail_mc_read(struct card_info *info, uint32_t reg)
{
	struct radeon_device *rdev = info->dev->dev_private;
	uint32_t r;

	r = rdev->mc_rreg(rdev, reg);
	return r;
}

/* cail_mc_write - write a memory controller register */
static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct radeon_device *rdev = info->dev->dev_private;

	rdev->mc_wreg(rdev, reg, val);
}

/* cail_reg_write - write an MMIO register */
static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct radeon_device *rdev = info->dev->dev_private;

	WREG32(reg*4, val);
}

/* cail_reg_read - read an MMIO register */
static uint32_t cail_reg_read(struct card_info *info, uint32_t reg)
{
	struct radeon_device *rdev = info->dev->dev_private;
	uint32_t r;

	r = RREG32(reg*4);
	return r;
}

/* cail_ioreg_write - write a PCI I/O port register */
static void cail_ioreg_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct radeon_device *rdev = info->dev->dev_private;

	WREG32_IO(reg*4, val);
}

/* cail_ioreg_read - read a PCI I/O port register */
static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg)
{
	struct radeon_device *rdev = info->dev->dev_private;
	uint32_t r;

	r = RREG32_IO(reg*4);
	return r;
}

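/**
 * radeon_atombios_init - init the driver info and callbacks for atombios
 *
 * @rdev: radeon_device pointer
 *
 * Initializes the driver info and register access callbacks for the ATOM
 * interpreter (r4xx+). Returns 0 on success, -ENOMEM on allocation failure.
 */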
int radeon_atombios_init(struct radeon_device *rdev)
{
	struct card_info *atom_card_info =
	    kzalloc(sizeof(struct card_info), GFP_KERNEL);

	if (!atom_card_info)
		return -ENOMEM;

	rdev->mode_info.atom_card_info = atom_card_info;
	atom_card_info->dev = rdev->ddev;
	atom_card_info->reg_read = cail_reg_read;
	atom_card_info->reg_write = cail_reg_write;
	/* needed for iio ops */
	if (rdev->rio_mem) {
		atom_card_info->ioreg_read = cail_ioreg_read;
		atom_card_info->ioreg_write = cail_ioreg_write;
	} else {
		DRM_ERROR("Unable to find PCI I/O BAR; using MMIO for ATOM IIO\n");
		atom_card_info->ioreg_read = cail_reg_read;
		atom_card_info->ioreg_write = cail_reg_write;
	}
	atom_card_info->mc_read = cail_mc_read;
	atom_card_info->mc_write = cail_mc_write;
	atom_card_info->pll_read = cail_pll_read;
	atom_card_info->pll_write = cail_pll_write;

	rdev->mode_info.atom_context = atom_parse(atom_card_info, rdev->bios);
	if (!rdev->mode_info.atom_context) {
		radeon_atombios_fini(rdev);
		return -ENOMEM;
	}

	mutex_init(&rdev->mode_info.atom_context->mutex);
	mutex_init(&rdev->mode_info.atom_context->scratch_mutex);
	radeon_atom_initialize_bios_scratch_regs(rdev->ddev);
	atom_allocate_fb_scratch(rdev->mode_info.atom_context);
	return 0;
}

/**
 * radeon_atombios_fini - free the driver info and callbacks for atombios
 *
 * @rdev: radeon_device pointer
 *
 * Frees the driver info and register access callbacks for the ATOM
 * interpreter (r4xx+). Called at driver shutdown.
 */
void radeon_atombios_fini(struct radeon_device *rdev)
{
	if (rdev->mode_info.atom_context) {
		kfree(rdev->mode_info.atom_context->scratch);
	}
	kfree(rdev->mode_info.atom_context);
	rdev->mode_info.atom_context = NULL;
	kfree(rdev->mode_info.atom_card_info);
	rdev->mode_info.atom_card_info = NULL;
}

/* radeon_combios_init - init the driver info for combios (r1xx-r5xx) */
int radeon_combios_init(struct radeon_device *rdev)
{
	radeon_combios_initialize_bios_scratch_regs(rdev->ddev);
	return 0;
}

/* radeon_combios_fini - nothing to tear down for combios; kept for symmetry with atombios */
void radeon_combios_fini(struct radeon_device *rdev)
{
}

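/*
 * radeon_vga_set_decode - enable/disable VGA decode
 *
 * VGA arbiter callback: returns the VGA resources this device decodes in the
 * requested state so the arbiter can route legacy VGA accesses correctly.
 */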
static unsigned int radeon_vga_set_decode(struct pci_dev *pdev, bool state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct radeon_device *rdev = dev->dev_private;

	radeon_vga_set_state(rdev, state);
	if (state)
		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
	else
		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}

/* radeon_check_pot_argument - check that a module argument is a power of two */
static bool radeon_check_pot_argument(int arg)
{
	return (arg & (arg - 1)) == 0;
}

/* radeon_gart_size_auto - pick a default GART size (in MB) based on the asic family */
static int radeon_gart_size_auto(enum radeon_family family)
{
	if (family >= CHIP_TAHITI)
		return 2048;
	else if (family >= CHIP_RV770)
		return 1024;
	else
		return 512;
}

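/**
 * radeon_check_arguments - validate module params
 *
 * @rdev: radeon_device pointer
 *
 * Validates certain module parameters (vram limit, gart size, agp mode,
 * vm size, vm block size) and fixes up or resets invalid values (all asics).
 */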
static void radeon_check_arguments(struct radeon_device *rdev)
{
	/* vramlimit must be a power of two */
	if (!radeon_check_pot_argument(radeon_vram_limit)) {
		dev_warn(rdev->dev, "vram limit (%d) must be a power of 2\n",
			 radeon_vram_limit);
		radeon_vram_limit = 0;
	}

	if (radeon_gart_size == -1) {
		radeon_gart_size = radeon_gart_size_auto(rdev->family);
	}
	/* gtt size must be a power of two and greater or equal to 32M */
	if (radeon_gart_size < 32) {
		dev_warn(rdev->dev, "gart size (%d) too small\n",
			 radeon_gart_size);
		radeon_gart_size = radeon_gart_size_auto(rdev->family);
	} else if (!radeon_check_pot_argument(radeon_gart_size)) {
		dev_warn(rdev->dev, "gart size (%d) must be a power of 2\n",
			 radeon_gart_size);
		radeon_gart_size = radeon_gart_size_auto(rdev->family);
	}
	rdev->mc.gtt_size = (uint64_t)radeon_gart_size << 20;

	/* AGP mode can be -1 (auto), 0 (disabled), 1, 2, 4 or 8 */
	switch (radeon_agpmode) {
	case -1:
	case 0:
	case 1:
	case 2:
	case 4:
	case 8:
		break;
	default:
		dev_warn(rdev->dev, "invalid AGP mode %d (valid mode: "
			 "-1, 0, 1, 2, 4, 8)\n", radeon_agpmode);
		radeon_agpmode = 0;
		break;
	}

	if (!radeon_check_pot_argument(radeon_vm_size)) {
		dev_warn(rdev->dev, "VM size (%d) must be a power of 2\n",
			 radeon_vm_size);
		radeon_vm_size = 4;
	}

	if (radeon_vm_size < 1) {
		dev_warn(rdev->dev, "VM size (%d) too small, min is 1GB\n",
			 radeon_vm_size);
		radeon_vm_size = 4;
	}

	/* the GPU VM address space is limited to 40 bits, i.e. 1TB */
	if (radeon_vm_size > 1024) {
		dev_warn(rdev->dev, "VM size (%d) too large, max is 1TB\n",
			 radeon_vm_size);
		radeon_vm_size = 4;
	}

	/* defines number of bits in page table versus page directory,
	 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
	 * page table and the remaining bits are in the page directory */
	if (radeon_vm_block_size == -1) {
		/* Total bits covered by PD + PTs */
		unsigned bits = ilog2(radeon_vm_size) + 18;

		/* Make sure the PD is 4K in size up to 8GB address space.
		   Above that split equal. */
		if (radeon_vm_size <= 8)
			radeon_vm_block_size = bits - 9;
		else
			radeon_vm_block_size = (bits + 3) / 2;

	} else if (radeon_vm_block_size < 9) {
		dev_warn(rdev->dev, "VM page table size (%d) too small\n",
			 radeon_vm_block_size);
		radeon_vm_block_size = 9;
	}

	if (radeon_vm_block_size > 24 ||
	    (radeon_vm_size * 1024) < (1ull << radeon_vm_block_size)) {
		dev_warn(rdev->dev, "VM page table size (%d) too large\n",
			 radeon_vm_block_size);
		radeon_vm_block_size = 9;
	}
}

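/**
 * radeon_switcheroo_set_state - set switcheroo state
 *
 * @pdev: pci dev pointer
 * @state: vga_switcheroo state
 *
 * Callback for the switcheroo driver: suspends or resumes the card when the
 * GPU is switched away from or back to this device.
 */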
static void radeon_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	/* PX devices are powered down through runtime PM instead */
	if (radeon_is_px(dev) && state == VGA_SWITCHEROO_OFF)
		return;

	if (state == VGA_SWITCHEROO_ON) {
		pr_info("radeon: switched on\n");
		/* don't suspend or resume card normally */
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;

		radeon_resume_kms(dev, true, true);

		dev->switch_power_state = DRM_SWITCH_POWER_ON;
		drm_kms_helper_poll_enable(dev);
	} else {
		pr_info("radeon: switched off\n");
		drm_kms_helper_poll_disable(dev);
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		radeon_suspend_kms(dev, true, true, false);
		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
	}
}

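/**
 * radeon_switcheroo_can_switch - see if switcheroo state can change
 *
 * @pdev: pci dev pointer
 *
 * Callback for the switcheroo driver: the state can only change while no
 * userspace client has the device open.
 */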
static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	/*
	 * open_count is not protected by a lock here; taking drm_global_mutex
	 * would invert locking with the driver load path, so the check stays
	 * intentionally racy.
	 */
	return atomic_read(&dev->open_count) == 0;
}

static const struct vga_switcheroo_client_ops radeon_switcheroo_ops = {
	.set_gpu_state = radeon_switcheroo_set_state,
	.reprobe = NULL,
	.can_switch = radeon_switcheroo_can_switch,
};

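/**
 * radeon_device_init - initialize the driver
 *
 * @rdev: radeon_device pointer
 * @ddev: drm dev pointer
 * @pdev: pci dev pointer
 * @flags: driver flags
 *
 * Initializes the driver info and hw (all asics): register mappings,
 * DMA mask, GART, rings, display and acceleration state.
 * Called at driver startup. Returns 0 for success or an error on failure.
 */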
int radeon_device_init(struct radeon_device *rdev,
		       struct drm_device *ddev,
		       struct pci_dev *pdev,
		       uint32_t flags)
{
	int r, i;
	int dma_bits;
	bool runtime = false;

	rdev->shutdown = false;
	rdev->dev = &pdev->dev;
	rdev->ddev = ddev;
	rdev->pdev = pdev;
	rdev->flags = flags;
	rdev->family = flags & RADEON_FAMILY_MASK;
	rdev->is_atom_bios = false;
	rdev->usec_timeout = RADEON_MAX_USEC_TIMEOUT;
	rdev->mc.gtt_size = 512 * 1024 * 1024;
	rdev->accel_working = false;
	/* set up ring ids */
	for (i = 0; i < RADEON_NUM_RINGS; i++) {
		rdev->ring[i].idx = i;
	}
	rdev->fence_context = dma_fence_context_alloc(RADEON_NUM_RINGS);

	DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
		 radeon_family_name[rdev->family], pdev->vendor, pdev->device,
		 pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);

	/* mutex initialization are all done here so we
	 * can recover on resume */
	mutex_init(&rdev->ring_lock);
	mutex_init(&rdev->dc_hw_i2c_mutex);
	atomic_set(&rdev->ih.lock, 0);
	mutex_init(&rdev->gem.mutex);
	mutex_init(&rdev->pm.mutex);
	mutex_init(&rdev->gpu_clock_mutex);
	mutex_init(&rdev->srbm_mutex);
	init_rwsem(&rdev->pm.mclk_lock);
	init_rwsem(&rdev->exclusive_lock);
	init_waitqueue_head(&rdev->irq.vblank_queue);
	r = radeon_gem_init(rdev);
	if (r)
		return r;

	radeon_check_arguments(rdev);

	/* Adjust VM size here; radeon_vm_size is in GB, max_pfn in 4KB pages. */
	rdev->vm_manager.max_pfn = radeon_vm_size << 18;

	/* Set asic functions */
	r = radeon_asic_init(rdev);
	if (r)
		return r;

	/* all of the newer IGP chips have an internal gart,
	 * however some rs4xx report as AGP, so remove that here.
	 */
	if ((rdev->family >= CHIP_RS400) &&
	    (rdev->flags & RADEON_IS_IGP)) {
		rdev->flags &= ~RADEON_IS_AGP;
	}

	if (rdev->flags & RADEON_IS_AGP && radeon_agpmode == -1) {
		radeon_agp_disable(rdev);
	}

	/* Set the internal MC address mask: this is the max address of the
	 * GPU's internal address space.
	 */
	if (rdev->family >= CHIP_CAYMAN)
		rdev->mc.mc_mask = 0xffffffffffULL; /* 40 bit MC */
	else if (rdev->family >= CHIP_CEDAR)
		rdev->mc.mc_mask = 0xfffffffffULL; /* 36 bit MC */
	else
		rdev->mc.mc_mask = 0xffffffffULL; /* 32 bit MC */

	/* set DMA mask:
	 * AGP and older PCI parts are limited to 32 bits, newer asics can
	 * handle 40 bits.
	 */
	dma_bits = 40;
	if (rdev->flags & RADEON_IS_AGP)
		dma_bits = 32;
	if ((rdev->flags & RADEON_IS_PCI) &&
	    (rdev->family <= CHIP_RS740))
		dma_bits = 32;
#ifdef CONFIG_PPC64
	if (rdev->family == CHIP_CEDAR)
		dma_bits = 32;
#endif

	r = dma_set_mask_and_coherent(&rdev->pdev->dev, DMA_BIT_MASK(dma_bits));
	if (r) {
		pr_warn("radeon: No suitable DMA available\n");
		return r;
	}
	rdev->need_swiotlb = drm_need_swiotlb(dma_bits);

	/* Registers mapping */
	spin_lock_init(&rdev->mmio_idx_lock);
	spin_lock_init(&rdev->smc_idx_lock);
	spin_lock_init(&rdev->pll_idx_lock);
	spin_lock_init(&rdev->mc_idx_lock);
	spin_lock_init(&rdev->pcie_idx_lock);
	spin_lock_init(&rdev->pciep_idx_lock);
	spin_lock_init(&rdev->pif_idx_lock);
	spin_lock_init(&rdev->cg_idx_lock);
	spin_lock_init(&rdev->uvd_idx_lock);
	spin_lock_init(&rdev->rcu_idx_lock);
	spin_lock_init(&rdev->didt_idx_lock);
	spin_lock_init(&rdev->end_idx_lock);
	if (rdev->family >= CHIP_BONAIRE) {
		rdev->rmmio_base = pci_resource_start(rdev->pdev, 5);
		rdev->rmmio_size = pci_resource_len(rdev->pdev, 5);
	} else {
		rdev->rmmio_base = pci_resource_start(rdev->pdev, 2);
		rdev->rmmio_size = pci_resource_len(rdev->pdev, 2);
	}
	rdev->rmmio = ioremap(rdev->rmmio_base, rdev->rmmio_size);
	if (rdev->rmmio == NULL)
		return -ENOMEM;

	/* doorbell bar mapping */
	if (rdev->family >= CHIP_BONAIRE)
		radeon_doorbell_init(rdev);

	/* io port mapping */
	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
		if (pci_resource_flags(rdev->pdev, i) & IORESOURCE_IO) {
			rdev->rio_mem_size = pci_resource_len(rdev->pdev, i);
			rdev->rio_mem = pci_iomap(rdev->pdev, i, rdev->rio_mem_size);
			break;
		}
	}
	if (rdev->rio_mem == NULL)
		DRM_ERROR("Unable to find PCI I/O BAR\n");

	if (rdev->flags & RADEON_IS_PX)
		radeon_device_handle_px_quirks(rdev);

	/* register with the VGA arbiter so legacy VGA decode can be disabled */
	vga_client_register(rdev->pdev, radeon_vga_set_decode);

	if (rdev->flags & RADEON_IS_PX)
		runtime = true;
	if (!pci_is_thunderbolt_attached(rdev->pdev))
		vga_switcheroo_register_client(rdev->pdev,
					       &radeon_switcheroo_ops, runtime);
	if (runtime)
		vga_switcheroo_init_domain_pm_ops(rdev->dev, &rdev->vga_pm_domain);

	r = radeon_init(rdev);
	if (r)
		goto failed;

	radeon_gem_debugfs_init(rdev);
	radeon_mst_debugfs_init(rdev);

	if (rdev->flags & RADEON_IS_AGP && !rdev->accel_working) {
		/* Acceleration not working on AGP card, try again
		 * with fallback to PCI or PCIE GART
		 */
		radeon_asic_reset(rdev);
		radeon_fini(rdev);
		radeon_agp_disable(rdev);
		r = radeon_init(rdev);
		if (r)
			goto failed;
	}

	r = radeon_ib_ring_tests(rdev);
	if (r)
		DRM_ERROR("ib ring test failed (%d).\n", r);

	/*
	 * Turks/Thames GPU will freeze whole laptop if DPM is not restarted
	 * after the CP ring have chew one packet at least. Hence here we stop
	 * and restart DPM after the radeon_ib_ring_tests().
	 */
	if (rdev->pm.dpm_enabled &&
	    (rdev->pm.pm_method == PM_METHOD_DPM) &&
	    (rdev->family == CHIP_TURKS) &&
	    (rdev->flags & RADEON_IS_MOBILITY)) {
		mutex_lock(&rdev->pm.mutex);
		radeon_dpm_disable(rdev);
		radeon_dpm_enable(rdev);
		mutex_unlock(&rdev->pm.mutex);
	}

	if ((radeon_testing & 1)) {
		if (rdev->accel_working)
			radeon_test_moves(rdev);
		else
			DRM_INFO("radeon: acceleration disabled, skipping move tests\n");
	}
	if ((radeon_testing & 2)) {
		if (rdev->accel_working)
			radeon_test_syncing(rdev);
		else
			DRM_INFO("radeon: acceleration disabled, skipping sync tests\n");
	}
	if (radeon_benchmarking) {
		if (rdev->accel_working)
			radeon_benchmark(rdev, radeon_benchmarking);
		else
			DRM_INFO("radeon: acceleration disabled, skipping benchmarks\n");
	}
	return 0;

failed:
	if (radeon_is_px(ddev))
		pm_runtime_put_noidle(ddev->dev);
	if (runtime)
		vga_switcheroo_fini_domain_pm_ops(rdev->dev);
	return r;
}

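/**
 * radeon_device_fini - tear down the driver
 *
 * @rdev: radeon_device pointer
 *
 * Tear down the driver info (all asics). Called at driver shutdown.
 */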
void radeon_device_fini(struct radeon_device *rdev)
{
	DRM_INFO("radeon: finishing device.\n");
	rdev->shutdown = true;
	/* evict vram memory */
	radeon_bo_evict_vram(rdev);
	radeon_fini(rdev);
	if (!pci_is_thunderbolt_attached(rdev->pdev))
		vga_switcheroo_unregister_client(rdev->pdev);
	if (rdev->flags & RADEON_IS_PX)
		vga_switcheroo_fini_domain_pm_ops(rdev->dev);
	vga_client_unregister(rdev->pdev);
	if (rdev->rio_mem)
		pci_iounmap(rdev->pdev, rdev->rio_mem);
	rdev->rio_mem = NULL;
	iounmap(rdev->rmmio);
	rdev->rmmio = NULL;
	if (rdev->family >= CHIP_BONAIRE)
		radeon_doorbell_fini(rdev);
}

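/**
 * radeon_suspend_kms - initiate device suspend
 *
 * @dev: drm dev pointer
 * @suspend: disable the PCI device and put it into a low power state
 * @fbcon: suspend the fbdev console as well
 * @freeze: hibernation (S4) path, reset the asic instead of powering down
 *
 * Puts the hw in a suspended state: disables displays, unpins buffers,
 * evicts VRAM, waits for the GPU to idle and saves state (all asics).
 * Returns 0 for success or an error on failure.
 */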
int radeon_suspend_kms(struct drm_device *dev, bool suspend,
		       bool fbcon, bool freeze)
{
	struct radeon_device *rdev;
	struct pci_dev *pdev;
	struct drm_crtc *crtc;
	struct drm_connector *connector;
	int i, r;

	if (dev == NULL || dev->dev_private == NULL) {
		return -ENODEV;
	}

	rdev = dev->dev_private;
	pdev = to_pci_dev(dev->dev);

	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	drm_kms_helper_poll_disable(dev);

	drm_modeset_lock_all(dev);
	/* turn off display hw */
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
	}
	drm_modeset_unlock_all(dev);

	/* unpin the front buffers and cursors */
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
		struct drm_framebuffer *fb = crtc->primary->fb;
		struct radeon_bo *robj;

		if (radeon_crtc->cursor_bo) {
			struct radeon_bo *robj = gem_to_radeon_bo(radeon_crtc->cursor_bo);
			r = radeon_bo_reserve(robj, false);
			if (r == 0) {
				radeon_bo_unpin(robj);
				radeon_bo_unreserve(robj);
			}
		}

		if (fb == NULL || fb->obj[0] == NULL) {
			continue;
		}
		robj = gem_to_radeon_bo(fb->obj[0]);
		/* don't unpin kernel fb objects */
		if (!radeon_fbdev_robj_is_fb(rdev, robj)) {
			r = radeon_bo_reserve(robj, false);
			if (r == 0) {
				radeon_bo_unpin(robj);
				radeon_bo_unreserve(robj);
			}
		}
	}
	/* evict vram memory */
	radeon_bo_evict_vram(rdev);

	/* wait for gpu to finish processing current batch */
	for (i = 0; i < RADEON_NUM_RINGS; i++) {
		r = radeon_fence_wait_empty(rdev, i);
		if (r) {
			/* delay GPU reset to resume */
			radeon_fence_driver_force_completion(rdev, i);
		}
	}

	radeon_save_bios_scratch_regs(rdev);

	radeon_suspend(rdev);
	radeon_hpd_fini(rdev);
	/* evict remaining vram memory: this second call evicts the GART page
	 * table using the CPU.
	 */
	radeon_bo_evict_vram(rdev);

	radeon_agp_suspend(rdev);

	pci_save_state(pdev);
	if (freeze && rdev->family >= CHIP_CEDAR && !(rdev->flags & RADEON_IS_IGP)) {
		rdev->asic->asic_reset(rdev, true);
		pci_restore_state(pdev);
	} else if (suspend) {
		/* Shut down the device */
		pci_disable_device(pdev);
		pci_set_power_state(pdev, PCI_D3hot);
	}

	if (fbcon) {
		console_lock();
		radeon_fbdev_set_suspend(rdev, 1);
		console_unlock();
	}
	return 0;
}

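/**
 * radeon_resume_kms - initiate device resume
 *
 * @dev: drm dev pointer
 * @resume: re-power and re-enable the PCI device
 * @fbcon: resume the fbdev console as well
 *
 * Brings the hw back to an operational state: restores register state,
 * re-pins cursors, re-inits display hardware and power management (all asics).
 * Returns 0 for success or an error on failure.
 */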
int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
{
	struct drm_connector *connector;
	struct radeon_device *rdev = dev->dev_private;
	struct pci_dev *pdev = to_pci_dev(dev->dev);
	struct drm_crtc *crtc;
	int r;

	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	if (fbcon) {
		console_lock();
	}
	if (resume) {
		pci_set_power_state(pdev, PCI_D0);
		pci_restore_state(pdev);
		if (pci_enable_device(pdev)) {
			if (fbcon)
				console_unlock();
			return -1;
		}
	}
	/* resume AGP if in use */
	radeon_agp_resume(rdev);
	radeon_resume(rdev);

	r = radeon_ib_ring_tests(rdev);
	if (r)
		DRM_ERROR("ib ring test failed (%d).\n", r);

	if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
		/* do dpm late init */
		r = radeon_pm_late_init(rdev);
		if (r) {
			rdev->pm.dpm_enabled = false;
			DRM_ERROR("radeon_pm_late_init failed, disabling dpm\n");
		}
	} else {
		/* resume old pm late */
		radeon_pm_resume(rdev);
	}

	radeon_restore_bios_scratch_regs(rdev);

	/* pin cursors */
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);

		if (radeon_crtc->cursor_bo) {
			struct radeon_bo *robj = gem_to_radeon_bo(radeon_crtc->cursor_bo);
			r = radeon_bo_reserve(robj, false);
			if (r == 0) {
				/* legacy (pre-AVIVO) cursors must sit within a 27-bit offset */
				r = radeon_bo_pin_restricted(robj,
							     RADEON_GEM_DOMAIN_VRAM,
							     ASIC_IS_AVIVO(rdev) ?
							     0 : 1 << 27,
							     &radeon_crtc->cursor_addr);
				if (r != 0)
					DRM_ERROR("Failed to pin cursor BO (%d)\n", r);
				radeon_bo_unreserve(robj);
			}
		}
	}

	/* init dig PHYs, disp eng pll */
	if (rdev->is_atom_bios) {
		radeon_atom_encoder_init(rdev);
		radeon_atom_disp_eng_pll_init(rdev);
		/* turn on the BL */
		if (rdev->mode_info.bl_encoder) {
			u8 bl_level = radeon_get_backlight_level(rdev,
								 rdev->mode_info.bl_encoder);
			radeon_set_backlight_level(rdev, rdev->mode_info.bl_encoder,
						   bl_level);
		}
	}
	/* reset hpd state */
	radeon_hpd_init(rdev);

	if (fbcon) {
		drm_helper_resume_force_mode(dev);
		/* turn on display hw */
		drm_modeset_lock_all(dev);
		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
			drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
		}
		drm_modeset_unlock_all(dev);
	}

	drm_kms_helper_poll_enable(dev);

	/* set the power state here in case we are a PX system or headless */
	if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled)
		radeon_pm_compute_clocks(rdev);

	if (fbcon) {
		radeon_fbdev_set_suspend(rdev, 0);
		console_unlock();
	}

	return 0;
}

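/**
 * radeon_gpu_reset - reset the asic
 *
 * @rdev: radeon device pointer
 *
 * Attempts a GPU reset after a hang (all asics): saves the ring contents,
 * resets the asic, resumes the hw and restores the saved commands.
 * Returns 0 for success or an error on failure.
 */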
int radeon_gpu_reset(struct radeon_device *rdev)
{
	unsigned ring_sizes[RADEON_NUM_RINGS];
	uint32_t *ring_data[RADEON_NUM_RINGS];

	bool saved = false;

	int i, r;
	int resched;

	down_write(&rdev->exclusive_lock);

	if (!rdev->needs_reset) {
		up_write(&rdev->exclusive_lock);
		return 0;
	}

	atomic_inc(&rdev->gpu_reset_counter);

	radeon_save_bios_scratch_regs(rdev);
	/* block TTM */
	resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
	radeon_suspend(rdev);
	radeon_hpd_fini(rdev);

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		ring_sizes[i] = radeon_ring_backup(rdev, &rdev->ring[i],
						   &ring_data[i]);
		if (ring_sizes[i]) {
			saved = true;
			dev_info(rdev->dev, "Saved %d dwords of commands "
				 "on ring %d.\n", ring_sizes[i], i);
		}
	}

	r = radeon_asic_reset(rdev);
	if (!r) {
		dev_info(rdev->dev, "GPU reset succeeded, trying to resume\n");
		radeon_resume(rdev);
	}

	radeon_restore_bios_scratch_regs(rdev);

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		if (!r && ring_data[i]) {
			radeon_ring_restore(rdev, &rdev->ring[i],
					    ring_sizes[i], ring_data[i]);
		} else {
			radeon_fence_driver_force_completion(rdev, i);
			kfree(ring_data[i]);
		}
	}

	if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
		/* do dpm late init */
		r = radeon_pm_late_init(rdev);
		if (r) {
			rdev->pm.dpm_enabled = false;
			DRM_ERROR("radeon_pm_late_init failed, disabling dpm\n");
		}
	} else {
		/* resume old pm late */
		radeon_pm_resume(rdev);
	}

	/* init dig PHYs, disp eng pll */
	if (rdev->is_atom_bios) {
		radeon_atom_encoder_init(rdev);
		radeon_atom_disp_eng_pll_init(rdev);
		/* turn on the BL */
		if (rdev->mode_info.bl_encoder) {
			u8 bl_level = radeon_get_backlight_level(rdev,
								 rdev->mode_info.bl_encoder);
			radeon_set_backlight_level(rdev, rdev->mode_info.bl_encoder,
						   bl_level);
		}
	}
	/* reset hpd state */
	radeon_hpd_init(rdev);

	ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);

	rdev->in_reset = true;
	rdev->needs_reset = false;

	downgrade_write(&rdev->exclusive_lock);

	drm_helper_resume_force_mode(rdev->ddev);

	/* set the power state here in case we are a PX system or headless */
	if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled)
		radeon_pm_compute_clocks(rdev);

	if (!r) {
		r = radeon_ib_ring_tests(rdev);
		if (r && saved)
			r = -EAGAIN;
	} else {
		/* bad news, how to tell it to userspace ? */
		dev_info(rdev->dev, "GPU reset failed\n");
	}

	rdev->needs_reset = r == -EAGAIN;
	rdev->in_reset = false;

	up_read(&rdev->exclusive_lock);
	return r;
}