1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28#include <linux/console.h>
29#include <linux/slab.h>
30#include <drm/drmP.h>
31#include <drm/drm_crtc_helper.h>
32#include <drm/radeon_drm.h>
33#include <linux/vgaarb.h>
34#include <linux/vga_switcheroo.h>
35#include <linux/efi.h>
36#include "radeon_reg.h"
37#include "radeon.h"
38#include "atom.h"
39
/* Human-readable ASIC family names, indexed by enum radeon_family
 * (see radeon_family.h).  Used for the boot-time DRM_INFO banner in
 * radeon_device_init().  Order must match the enum exactly. */
static const char radeon_family_name[][16] = {
	"R100",
	"RV100",
	"RS100",
	"RV200",
	"RS200",
	"R200",
	"RV250",
	"RS300",
	"RV280",
	"R300",
	"R350",
	"RV350",
	"RV380",
	"R420",
	"R423",
	"RV410",
	"RS400",
	"RS480",
	"RS600",
	"RS690",
	"RS740",
	"RV515",
	"R520",
	"RV530",
	"RV560",
	"RV570",
	"R580",
	"R600",
	"RV610",
	"RV630",
	"RV670",
	"RV620",
	"RV635",
	"RS780",
	"RS880",
	"RV770",
	"RV730",
	"RV710",
	"RV740",
	"CEDAR",
	"REDWOOD",
	"JUNIPER",
	"CYPRESS",
	"HEMLOCK",
	"PALM",
	"SUMO",
	"SUMO2",
	"BARTS",
	"TURKS",
	"CAICOS",
	"CAYMAN",
	"ARUBA",
	"TAHITI",
	"PITCAIRN",
	"VERDE",
	"OLAND",
	"HAINAN",
	"BONAIRE",
	"KAVERI",
	"KABINI",
	"LAST",
};
103
104
105
106
107
108
109
110
111
112
113
114void radeon_program_register_sequence(struct radeon_device *rdev,
115 const u32 *registers,
116 const u32 array_size)
117{
118 u32 tmp, reg, and_mask, or_mask;
119 int i;
120
121 if (array_size % 3)
122 return;
123
124 for (i = 0; i < array_size; i +=3) {
125 reg = registers[i + 0];
126 and_mask = registers[i + 1];
127 or_mask = registers[i + 2];
128
129 if (and_mask == 0xffffffff) {
130 tmp = or_mask;
131 } else {
132 tmp = RREG32(reg);
133 tmp &= ~and_mask;
134 tmp |= or_mask;
135 }
136 WREG32(reg, tmp);
137 }
138}
139
140
141
142
143
144
145
146
147void radeon_surface_init(struct radeon_device *rdev)
148{
149
150 if (rdev->family < CHIP_R600) {
151 int i;
152
153 for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {
154 if (rdev->surface_regs[i].bo)
155 radeon_bo_get_surface_reg(rdev->surface_regs[i].bo);
156 else
157 radeon_clear_surface_reg(rdev, i);
158 }
159
160 WREG32(RADEON_SURFACE_CNTL, 0);
161 }
162}
163
164
165
166
167
168
169
170
171
172
173
174void radeon_scratch_init(struct radeon_device *rdev)
175{
176 int i;
177
178
179 if (rdev->family < CHIP_R300) {
180 rdev->scratch.num_reg = 5;
181 } else {
182 rdev->scratch.num_reg = 7;
183 }
184 rdev->scratch.reg_base = RADEON_SCRATCH_REG0;
185 for (i = 0; i < rdev->scratch.num_reg; i++) {
186 rdev->scratch.free[i] = true;
187 rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4);
188 }
189}
190
191
192
193
194
195
196
197
198
199
200int radeon_scratch_get(struct radeon_device *rdev, uint32_t *reg)
201{
202 int i;
203
204 for (i = 0; i < rdev->scratch.num_reg; i++) {
205 if (rdev->scratch.free[i]) {
206 rdev->scratch.free[i] = false;
207 *reg = rdev->scratch.reg[i];
208 return 0;
209 }
210 }
211 return -EINVAL;
212}
213
214
215
216
217
218
219
220
221
222void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg)
223{
224 int i;
225
226 for (i = 0; i < rdev->scratch.num_reg; i++) {
227 if (rdev->scratch.reg[i] == reg) {
228 rdev->scratch.free[i] = true;
229 return;
230 }
231 }
232}
233
234
235
236
237
238
239
240
241
242
243
244
/**
 * radeon_doorbell_init - init the doorbell aperture (CIK-class parts)
 * @rdev: radeon_device pointer
 *
 * Maps the doorbell BAR (PCI BAR 2), capped at 4MB, and marks every
 * doorbell page free.  Returns 0 on success, -ENOMEM if the ioremap fails.
 */
int radeon_doorbell_init(struct radeon_device *rdev)
{
	int i;

	/* doorbell bar mapping */
	rdev->doorbell.base = pci_resource_start(rdev->pdev, 2);
	rdev->doorbell.size = pci_resource_len(rdev->pdev, 2);

	/* limit to 4 MB to avoid wasting vmap space */
	if (rdev->doorbell.size > (4 * 1024 * 1024))
		rdev->doorbell.size = 4 * 1024 * 1024;

	rdev->doorbell.ptr = ioremap(rdev->doorbell.base, rdev->doorbell.size);
	if (rdev->doorbell.ptr == NULL) {
		return -ENOMEM;
	}
	DRM_INFO("doorbell mmio base: 0x%08X\n", (uint32_t)rdev->doorbell.base);
	DRM_INFO("doorbell mmio size: %u\n", (unsigned)rdev->doorbell.size);

	rdev->doorbell.num_pages = rdev->doorbell.size / PAGE_SIZE;

	/* NOTE(review): num_pages is assumed to never exceed the size of the
	 * doorbell.free[] array (4MB cap / PAGE_SIZE) — confirm against the
	 * array's declaration in radeon.h. */
	for (i = 0; i < rdev->doorbell.num_pages; i++) {
		rdev->doorbell.free[i] = true;
	}
	return 0;
}
271
272
273
274
275
276
277
278
/**
 * radeon_doorbell_fini - tear down the doorbell aperture mapping
 * @rdev: radeon_device pointer
 *
 * Unmaps the doorbell BAR and clears the pointer so later teardown paths
 * cannot reuse it.
 */
void radeon_doorbell_fini(struct radeon_device *rdev)
{
	iounmap(rdev->doorbell.ptr);
	rdev->doorbell.ptr = NULL;
}
284
285
286
287
288
289
290
291
292
293
294int radeon_doorbell_get(struct radeon_device *rdev, u32 *doorbell)
295{
296 int i;
297
298 for (i = 0; i < rdev->doorbell.num_pages; i++) {
299 if (rdev->doorbell.free[i]) {
300 rdev->doorbell.free[i] = false;
301 *doorbell = i;
302 return 0;
303 }
304 }
305 return -EINVAL;
306}
307
308
309
310
311
312
313
314
315
316void radeon_doorbell_free(struct radeon_device *rdev, u32 doorbell)
317{
318 if (doorbell < rdev->doorbell.num_pages)
319 rdev->doorbell.free[doorbell] = true;
320}
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
/**
 * radeon_wb_disable - disable writeback
 * @rdev: radeon_device pointer
 *
 * Flags writeback as disabled; readers of wb.enabled fall back to
 * register-based status reporting.
 */
void radeon_wb_disable(struct radeon_device *rdev)
{
	rdev->wb.enabled = false;
}
340
341
342
343
344
345
346
347
348
349void radeon_wb_fini(struct radeon_device *rdev)
350{
351 radeon_wb_disable(rdev);
352 if (rdev->wb.wb_obj) {
353 if (!radeon_bo_reserve(rdev->wb.wb_obj, false)) {
354 radeon_bo_kunmap(rdev->wb.wb_obj);
355 radeon_bo_unpin(rdev->wb.wb_obj);
356 radeon_bo_unreserve(rdev->wb.wb_obj);
357 }
358 radeon_bo_unref(&rdev->wb.wb_obj);
359 rdev->wb.wb = NULL;
360 rdev->wb.wb_obj = NULL;
361 }
362}
363
364
365
366
367
368
369
370
371
372
/**
 * radeon_wb_init - allocate and enable the writeback buffer
 * @rdev: radeon_device pointer
 *
 * Allocates a GTT buffer object for GPU writeback (fences, rptr updates),
 * pins and kmaps it, then decides whether writeback can be enabled for
 * this ASIC/configuration.  Returns 0 on success or a negative error code
 * from the BO setup path.
 */
int radeon_wb_init(struct radeon_device *rdev)
{
	int r;

	if (rdev->wb.wb_obj == NULL) {
		r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true,
				     RADEON_GEM_DOMAIN_GTT, NULL, &rdev->wb.wb_obj);
		if (r) {
			dev_warn(rdev->dev, "(%d) create WB bo failed\n", r);
			return r;
		}
		r = radeon_bo_reserve(rdev->wb.wb_obj, false);
		if (unlikely(r != 0)) {
			radeon_wb_fini(rdev);
			return r;
		}
		r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
				  &rdev->wb.gpu_addr);
		if (r) {
			radeon_bo_unreserve(rdev->wb.wb_obj);
			dev_warn(rdev->dev, "(%d) pin WB bo failed\n", r);
			radeon_wb_fini(rdev);
			return r;
		}
		r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
		radeon_bo_unreserve(rdev->wb.wb_obj);
		if (r) {
			dev_warn(rdev->dev, "(%d) map WB bo failed\n", r);
			radeon_wb_fini(rdev);
			return r;
		}
	}

	/* clear wb memory so stale values are never consumed */
	memset((char *)rdev->wb.wb, 0, RADEON_GPU_PAGE_SIZE);

	rdev->wb.use_event = false;
	/* writeback policy: the module parameter radeon_no_wb forces it off;
	 * AGP and pre-R300 parts also keep it off (presumably due to
	 * coherency issues on those configurations — NOTE(review): confirm) */
	if (radeon_no_wb == 1) {
		rdev->wb.enabled = false;
	} else {
		if (rdev->flags & RADEON_IS_AGP) {
			rdev->wb.enabled = false;
		} else if (rdev->family < CHIP_R300) {
			rdev->wb.enabled = false;
		} else {
			rdev->wb.enabled = true;
			/* R600+ can use fence EOP events */
			if (rdev->family >= CHIP_R600) {
				rdev->wb.use_event = true;
			}
		}
	}
	/* PALM and newer (fusion APUs onward) always use wb + events,
	 * overriding the policy above, including radeon_no_wb */
	if (rdev->family >= CHIP_PALM) {
		rdev->wb.enabled = true;
		rdev->wb.use_event = true;
	}

	dev_info(rdev->dev, "WB %sabled\n", rdev->wb.enabled ? "en" : "dis");

	return 0;
}
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 base)
481{
482 uint64_t limit = (uint64_t)radeon_vram_limit << 20;
483
484 mc->vram_start = base;
485 if (mc->mc_vram_size > (rdev->mc.mc_mask - base + 1)) {
486 dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
487 mc->real_vram_size = mc->aper_size;
488 mc->mc_vram_size = mc->aper_size;
489 }
490 mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
491 if (rdev->flags & RADEON_IS_AGP && mc->vram_end > mc->gtt_start && mc->vram_start <= mc->gtt_end) {
492 dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
493 mc->real_vram_size = mc->aper_size;
494 mc->mc_vram_size = mc->aper_size;
495 }
496 mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
497 if (limit && limit < mc->real_vram_size)
498 mc->real_vram_size = limit;
499 dev_info(rdev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
500 mc->mc_vram_size >> 20, mc->vram_start,
501 mc->vram_end, mc->real_vram_size >> 20);
502}
503
504
505
506
507
508
509
510
511
512
513
514
515
516void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
517{
518 u64 size_af, size_bf;
519
520 size_af = ((rdev->mc.mc_mask - mc->vram_end) + mc->gtt_base_align) & ~mc->gtt_base_align;
521 size_bf = mc->vram_start & ~mc->gtt_base_align;
522 if (size_bf > size_af) {
523 if (mc->gtt_size > size_bf) {
524 dev_warn(rdev->dev, "limiting GTT\n");
525 mc->gtt_size = size_bf;
526 }
527 mc->gtt_start = (mc->vram_start & ~mc->gtt_base_align) - mc->gtt_size;
528 } else {
529 if (mc->gtt_size > size_af) {
530 dev_warn(rdev->dev, "limiting GTT\n");
531 mc->gtt_size = size_af;
532 }
533 mc->gtt_start = (mc->vram_end + 1 + mc->gtt_base_align) & ~mc->gtt_base_align;
534 }
535 mc->gtt_end = mc->gtt_start + mc->gtt_size - 1;
536 dev_info(rdev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n",
537 mc->gtt_size >> 20, mc->gtt_start, mc->gtt_end);
538}
539
540
541
542
543
544
545
546
547
548
549
550
551
/**
 * radeon_card_posted - check whether the GPU has been initialized (posted)
 * @rdev: radeon_device pointer
 *
 * Heuristically determines whether the card was already POSTed by the
 * VBIOS/firmware, by checking CRTC enable bits and, failing that, the
 * memory-size config register.  Returns true if the card looks posted.
 */
bool radeon_card_posted(struct radeon_device *rdev)
{
	uint32_t reg;

	/* EFI Macs with pre-R600 parts must always be posted by the driver */
	if (efi_enabled(EFI_BOOT) &&
	    (rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE) &&
	    (rdev->family < CHIP_R600))
		return false;

	/* parts without display hardware have no CRTCs to check */
	if (ASIC_IS_NODCE(rdev))
		goto check_memsize;

	/* first check CRTCs — any enabled CRTC implies the card is posted */
	if (ASIC_IS_DCE4(rdev)) {
		reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET);
		if (rdev->num_crtc >= 4) {
			reg |= RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) |
				RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET);
		}
		if (rdev->num_crtc >= 6) {
			reg |= RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) |
				RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
		}
		if (reg & EVERGREEN_CRTC_MASTER_EN)
			return true;
	} else if (ASIC_IS_AVIVO(rdev)) {
		reg = RREG32(AVIVO_D1CRTC_CONTROL) |
		      RREG32(AVIVO_D2CRTC_CONTROL);
		if (reg & AVIVO_CRTC_EN) {
			return true;
		}
	} else {
		/* legacy (pre-AVIVO) CRTC registers */
		reg = RREG32(RADEON_CRTC_GEN_CNTL) |
		      RREG32(RADEON_CRTC2_GEN_CNTL);
		if (reg & RADEON_CRTC_EN) {
			return true;
		}
	}

check_memsize:
	/* then check MEM_SIZE — a non-zero value means the MC was set up */
	if (rdev->family >= CHIP_R600)
		reg = RREG32(R600_CONFIG_MEMSIZE);
	else
		reg = RREG32(RADEON_CONFIG_MEMSIZE);

	if (reg)
		return true;

	return false;

}
606
607
608
609
610
611
612
613
614
615void radeon_update_bandwidth_info(struct radeon_device *rdev)
616{
617 fixed20_12 a;
618 u32 sclk = rdev->pm.current_sclk;
619 u32 mclk = rdev->pm.current_mclk;
620
621
622 a.full = dfixed_const(100);
623 rdev->pm.sclk.full = dfixed_const(sclk);
624 rdev->pm.sclk.full = dfixed_div(rdev->pm.sclk, a);
625 rdev->pm.mclk.full = dfixed_const(mclk);
626 rdev->pm.mclk.full = dfixed_div(rdev->pm.mclk, a);
627
628 if (rdev->flags & RADEON_IS_IGP) {
629 a.full = dfixed_const(16);
630
631 rdev->pm.core_bandwidth.full = dfixed_div(rdev->pm.sclk, a);
632 }
633}
634
635
636
637
638
639
640
641
642
643
644bool radeon_boot_test_post_card(struct radeon_device *rdev)
645{
646 if (radeon_card_posted(rdev))
647 return true;
648
649 if (rdev->bios) {
650 DRM_INFO("GPU not posted. posting now...\n");
651 if (rdev->is_atom_bios)
652 atom_asic_init(rdev->mode_info.atom_context);
653 else
654 radeon_combios_asic_init(rdev->ddev);
655 return true;
656 } else {
657 dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
658 return false;
659 }
660}
661
662
663
664
665
666
667
668
669
670
671
672int radeon_dummy_page_init(struct radeon_device *rdev)
673{
674 if (rdev->dummy_page.page)
675 return 0;
676 rdev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO);
677 if (rdev->dummy_page.page == NULL)
678 return -ENOMEM;
679 rdev->dummy_page.addr = pci_map_page(rdev->pdev, rdev->dummy_page.page,
680 0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
681 if (pci_dma_mapping_error(rdev->pdev, rdev->dummy_page.addr)) {
682 dev_err(&rdev->pdev->dev, "Failed to DMA MAP the dummy page\n");
683 __free_page(rdev->dummy_page.page);
684 rdev->dummy_page.page = NULL;
685 return -ENOMEM;
686 }
687 return 0;
688}
689
690
691
692
693
694
695
696
697void radeon_dummy_page_fini(struct radeon_device *rdev)
698{
699 if (rdev->dummy_page.page == NULL)
700 return;
701 pci_unmap_page(rdev->pdev, rdev->dummy_page.addr,
702 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
703 __free_page(rdev->dummy_page.page);
704 rdev->dummy_page.page = NULL;
705}
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726static uint32_t cail_pll_read(struct card_info *info, uint32_t reg)
727{
728 struct radeon_device *rdev = info->dev->dev_private;
729 uint32_t r;
730
731 r = rdev->pll_rreg(rdev, reg);
732 return r;
733}
734
735
736
737
738
739
740
741
742
743
744static void cail_pll_write(struct card_info *info, uint32_t reg, uint32_t val)
745{
746 struct radeon_device *rdev = info->dev->dev_private;
747
748 rdev->pll_wreg(rdev, reg, val);
749}
750
751
752
753
754
755
756
757
758
759
760static uint32_t cail_mc_read(struct card_info *info, uint32_t reg)
761{
762 struct radeon_device *rdev = info->dev->dev_private;
763 uint32_t r;
764
765 r = rdev->mc_rreg(rdev, reg);
766 return r;
767}
768
769
770
771
772
773
774
775
776
777
778static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val)
779{
780 struct radeon_device *rdev = info->dev->dev_private;
781
782 rdev->mc_wreg(rdev, reg, val);
783}
784
785
786
787
788
789
790
791
792
793
794static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val)
795{
796 struct radeon_device *rdev = info->dev->dev_private;
797
798 WREG32(reg*4, val);
799}
800
801
802
803
804
805
806
807
808
809
810static uint32_t cail_reg_read(struct card_info *info, uint32_t reg)
811{
812 struct radeon_device *rdev = info->dev->dev_private;
813 uint32_t r;
814
815 r = RREG32(reg*4);
816 return r;
817}
818
819
820
821
822
823
824
825
826
827
828static void cail_ioreg_write(struct card_info *info, uint32_t reg, uint32_t val)
829{
830 struct radeon_device *rdev = info->dev->dev_private;
831
832 WREG32_IO(reg*4, val);
833}
834
835
836
837
838
839
840
841
842
843
844static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg)
845{
846 struct radeon_device *rdev = info->dev->dev_private;
847 uint32_t r;
848
849 r = RREG32_IO(reg*4);
850 return r;
851}
852
853
854
855
856
857
858
859
860
861
862
863int radeon_atombios_init(struct radeon_device *rdev)
864{
865 struct card_info *atom_card_info =
866 kzalloc(sizeof(struct card_info), GFP_KERNEL);
867
868 if (!atom_card_info)
869 return -ENOMEM;
870
871 rdev->mode_info.atom_card_info = atom_card_info;
872 atom_card_info->dev = rdev->ddev;
873 atom_card_info->reg_read = cail_reg_read;
874 atom_card_info->reg_write = cail_reg_write;
875
876 if (rdev->rio_mem) {
877 atom_card_info->ioreg_read = cail_ioreg_read;
878 atom_card_info->ioreg_write = cail_ioreg_write;
879 } else {
880 DRM_ERROR("Unable to find PCI I/O BAR; using MMIO for ATOM IIO\n");
881 atom_card_info->ioreg_read = cail_reg_read;
882 atom_card_info->ioreg_write = cail_reg_write;
883 }
884 atom_card_info->mc_read = cail_mc_read;
885 atom_card_info->mc_write = cail_mc_write;
886 atom_card_info->pll_read = cail_pll_read;
887 atom_card_info->pll_write = cail_pll_write;
888
889 rdev->mode_info.atom_context = atom_parse(atom_card_info, rdev->bios);
890 if (!rdev->mode_info.atom_context) {
891 radeon_atombios_fini(rdev);
892 return -ENOMEM;
893 }
894
895 mutex_init(&rdev->mode_info.atom_context->mutex);
896 radeon_atom_initialize_bios_scratch_regs(rdev->ddev);
897 atom_allocate_fb_scratch(rdev->mode_info.atom_context);
898 return 0;
899}
900
901
902
903
904
905
906
907
908
909
910void radeon_atombios_fini(struct radeon_device *rdev)
911{
912 if (rdev->mode_info.atom_context) {
913 kfree(rdev->mode_info.atom_context->scratch);
914 }
915 kfree(rdev->mode_info.atom_context);
916 rdev->mode_info.atom_context = NULL;
917 kfree(rdev->mode_info.atom_card_info);
918 rdev->mode_info.atom_card_info = NULL;
919}
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
/**
 * radeon_combios_init - set up driver state for COM BIOS (legacy) parts
 * @rdev: radeon_device pointer
 *
 * Initializes the BIOS scratch registers.  Always returns 0 (kept
 * non-void for symmetry with radeon_atombios_init()).
 */
int radeon_combios_init(struct radeon_device *rdev)
{
	radeon_combios_initialize_bios_scratch_regs(rdev->ddev);
	return 0;
}
942
943
944
945
946
947
948
949
950
/**
 * radeon_combios_fini - COM BIOS teardown counterpart
 * @rdev: radeon_device pointer
 *
 * Intentionally empty: COM BIOS init allocates nothing that needs
 * freeing; kept for symmetry with radeon_atombios_fini().
 */
void radeon_combios_fini(struct radeon_device *rdev)
{
}
954
955
956
957
958
959
960
961
962
963
964
965static unsigned int radeon_vga_set_decode(void *cookie, bool state)
966{
967 struct radeon_device *rdev = cookie;
968 radeon_vga_set_state(rdev, state);
969 if (state)
970 return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
971 VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
972 else
973 return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
974}
975
976
977
978
979
980
981
982
983
/**
 * radeon_check_pot_argument - check that a module argument is a power of two
 * @arg: the value to check
 *
 * Returns true for exact powers of two and for zero (zero means the
 * option is unset/disabled, which callers treat as valid).
 */
static bool radeon_check_pot_argument(int arg)
{
	/* a power of two shares no set bits with (value - 1) */
	return !(arg & (arg - 1));
}
988
989
990
991
992
993
994
995
996
997static void radeon_check_arguments(struct radeon_device *rdev)
998{
999
1000 if (!radeon_check_pot_argument(radeon_vram_limit)) {
1001 dev_warn(rdev->dev, "vram limit (%d) must be a power of 2\n",
1002 radeon_vram_limit);
1003 radeon_vram_limit = 0;
1004 }
1005
1006 if (radeon_gart_size == -1) {
1007
1008 if (rdev->family >= CHIP_RV770)
1009 radeon_gart_size = 1024;
1010 else
1011 radeon_gart_size = 512;
1012 }
1013
1014 if (radeon_gart_size < 32) {
1015 dev_warn(rdev->dev, "gart size (%d) too small\n",
1016 radeon_gart_size);
1017 if (rdev->family >= CHIP_RV770)
1018 radeon_gart_size = 1024;
1019 else
1020 radeon_gart_size = 512;
1021 } else if (!radeon_check_pot_argument(radeon_gart_size)) {
1022 dev_warn(rdev->dev, "gart size (%d) must be a power of 2\n",
1023 radeon_gart_size);
1024 if (rdev->family >= CHIP_RV770)
1025 radeon_gart_size = 1024;
1026 else
1027 radeon_gart_size = 512;
1028 }
1029 rdev->mc.gtt_size = (uint64_t)radeon_gart_size << 20;
1030
1031
1032 switch (radeon_agpmode) {
1033 case -1:
1034 case 0:
1035 case 1:
1036 case 2:
1037 case 4:
1038 case 8:
1039 break;
1040 default:
1041 dev_warn(rdev->dev, "invalid AGP mode %d (valid mode: "
1042 "-1, 0, 1, 2, 4, 8)\n", radeon_agpmode);
1043 radeon_agpmode = 0;
1044 break;
1045 }
1046}
1047
1048
1049
1050
1051
1052
1053
1054static bool radeon_switcheroo_quirk_long_wakeup(struct pci_dev *pdev)
1055{
1056
1057
1058 if (pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE &&
1059 pdev->subsystem_device == 0x00e2) {
1060 printk(KERN_INFO "radeon: quirking longer d3 wakeup delay\n");
1061 return true;
1062 }
1063
1064 return false;
1065}
1066
1067
1068
1069
1070
1071
1072
1073
1074
1075
/**
 * radeon_switcheroo_set_state - vga_switcheroo power-state callback
 * @pdev: pci device
 * @state: VGA_SWITCHEROO_ON / VGA_SWITCHEROO_OFF
 *
 * Powers the GPU up (resume KMS, re-enable output polling) or down
 * (suspend KMS, disable polling), tracking the transition via
 * dev->switch_power_state.  Applies the long-wakeup d3_delay quirk for
 * affected boards during power-up.
 */
static void radeon_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
	if (state == VGA_SWITCHEROO_ON) {
		unsigned d3_delay = dev->pdev->d3_delay;

		printk(KERN_INFO "radeon: switched on\n");
		/* don't suspend or resume card normally */
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;

		/* bump the delay only for quirked boards; restored below */
		if (d3_delay < 20 && radeon_switcheroo_quirk_long_wakeup(pdev))
			dev->pdev->d3_delay = 20;

		radeon_resume_kms(dev);

		dev->pdev->d3_delay = d3_delay;

		dev->switch_power_state = DRM_SWITCH_POWER_ON;
		drm_kms_helper_poll_enable(dev);
	} else {
		printk(KERN_INFO "radeon: switched off\n");
		drm_kms_helper_poll_disable(dev);
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		radeon_suspend_kms(dev, pmm);
		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
	}
}
1104
1105
1106
1107
1108
1109
1110
1111
1112
1113
1114static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
1115{
1116 struct drm_device *dev = pci_get_drvdata(pdev);
1117 bool can_switch;
1118
1119 spin_lock(&dev->count_lock);
1120 can_switch = (dev->open_count == 0);
1121 spin_unlock(&dev->count_lock);
1122 return can_switch;
1123}
1124
/* vga_switcheroo client callbacks; no reprobe hook is needed. */
static const struct vga_switcheroo_client_ops radeon_switcheroo_ops = {
	.set_gpu_state = radeon_switcheroo_set_state,
	.reprobe = NULL,
	.can_switch = radeon_switcheroo_can_switch,
};
1130
1131
1132
1133
1134
1135
1136
1137
1138
1139
1140
1141
1142
/**
 * radeon_device_init - initialize the driver
 * @rdev: radeon_device pointer
 * @ddev: drm device pointer
 * @pdev: pci device pointer
 * @flags: driver flags (low bits carry the ASIC family)
 *
 * Initializes driver state and hardware: locks, DMA masks, MMIO/doorbell
 * mappings, VGA arbiter/switcheroo registration, and the per-ASIC init
 * path.  Returns 0 on success or a negative error code on failure.
 * Called at driver load (KMS).
 */
int radeon_device_init(struct radeon_device *rdev,
		struct drm_device *ddev,
		struct pci_dev *pdev,
		uint32_t flags)
{
	int r, i;
	int dma_bits;

	rdev->shutdown = false;
	rdev->dev = &pdev->dev;
	rdev->ddev = ddev;
	rdev->pdev = pdev;
	rdev->flags = flags;
	rdev->family = flags & RADEON_FAMILY_MASK;
	rdev->is_atom_bios = false;
	rdev->usec_timeout = RADEON_MAX_USEC_TIMEOUT;
	/* provisional default; radeon_check_arguments() overwrites it */
	rdev->mc.gtt_size = 512 * 1024 * 1024;
	rdev->accel_working = false;
	/* set up ring indices */
	for (i = 0; i < RADEON_NUM_RINGS; i++) {
		rdev->ring[i].idx = i;
	}

	DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X).\n",
		radeon_family_name[rdev->family], pdev->vendor, pdev->device,
		pdev->subsystem_vendor, pdev->subsystem_device);

	/* mutex/lock initialization is all done up front so functions below
	 * can be re-entered without locking surprises */
	mutex_init(&rdev->ring_lock);
	mutex_init(&rdev->dc_hw_i2c_mutex);
	atomic_set(&rdev->ih.lock, 0);
	mutex_init(&rdev->gem.mutex);
	mutex_init(&rdev->pm.mutex);
	mutex_init(&rdev->gpu_clock_mutex);
	mutex_init(&rdev->srbm_mutex);
	init_rwsem(&rdev->pm.mclk_lock);
	init_rwsem(&rdev->exclusive_lock);
	init_waitqueue_head(&rdev->irq.vblank_queue);
	r = radeon_gem_init(rdev);
	if (r)
		return r;

	mutex_init(&rdev->vm_manager.lock);
	/* address space for GPU VM: 1M pages (exact per-ASIC limits are set
	 * later by the ASIC-specific code) */
	rdev->vm_manager.max_pfn = 1 << 20;
	INIT_LIST_HEAD(&rdev->vm_manager.lru_vm);

	/* set the per-ASIC function pointers */
	r = radeon_asic_init(rdev);
	if (r)
		return r;
	radeon_check_arguments(rdev);

	/* all IGPs from RS400 on are PCI(E)-only even when the AGP flag
	 * was set */
	if ((rdev->family >= CHIP_RS400) &&
			(rdev->flags & RADEON_IS_IGP)) {
		rdev->flags &= ~RADEON_IS_AGP;
	}

	if (rdev->flags & RADEON_IS_AGP && radeon_agpmode == -1) {
		radeon_agp_disable(rdev);
	}

	/* internal MC address mask: depends on how many address lines the
	 * memory controller has on this family */
	if (rdev->family >= CHIP_CAYMAN)
		rdev->mc.mc_mask = 0xffffffffffULL; /* 40 bit MC */
	else if (rdev->family >= CHIP_CEDAR)
		rdev->mc.mc_mask = 0xfffffffffULL; /* 36 bit MC */
	else
		rdev->mc.mc_mask = 0xffffffffULL; /* 32 bit MC */

	/* DMA mask: AGP and old PCI parts are limited to 32 bits; everything
	 * else can use a 40-bit mask, falling back to 32 bits on failure */
	rdev->need_dma32 = false;
	if (rdev->flags & RADEON_IS_AGP)
		rdev->need_dma32 = true;
	if ((rdev->flags & RADEON_IS_PCI) &&
			(rdev->family <= CHIP_RS740))
		rdev->need_dma32 = true;

	dma_bits = rdev->need_dma32 ? 32 : 40;
	r = pci_set_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		rdev->need_dma32 = true;
		dma_bits = 32;
		printk(KERN_WARNING "radeon: No suitable DMA available.\n");
	}
	r = pci_set_consistent_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		pci_set_consistent_dma_mask(rdev->pdev, DMA_BIT_MASK(32));
		printk(KERN_WARNING "radeon: No coherent DMA available.\n");
	}

	/* register access spinlocks (indirect index/data register pairs) */
	spin_lock_init(&rdev->mmio_idx_lock);
	spin_lock_init(&rdev->smc_idx_lock);
	spin_lock_init(&rdev->pll_idx_lock);
	spin_lock_init(&rdev->mc_idx_lock);
	spin_lock_init(&rdev->pcie_idx_lock);
	spin_lock_init(&rdev->pciep_idx_lock);
	spin_lock_init(&rdev->pif_idx_lock);
	spin_lock_init(&rdev->cg_idx_lock);
	spin_lock_init(&rdev->uvd_idx_lock);
	spin_lock_init(&rdev->rcu_idx_lock);
	spin_lock_init(&rdev->didt_idx_lock);
	spin_lock_init(&rdev->end_idx_lock);
	/* register MMIO BAR: 5 on BONAIRE+ (BAR 2 is the doorbell there),
	 * BAR 2 on older parts */
	if (rdev->family >= CHIP_BONAIRE) {
		rdev->rmmio_base = pci_resource_start(rdev->pdev, 5);
		rdev->rmmio_size = pci_resource_len(rdev->pdev, 5);
	} else {
		rdev->rmmio_base = pci_resource_start(rdev->pdev, 2);
		rdev->rmmio_size = pci_resource_len(rdev->pdev, 2);
	}
	rdev->rmmio = ioremap(rdev->rmmio_base, rdev->rmmio_size);
	if (rdev->rmmio == NULL) {
		return -ENOMEM;
	}
	DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)rdev->rmmio_base);
	DRM_INFO("register mmio size: %u\n", (unsigned)rdev->rmmio_size);

	/* doorbell bar mapping (BONAIRE+ only) */
	if (rdev->family >= CHIP_BONAIRE)
		radeon_doorbell_init(rdev);

	/* io port mapping: find the first I/O BAR */
	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
		if (pci_resource_flags(rdev->pdev, i) & IORESOURCE_IO) {
			rdev->rio_mem_size = pci_resource_len(rdev->pdev, i);
			rdev->rio_mem = pci_iomap(rdev->pdev, i, rdev->rio_mem_size);
			break;
		}
	}
	if (rdev->rio_mem == NULL)
		DRM_ERROR("Unable to find PCI I/O BAR\n");

	/* register with the VGA arbiter and vga_switcheroo */
	vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode);
	vga_switcheroo_register_client(rdev->pdev, &radeon_switcheroo_ops, false);

	r = radeon_init(rdev);
	if (r)
		return r;

	r = radeon_ib_ring_tests(rdev);
	if (r)
		DRM_ERROR("ib ring test failed (%d).\n", r);

	r = radeon_gem_debugfs_init(rdev);
	if (r) {
		DRM_ERROR("registering gem debugfs failed (%d).\n", r);
	}

	if (rdev->flags & RADEON_IS_AGP && !rdev->accel_working) {
		/* acceleration didn't come up in AGP mode — retry the full
		 * init sequence with AGP disabled */
		radeon_asic_reset(rdev);
		radeon_fini(rdev);
		radeon_agp_disable(rdev);
		r = radeon_init(rdev);
		if (r)
			return r;
	}
	/* optional self-tests/benchmarks, gated on module parameters */
	if ((radeon_testing & 1)) {
		if (rdev->accel_working)
			radeon_test_moves(rdev);
		else
			DRM_INFO("radeon: acceleration disabled, skipping move tests\n");
	}
	if ((radeon_testing & 2)) {
		if (rdev->accel_working)
			radeon_test_syncing(rdev);
		else
			DRM_INFO("radeon: acceleration disabled, skipping sync tests\n");
	}
	if (radeon_benchmarking) {
		if (rdev->accel_working)
			radeon_benchmark(rdev, radeon_benchmarking);
		else
			DRM_INFO("radeon: acceleration disabled, skipping benchmarks\n");
	}
	return 0;
}
1342
1343static void radeon_debugfs_remove_files(struct radeon_device *rdev);
1344
1345
1346
1347
1348
1349
1350
1351
1352
/**
 * radeon_device_fini - tear down the driver
 * @rdev: radeon_device pointer
 *
 * Reverses radeon_device_init(): evicts VRAM, runs the per-ASIC fini
 * path, unregisters from vga_switcheroo/vgaarb and unmaps the BARs.
 * Teardown order mirrors init in reverse; do not reorder.
 */
void radeon_device_fini(struct radeon_device *rdev)
{
	DRM_INFO("radeon: finishing device.\n");
	rdev->shutdown = true;
	/* evict vram memory before fini so nothing is left pinned there */
	radeon_bo_evict_vram(rdev);
	radeon_fini(rdev);
	vga_switcheroo_unregister_client(rdev->pdev);
	vga_client_register(rdev->pdev, NULL, NULL, NULL);
	if (rdev->rio_mem)
		pci_iounmap(rdev->pdev, rdev->rio_mem);
	rdev->rio_mem = NULL;
	iounmap(rdev->rmmio);
	rdev->rmmio = NULL;
	if (rdev->family >= CHIP_BONAIRE)
		radeon_doorbell_fini(rdev);
	radeon_debugfs_remove_files(rdev);
}
1371
1372
1373
1374
1375
1376
1377
1378
1379
1380
1381
1382
1383
1384
1385
/**
 * radeon_suspend_kms - driver suspend entry point (KMS path)
 * @dev: drm device pointer
 * @state: pm message (PM_EVENT_*)
 *
 * Turns off displays, unpins non-fbcon framebuffers, evicts VRAM, waits
 * for all rings to idle (forcing fence completion on failure), then
 * suspends the hw and optionally puts the PCI device in D3hot.
 * Returns 0 on success or -ENODEV for a missing device.
 */
int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
{
	struct radeon_device *rdev;
	struct drm_crtc *crtc;
	struct drm_connector *connector;
	int i, r;
	bool force_completion = false;

	if (dev == NULL || dev->dev_private == NULL) {
		return -ENODEV;
	}
	/* nothing to do for a pre-thaw notification */
	if (state.event == PM_EVENT_PRETHAW) {
		return 0;
	}
	rdev = dev->dev_private;

	/* switcheroo already powered the GPU off — skip */
	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	drm_kms_helper_poll_disable(dev);

	/* turn off display hw */
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
	}

	/* unpin the front buffers (fbcon's framebuffer stays pinned so the
	 * console still works after resume) */
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		struct radeon_framebuffer *rfb = to_radeon_framebuffer(crtc->fb);
		struct radeon_bo *robj;

		if (rfb == NULL || rfb->obj == NULL) {
			continue;
		}
		robj = gem_to_radeon_bo(rfb->obj);

		if (!radeon_fbdev_robj_is_fb(rdev, robj)) {
			r = radeon_bo_reserve(robj, false);
			if (r == 0) {
				radeon_bo_unpin(robj);
				radeon_bo_unreserve(robj);
			}
		}
	}
	/* evict vram memory */
	radeon_bo_evict_vram(rdev);

	mutex_lock(&rdev->ring_lock);
	/* wait for gpu to finish processing current batch */
	for (i = 0; i < RADEON_NUM_RINGS; i++) {
		r = radeon_fence_wait_empty_locked(rdev, i);
		if (r) {
			/* delay GPU reset to resume */
			force_completion = true;
		}
	}
	if (force_completion) {
		radeon_fence_driver_force_completion(rdev);
	}
	mutex_unlock(&rdev->ring_lock);

	radeon_save_bios_scratch_regs(rdev);

	radeon_pm_suspend(rdev);
	radeon_suspend(rdev);
	radeon_hpd_fini(rdev);
	/* evict remaining vram memory (GART tables etc. freed by
	 * radeon_suspend may have left buffers behind) */
	radeon_bo_evict_vram(rdev);

	radeon_agp_suspend(rdev);

	pci_save_state(dev->pdev);
	if (state.event == PM_EVENT_SUSPEND) {
		/* Shut down the device; caller will change its power state */
		pci_disable_device(dev->pdev);
		pci_set_power_state(dev->pdev, PCI_D3hot);
	}
	console_lock();
	radeon_fbdev_set_suspend(rdev, 1);
	console_unlock();
	return 0;
}
1468
1469
1470
1471
1472
1473
1474
1475
1476
1477
/**
 * radeon_resume_kms - driver resume entry point (KMS path)
 * @dev: drm device pointer
 *
 * Brings the PCI device back to D0, resumes the hw, restores scratch
 * registers, re-inits encoders/backlight (ATOM parts), hotplug, and
 * finally forces a modeset and turns the displays back on.
 * Returns 0 on success, -1 if the PCI device cannot be re-enabled.
 */
int radeon_resume_kms(struct drm_device *dev)
{
	struct drm_connector *connector;
	struct radeon_device *rdev = dev->dev_private;
	int r;

	/* switcheroo has the GPU off — nothing to resume */
	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	console_lock();
	pci_set_power_state(dev->pdev, PCI_D0);
	pci_restore_state(dev->pdev);
	if (pci_enable_device(dev->pdev)) {
		console_unlock();
		return -1;
	}
	/* resume AGP if in use */
	radeon_agp_resume(rdev);
	radeon_resume(rdev);

	r = radeon_ib_ring_tests(rdev);
	if (r)
		DRM_ERROR("ib ring test failed (%d).\n", r);

	radeon_pm_resume(rdev);
	radeon_restore_bios_scratch_regs(rdev);

	radeon_fbdev_set_suspend(rdev, 0);
	console_unlock();

	/* init dig PHYs, disp eng pll */
	if (rdev->is_atom_bios) {
		radeon_atom_encoder_init(rdev);
		radeon_atom_disp_eng_pll_init(rdev);
		/* turn on the BL */
		if (rdev->mode_info.bl_encoder) {
			u8 bl_level = radeon_get_backlight_level(rdev,
					rdev->mode_info.bl_encoder);
			radeon_set_backlight_level(rdev, rdev->mode_info.bl_encoder,
					bl_level);
		}
	}
	/* reset hpd state */
	radeon_hpd_init(rdev);
	/* blat the mode back in */
	drm_helper_resume_force_mode(dev);
	/* turn on display hw */
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
	}

	drm_kms_helper_poll_enable(dev);
	return 0;
}
1532
1533
1534
1535
1536
1537
1538
1539
1540
/**
 * radeon_gpu_reset - reset the ASIC after a lockup
 * @rdev: radeon_device pointer
 *
 * Attempts to recover a hung GPU: saves pending ring commands, suspends
 * the hw, performs the ASIC reset, resumes and replays the saved ring
 * contents.  If the IB tests still fail after a replay, retries once
 * more without restoring the rings.  Returns 0 on success or a negative
 * error code if the reset failed.
 */
int radeon_gpu_reset(struct radeon_device *rdev)
{
	unsigned ring_sizes[RADEON_NUM_RINGS];
	uint32_t *ring_data[RADEON_NUM_RINGS];

	bool saved = false;

	int i, r;
	int resched;

	down_write(&rdev->exclusive_lock);
	radeon_save_bios_scratch_regs(rdev);
	/* block TTM */
	resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
	radeon_pm_suspend(rdev);
	radeon_suspend(rdev);

	/* snapshot unprocessed commands from every ring so they can be
	 * replayed after the reset */
	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		ring_sizes[i] = radeon_ring_backup(rdev, &rdev->ring[i],
						   &ring_data[i]);
		if (ring_sizes[i]) {
			saved = true;
			dev_info(rdev->dev, "Saved %d dwords of commands "
				 "on ring %d.\n", ring_sizes[i], i);
		}
	}

retry:
	r = radeon_asic_reset(rdev);
	if (!r) {
		dev_info(rdev->dev, "GPU reset succeeded, trying to resume\n");
		radeon_resume(rdev);
	}

	radeon_restore_bios_scratch_regs(rdev);

	if (!r) {
		/* replay the saved commands; radeon_ring_restore frees the
		 * backup buffers */
		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
			radeon_ring_restore(rdev, &rdev->ring[i],
					    ring_sizes[i], ring_data[i]);
			ring_sizes[i] = 0;
			ring_data[i] = NULL;
		}

		r = radeon_ib_ring_tests(rdev);
		if (r) {
			dev_err(rdev->dev, "ib ring test failed (%d).\n", r);
			if (saved) {
				/* the replayed commands may have hung the GPU
				 * again — retry the reset without them */
				saved = false;
				radeon_suspend(rdev);
				goto retry;
			}
		}
	} else {
		/* reset failed: release waiters and the saved ring buffers */
		radeon_fence_driver_force_completion(rdev);
		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
			kfree(ring_data[i]);
		}
	}

	radeon_pm_resume(rdev);
	drm_helper_resume_force_mode(rdev->ddev);

	ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
	if (r) {
		/* bad news, how to tell it to userspace ? */
		dev_info(rdev->dev, "GPU reset failed\n");
	}

	up_write(&rdev->exclusive_lock);
	return r;
}
1613
1614
1615
1616
1617
/**
 * radeon_debugfs_add_files - register a set of debugfs files
 * @rdev: radeon_device pointer
 * @files: drm_info_list array describing the files
 * @nfiles: number of entries in @files
 *
 * Records the file set for later removal and creates the files on both
 * the control and primary DRM minors.  Re-registering the same array is
 * a no-op.  Returns 0 on success, -EINVAL when the component table is
 * full.
 */
int radeon_debugfs_add_files(struct radeon_device *rdev,
			     struct drm_info_list *files,
			     unsigned nfiles)
{
	unsigned i;

	/* already registered? */
	for (i = 0; i < rdev->debugfs_count; i++) {
		if (rdev->debugfs[i].files == files) {
			/* Already registered */
			return 0;
		}
	}

	i = rdev->debugfs_count + 1;
	if (i > RADEON_DEBUGFS_MAX_COMPONENTS) {
		DRM_ERROR("Reached maximum number of debugfs components.\n");
		DRM_ERROR("Report so we increase "
			  "RADEON_DEBUGFS_MAX_COMPONENTS.\n");
		return -EINVAL;
	}
	rdev->debugfs[rdev->debugfs_count].files = files;
	rdev->debugfs[rdev->debugfs_count].num_files = nfiles;
	rdev->debugfs_count = i;
#if defined(CONFIG_DEBUG_FS)
	drm_debugfs_create_files(files, nfiles,
				 rdev->ddev->control->debugfs_root,
				 rdev->ddev->control);
	drm_debugfs_create_files(files, nfiles,
				 rdev->ddev->primary->debugfs_root,
				 rdev->ddev->primary);
#endif
	return 0;
}
1651
/* Remove every debugfs file set previously registered via
 * radeon_debugfs_add_files(), on both DRM minors.  The bookkeeping
 * arrays are left as-is since the device is being torn down. */
static void radeon_debugfs_remove_files(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	unsigned i;

	for (i = 0; i < rdev->debugfs_count; i++) {
		drm_debugfs_remove_files(rdev->debugfs[i].files,
					 rdev->debugfs[i].num_files,
					 rdev->ddev->control);
		drm_debugfs_remove_files(rdev->debugfs[i].files,
					 rdev->debugfs[i].num_files,
					 rdev->ddev->primary);
	}
#endif
}
1667
#if defined(CONFIG_DEBUG_FS)
/* Per-minor debugfs init hook required by the DRM core; the driver
 * creates its files lazily via radeon_debugfs_add_files(), so nothing
 * to do here. */
int radeon_debugfs_init(struct drm_minor *minor)
{
	return 0;
}

/* Per-minor debugfs cleanup hook; files are removed explicitly in
 * radeon_debugfs_remove_files(), so this is intentionally empty. */
void radeon_debugfs_cleanup(struct drm_minor *minor)
{
}
#endif
1678