1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28#include <linux/console.h>
29#include <linux/slab.h>
30#include <drm/drmP.h>
31#include <drm/drm_crtc_helper.h>
32#include <drm/radeon_drm.h>
33#include <linux/vgaarb.h>
34#include <linux/vga_switcheroo.h>
35#include "radeon_reg.h"
36#include "radeon.h"
37#include "atom.h"
38
/*
 * Human-readable ASIC family names, indexed by rdev->family
 * (the CHIP_* values of enum radeon_family).  The order here must
 * stay in sync with that enum; "LAST" terminates the table.
 */
static const char radeon_family_name[][16] = {
	"R100",
	"RV100",
	"RS100",
	"RV200",
	"RS200",
	"R200",
	"RV250",
	"RS300",
	"RV280",
	"R300",
	"R350",
	"RV350",
	"RV380",
	"R420",
	"R423",
	"RV410",
	"RS400",
	"RS480",
	"RS600",
	"RS690",
	"RS740",
	"RV515",
	"R520",
	"RV530",
	"RV560",
	"RV570",
	"R580",
	"R600",
	"RV610",
	"RV630",
	"RV670",
	"RV620",
	"RV635",
	"RS780",
	"RS880",
	"RV770",
	"RV730",
	"RV710",
	"RV740",
	"CEDAR",
	"REDWOOD",
	"JUNIPER",
	"CYPRESS",
	"HEMLOCK",
	"PALM",
	"BARTS",
	"TURKS",
	"CAICOS",
	"LAST",
};
90
91
92
93
94void radeon_surface_init(struct radeon_device *rdev)
95{
96
97 if (rdev->family < CHIP_R600) {
98 int i;
99
100 for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {
101 if (rdev->surface_regs[i].bo)
102 radeon_bo_get_surface_reg(rdev->surface_regs[i].bo);
103 else
104 radeon_clear_surface_reg(rdev, i);
105 }
106
107 WREG32(RADEON_SURFACE_CNTL, 0);
108 }
109}
110
111
112
113
114void radeon_scratch_init(struct radeon_device *rdev)
115{
116 int i;
117
118
119 if (rdev->family < CHIP_R300) {
120 rdev->scratch.num_reg = 5;
121 } else {
122 rdev->scratch.num_reg = 7;
123 }
124 rdev->scratch.reg_base = RADEON_SCRATCH_REG0;
125 for (i = 0; i < rdev->scratch.num_reg; i++) {
126 rdev->scratch.free[i] = true;
127 rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4);
128 }
129}
130
131int radeon_scratch_get(struct radeon_device *rdev, uint32_t *reg)
132{
133 int i;
134
135 for (i = 0; i < rdev->scratch.num_reg; i++) {
136 if (rdev->scratch.free[i]) {
137 rdev->scratch.free[i] = false;
138 *reg = rdev->scratch.reg[i];
139 return 0;
140 }
141 }
142 return -EINVAL;
143}
144
145void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg)
146{
147 int i;
148
149 for (i = 0; i < rdev->scratch.num_reg; i++) {
150 if (rdev->scratch.reg[i] == reg) {
151 rdev->scratch.free[i] = true;
152 return;
153 }
154 }
155}
156
/*
 * radeon_wb_disable - stop GPU writeback
 * @rdev: radeon device
 *
 * Unmaps and unpins the writeback BO (if one exists) and marks
 * writeback as disabled.  The BO itself is not freed here; that is
 * radeon_wb_fini()'s job.
 */
void radeon_wb_disable(struct radeon_device *rdev)
{
	int r;

	if (rdev->wb.wb_obj) {
		r = radeon_bo_reserve(rdev->wb.wb_obj, false);
		if (unlikely(r != 0))
			/* could not reserve the BO; bail without touching state */
			return;
		radeon_bo_kunmap(rdev->wb.wb_obj);
		radeon_bo_unpin(rdev->wb.wb_obj);
		radeon_bo_unreserve(rdev->wb.wb_obj);
	}
	rdev->wb.enabled = false;
}
171
172void radeon_wb_fini(struct radeon_device *rdev)
173{
174 radeon_wb_disable(rdev);
175 if (rdev->wb.wb_obj) {
176 radeon_bo_unref(&rdev->wb.wb_obj);
177 rdev->wb.wb = NULL;
178 rdev->wb.wb_obj = NULL;
179 }
180}
181
/*
 * radeon_wb_init - set up the GPU writeback page
 * @rdev: radeon device
 *
 * Allocates (once), pins and maps a GTT buffer object used for GPU
 * writeback, then decides the writeback policy: forced off by the
 * radeon_no_wb module parameter or on AGP boards, forced on for DCE5
 * ASICs, otherwise enabled with event-based fences from R600 on.
 * Returns 0 on success or a negative error code (state is cleaned up
 * via radeon_wb_fini() on failure).
 */
int radeon_wb_init(struct radeon_device *rdev)
{
	int r;

	if (rdev->wb.wb_obj == NULL) {
		r = radeon_bo_create(rdev, NULL, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true,
				RADEON_GEM_DOMAIN_GTT, &rdev->wb.wb_obj);
		if (r) {
			dev_warn(rdev->dev, "(%d) create WB bo failed\n", r);
			return r;
		}
	}
	r = radeon_bo_reserve(rdev->wb.wb_obj, false);
	if (unlikely(r != 0)) {
		radeon_wb_fini(rdev);
		return r;
	}
	/* pin into GTT so the GPU has a stable address to write to */
	r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
			&rdev->wb.gpu_addr);
	if (r) {
		radeon_bo_unreserve(rdev->wb.wb_obj);
		dev_warn(rdev->dev, "(%d) pin WB bo failed\n", r);
		radeon_wb_fini(rdev);
		return r;
	}
	r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
	radeon_bo_unreserve(rdev->wb.wb_obj);
	if (r) {
		dev_warn(rdev->dev, "(%d) map WB bo failed\n", r);
		radeon_wb_fini(rdev);
		return r;
	}

	/* event-based fence writeback off by default; enabled below */
	rdev->wb.use_event = false;
	/* module parameter radeon_no_wb=1 forces writeback off */
	if (radeon_no_wb == 1)
		rdev->wb.enabled = false;
	else {
		/* writeback kept off on AGP - NOTE(review): presumably
		 * unreliable over the AGP bridge; confirm against history */
		if (rdev->flags & RADEON_IS_AGP) {
			rdev->wb.enabled = false;
		} else {
			rdev->wb.enabled = true;
			/* event fences from R600 onwards */
			if (rdev->family >= CHIP_R600)
				rdev->wb.use_event = true;
		}
	}
	/* DCE5 ASICs always use writeback with events */
	if (ASIC_IS_DCE5(rdev)) {
		rdev->wb.enabled = true;
		rdev->wb.use_event = true;
	}

	dev_info(rdev->dev, "WB %sabled\n", rdev->wb.enabled ? "en" : "dis");

	return 0;
}
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
/*
 * radeon_vram_location - place VRAM in the GPU address space
 * @rdev: radeon device
 * @mc: memory controller layout to fill in
 * @base: base address for VRAM
 *
 * Puts VRAM at @base and clamps the usable size down to the PCI
 * aperture when the full VRAM either would not fit below 4GB or, on
 * AGP, would overlap the GTT window placed earlier.  Fills in
 * vram_start/vram_end and logs the final layout.
 */
void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 base)
{
	mc->vram_start = base;
	/* VRAM must fit entirely below the 4GB boundary */
	if (mc->mc_vram_size > (0xFFFFFFFF - base + 1)) {
		dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
		mc->real_vram_size = mc->aper_size;
		mc->mc_vram_size = mc->aper_size;
	}
	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
	/* on AGP, VRAM must not overlap the GTT range either */
	if (rdev->flags & RADEON_IS_AGP && mc->vram_end > mc->gtt_start && mc->vram_start <= mc->gtt_end) {
		dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
		mc->real_vram_size = mc->aper_size;
		mc->mc_vram_size = mc->aper_size;
	}
	/* recompute the end after any clamping above */
	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
	dev_info(rdev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
			mc->mc_vram_size >> 20, mc->vram_start,
			mc->vram_end, mc->real_vram_size >> 20);
}
302
303
304
305
306
307
308
309
310
311
312
313
314
/*
 * radeon_gtt_location - place the GTT window next to VRAM
 * @rdev: radeon device
 * @mc: memory controller layout (vram_start/vram_end already set)
 *
 * Measures the free space before (size_bf) and after (size_af) the
 * VRAM range within 32-bit space and puts the GTT in the larger gap,
 * shrinking gtt_size if the gap is too small.
 *
 * NOTE(review): gtt_base_align is used as a bitmask here (& ~align),
 * so it is presumably (alignment - 1) - confirm at the call sites.
 */
void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
{
	u64 size_af, size_bf;

	/* space after VRAM, rounded per the base-align mask */
	size_af = ((0xFFFFFFFF - mc->vram_end) + mc->gtt_base_align) & ~mc->gtt_base_align;
	/* space before VRAM */
	size_bf = mc->vram_start & ~mc->gtt_base_align;
	if (size_bf > size_af) {
		if (mc->gtt_size > size_bf) {
			dev_warn(rdev->dev, "limiting GTT\n");
			mc->gtt_size = size_bf;
		}
		/* GTT ends where VRAM begins */
		mc->gtt_start = (mc->vram_start & ~mc->gtt_base_align) - mc->gtt_size;
	} else {
		if (mc->gtt_size > size_af) {
			dev_warn(rdev->dev, "limiting GTT\n");
			mc->gtt_size = size_af;
		}
		/* GTT starts just past VRAM, aligned up */
		mc->gtt_start = (mc->vram_end + 1 + mc->gtt_base_align) & ~mc->gtt_base_align;
	}
	mc->gtt_end = mc->gtt_start + mc->gtt_size - 1;
	dev_info(rdev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n",
			mc->gtt_size >> 20, mc->gtt_start, mc->gtt_end);
}
338
339
340
341
/*
 * radeon_card_posted - check whether the GPU was already POSTed
 * @rdev: radeon device
 *
 * Returns true if any CRTC is enabled (the register set checked
 * depends on the display-engine generation) or, failing that, if the
 * config-memsize register reads non-zero - both signs that the VBIOS
 * has already initialized the card.
 */
bool radeon_card_posted(struct radeon_device *rdev)
{
	uint32_t reg;

	/* first check CRTCs */
	if (ASIC_IS_DCE41(rdev)) {
		/* DCE4.1: two CRTCs */
		reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET);
		if (reg & EVERGREEN_CRTC_MASTER_EN)
			return true;
	} else if (ASIC_IS_DCE4(rdev)) {
		/* DCE4: six CRTCs */
		reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET) |
			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) |
			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET) |
			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) |
			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
		if (reg & EVERGREEN_CRTC_MASTER_EN)
			return true;
	} else if (ASIC_IS_AVIVO(rdev)) {
		/* AVIVO display block: two CRTCs */
		reg = RREG32(AVIVO_D1CRTC_CONTROL) |
		      RREG32(AVIVO_D2CRTC_CONTROL);
		if (reg & AVIVO_CRTC_EN) {
			return true;
		}
	} else {
		/* legacy display block */
		reg = RREG32(RADEON_CRTC_GEN_CNTL) |
		      RREG32(RADEON_CRTC2_GEN_CNTL);
		if (reg & RADEON_CRTC_EN) {
			return true;
		}
	}

	/* then check MEM_SIZE, in case the crtcs are off */
	if (rdev->family >= CHIP_R600)
		reg = RREG32(R600_CONFIG_MEMSIZE);
	else
		reg = RREG32(RADEON_CONFIG_MEMSIZE);

	if (reg)
		return true;

	return false;

}
387
388void radeon_update_bandwidth_info(struct radeon_device *rdev)
389{
390 fixed20_12 a;
391 u32 sclk = rdev->pm.current_sclk;
392 u32 mclk = rdev->pm.current_mclk;
393
394
395 a.full = dfixed_const(100);
396 rdev->pm.sclk.full = dfixed_const(sclk);
397 rdev->pm.sclk.full = dfixed_div(rdev->pm.sclk, a);
398 rdev->pm.mclk.full = dfixed_const(mclk);
399 rdev->pm.mclk.full = dfixed_div(rdev->pm.mclk, a);
400
401 if (rdev->flags & RADEON_IS_IGP) {
402 a.full = dfixed_const(16);
403
404 rdev->pm.core_bandwidth.full = dfixed_div(rdev->pm.sclk, a);
405 }
406}
407
408bool radeon_boot_test_post_card(struct radeon_device *rdev)
409{
410 if (radeon_card_posted(rdev))
411 return true;
412
413 if (rdev->bios) {
414 DRM_INFO("GPU not posted. posting now...\n");
415 if (rdev->is_atom_bios)
416 atom_asic_init(rdev->mode_info.atom_context);
417 else
418 radeon_combios_asic_init(rdev->ddev);
419 return true;
420 } else {
421 dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
422 return false;
423 }
424}
425
426int radeon_dummy_page_init(struct radeon_device *rdev)
427{
428 if (rdev->dummy_page.page)
429 return 0;
430 rdev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO);
431 if (rdev->dummy_page.page == NULL)
432 return -ENOMEM;
433 rdev->dummy_page.addr = pci_map_page(rdev->pdev, rdev->dummy_page.page,
434 0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
435 if (pci_dma_mapping_error(rdev->pdev, rdev->dummy_page.addr)) {
436 dev_err(&rdev->pdev->dev, "Failed to DMA MAP the dummy page\n");
437 __free_page(rdev->dummy_page.page);
438 rdev->dummy_page.page = NULL;
439 return -ENOMEM;
440 }
441 return 0;
442}
443
444void radeon_dummy_page_fini(struct radeon_device *rdev)
445{
446 if (rdev->dummy_page.page == NULL)
447 return;
448 pci_unmap_page(rdev->pdev, rdev->dummy_page.addr,
449 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
450 __free_page(rdev->dummy_page.page);
451 rdev->dummy_page.page = NULL;
452}
453
454
455
456static uint32_t cail_pll_read(struct card_info *info, uint32_t reg)
457{
458 struct radeon_device *rdev = info->dev->dev_private;
459 uint32_t r;
460
461 r = rdev->pll_rreg(rdev, reg);
462 return r;
463}
464
/* ATOM interpreter callback: write a PLL register via the per-ASIC hook. */
static void cail_pll_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct radeon_device *rdev = info->dev->dev_private;

	rdev->pll_wreg(rdev, reg, val);
}
471
472static uint32_t cail_mc_read(struct card_info *info, uint32_t reg)
473{
474 struct radeon_device *rdev = info->dev->dev_private;
475 uint32_t r;
476
477 r = rdev->mc_rreg(rdev, reg);
478 return r;
479}
480
/* ATOM interpreter callback: write a memory-controller register. */
static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct radeon_device *rdev = info->dev->dev_private;

	rdev->mc_wreg(rdev, reg, val);
}
487
/* ATOM interpreter callback: MMIO register write.  ATOM passes dword
 * indices, hence the *4 conversion to a byte offset. */
static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct radeon_device *rdev = info->dev->dev_private;

	WREG32(reg*4, val);
}
494
495static uint32_t cail_reg_read(struct card_info *info, uint32_t reg)
496{
497 struct radeon_device *rdev = info->dev->dev_private;
498 uint32_t r;
499
500 r = RREG32(reg*4);
501 return r;
502}
503
/* ATOM interpreter callback: register write through the PCI I/O BAR. */
static void cail_ioreg_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct radeon_device *rdev = info->dev->dev_private;

	WREG32_IO(reg*4, val);
}
510
511static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg)
512{
513 struct radeon_device *rdev = info->dev->dev_private;
514 uint32_t r;
515
516 r = RREG32_IO(reg*4);
517 return r;
518}
519
520int radeon_atombios_init(struct radeon_device *rdev)
521{
522 struct card_info *atom_card_info =
523 kzalloc(sizeof(struct card_info), GFP_KERNEL);
524
525 if (!atom_card_info)
526 return -ENOMEM;
527
528 rdev->mode_info.atom_card_info = atom_card_info;
529 atom_card_info->dev = rdev->ddev;
530 atom_card_info->reg_read = cail_reg_read;
531 atom_card_info->reg_write = cail_reg_write;
532
533 if (rdev->rio_mem) {
534 atom_card_info->ioreg_read = cail_ioreg_read;
535 atom_card_info->ioreg_write = cail_ioreg_write;
536 } else {
537 DRM_ERROR("Unable to find PCI I/O BAR; using MMIO for ATOM IIO\n");
538 atom_card_info->ioreg_read = cail_reg_read;
539 atom_card_info->ioreg_write = cail_reg_write;
540 }
541 atom_card_info->mc_read = cail_mc_read;
542 atom_card_info->mc_write = cail_mc_write;
543 atom_card_info->pll_read = cail_pll_read;
544 atom_card_info->pll_write = cail_pll_write;
545
546 rdev->mode_info.atom_context = atom_parse(atom_card_info, rdev->bios);
547 mutex_init(&rdev->mode_info.atom_context->mutex);
548 radeon_atom_initialize_bios_scratch_regs(rdev->ddev);
549 atom_allocate_fb_scratch(rdev->mode_info.atom_context);
550 return 0;
551}
552
553void radeon_atombios_fini(struct radeon_device *rdev)
554{
555 if (rdev->mode_info.atom_context) {
556 kfree(rdev->mode_info.atom_context->scratch);
557 kfree(rdev->mode_info.atom_context);
558 }
559 kfree(rdev->mode_info.atom_card_info);
560}
561
/* COMBIOS counterpart of radeon_atombios_init(): only primes the BIOS
 * scratch registers - there is no interpreter context to allocate. */
int radeon_combios_init(struct radeon_device *rdev)
{
	radeon_combios_initialize_bios_scratch_regs(rdev->ddev);
	return 0;
}
567
/* Nothing to tear down for COMBIOS; kept for symmetry with
 * radeon_combios_init(). */
void radeon_combios_fini(struct radeon_device *rdev)
{
}
571
572
573static unsigned int radeon_vga_set_decode(void *cookie, bool state)
574{
575 struct radeon_device *rdev = cookie;
576 radeon_vga_set_state(rdev, state);
577 if (state)
578 return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
579 VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
580 else
581 return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
582}
583
584void radeon_check_arguments(struct radeon_device *rdev)
585{
586
587 switch (radeon_vram_limit) {
588 case 0:
589 case 4:
590 case 8:
591 case 16:
592 case 32:
593 case 64:
594 case 128:
595 case 256:
596 case 512:
597 case 1024:
598 case 2048:
599 case 4096:
600 break;
601 default:
602 dev_warn(rdev->dev, "vram limit (%d) must be a power of 2\n",
603 radeon_vram_limit);
604 radeon_vram_limit = 0;
605 break;
606 }
607 radeon_vram_limit = radeon_vram_limit << 20;
608
609 switch (radeon_gart_size) {
610 case 4:
611 case 8:
612 case 16:
613 dev_warn(rdev->dev, "gart size (%d) too small forcing to 512M\n",
614 radeon_gart_size);
615 radeon_gart_size = 512;
616 break;
617 case 32:
618 case 64:
619 case 128:
620 case 256:
621 case 512:
622 case 1024:
623 case 2048:
624 case 4096:
625 break;
626 default:
627 dev_warn(rdev->dev, "gart size (%d) must be a power of 2\n",
628 radeon_gart_size);
629 radeon_gart_size = 512;
630 break;
631 }
632 rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
633
634 switch (radeon_agpmode) {
635 case -1:
636 case 0:
637 case 1:
638 case 2:
639 case 4:
640 case 8:
641 break;
642 default:
643 dev_warn(rdev->dev, "invalid AGP mode %d (valid mode: "
644 "-1, 0, 1, 2, 4, 8)\n", radeon_agpmode);
645 radeon_agpmode = 0;
646 break;
647 }
648}
649
650static void radeon_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
651{
652 struct drm_device *dev = pci_get_drvdata(pdev);
653 pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
654 if (state == VGA_SWITCHEROO_ON) {
655 printk(KERN_INFO "radeon: switched on\n");
656
657 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
658 radeon_resume_kms(dev);
659 dev->switch_power_state = DRM_SWITCH_POWER_ON;
660 drm_kms_helper_poll_enable(dev);
661 } else {
662 printk(KERN_INFO "radeon: switched off\n");
663 drm_kms_helper_poll_disable(dev);
664 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
665 radeon_suspend_kms(dev, pmm);
666 dev->switch_power_state = DRM_SWITCH_POWER_OFF;
667 }
668}
669
670static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
671{
672 struct drm_device *dev = pci_get_drvdata(pdev);
673 bool can_switch;
674
675 spin_lock(&dev->count_lock);
676 can_switch = (dev->open_count == 0);
677 spin_unlock(&dev->count_lock);
678 return can_switch;
679}
680
681
/*
 * radeon_device_init - core per-device driver initialization
 * @rdev: radeon device structure to fill in
 * @ddev: associated drm device
 * @pdev: associated PCI device
 * @flags: family and RADEON_IS_* feature bits
 *
 * Sets up software state (locks, queues), picks the DMA mask, maps the
 * MMIO and I/O BARs, registers with the VGA arbiter and vga_switcheroo,
 * and runs the per-ASIC radeon_init().  If acceleration fails to come
 * up on an AGP board, the ASIC is reset and init is retried with AGP
 * disabled.  Returns 0 on success or a negative errno.
 */
int radeon_device_init(struct radeon_device *rdev,
		       struct drm_device *ddev,
		       struct pci_dev *pdev,
		       uint32_t flags)
{
	int r, i;
	int dma_bits;

	rdev->shutdown = false;
	rdev->dev = &pdev->dev;
	rdev->ddev = ddev;
	rdev->pdev = pdev;
	rdev->flags = flags;
	rdev->family = flags & RADEON_FAMILY_MASK;
	rdev->is_atom_bios = false;
	rdev->usec_timeout = RADEON_MAX_USEC_TIMEOUT;
	rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
	rdev->gpu_lockup = false;
	rdev->accel_working = false;

	DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X).\n",
		radeon_family_name[rdev->family], pdev->vendor, pdev->device);

	/* all mutex/lock/queue initialization happens before any hardware
	 * is touched, so error paths can tear down safely */
	mutex_init(&rdev->cs_mutex);
	mutex_init(&rdev->ib_pool.mutex);
	mutex_init(&rdev->cp.mutex);
	mutex_init(&rdev->dc_hw_i2c_mutex);
	if (rdev->family >= CHIP_R600)
		spin_lock_init(&rdev->ih.lock);
	mutex_init(&rdev->gem.mutex);
	mutex_init(&rdev->pm.mutex);
	mutex_init(&rdev->vram_mutex);
	rwlock_init(&rdev->fence_drv.lock);
	INIT_LIST_HEAD(&rdev->gem.objects);
	init_waitqueue_head(&rdev->irq.vblank_queue);
	init_waitqueue_head(&rdev->irq.idle_queue);

	/* install per-ASIC function pointers, then sanitize module params */
	r = radeon_asic_init(rdev);
	if (r)
		return r;
	radeon_check_arguments(rdev);

	/* newer IGPs have an internal gart; some RS4xx still report AGP,
	 * so strip the AGP flag for them here */
	if ((rdev->family >= CHIP_RS400) &&
	    (rdev->flags & RADEON_IS_IGP)) {
		rdev->flags &= ~RADEON_IS_AGP;
	}

	if (rdev->flags & RADEON_IS_AGP && radeon_agpmode == -1) {
		radeon_agp_disable(rdev);
	}

	/* DMA mask: AGP and plain-PCI boards are limited to 32 bits,
	 * everything else gets 40 bits */
	rdev->need_dma32 = false;
	if (rdev->flags & RADEON_IS_AGP)
		rdev->need_dma32 = true;
	if (rdev->flags & RADEON_IS_PCI)
		rdev->need_dma32 = true;

	dma_bits = rdev->need_dma32 ? 32 : 40;
	r = pci_set_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		printk(KERN_WARNING "radeon: No suitable DMA available.\n");
	}

	/* map the register BAR (BAR 2) */
	rdev->rmmio_base = pci_resource_start(rdev->pdev, 2);
	rdev->rmmio_size = pci_resource_len(rdev->pdev, 2);
	rdev->rmmio = ioremap(rdev->rmmio_base, rdev->rmmio_size);
	if (rdev->rmmio == NULL) {
		return -ENOMEM;
	}
	DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)rdev->rmmio_base);
	DRM_INFO("register mmio size: %u\n", (unsigned)rdev->rmmio_size);

	/* find and map the first I/O BAR (used for ATOM IIO access) */
	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
		if (pci_resource_flags(rdev->pdev, i) & IORESOURCE_IO) {
			rdev->rio_mem_size = pci_resource_len(rdev->pdev, i);
			rdev->rio_mem = pci_iomap(rdev->pdev, i, rdev->rio_mem_size);
			break;
		}
	}
	if (rdev->rio_mem == NULL)
		DRM_ERROR("Unable to find PCI I/O BAR\n");

	/* register with the VGA arbiter and the hybrid-graphics switcher */
	vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode);
	vga_switcheroo_register_client(rdev->pdev,
				       radeon_switcheroo_set_state,
				       NULL,
				       radeon_switcheroo_can_switch);

	r = radeon_init(rdev);
	if (r)
		return r;

	if (rdev->flags & RADEON_IS_AGP && !rdev->accel_working) {
		/* acceleration did not come up with AGP enabled: reset the
		 * ASIC, tear down, disable AGP and try once more */
		radeon_asic_reset(rdev);
		radeon_fini(rdev);
		radeon_agp_disable(rdev);
		r = radeon_init(rdev);
		if (r)
			return r;
	}
	if (radeon_testing) {
		radeon_test_moves(rdev);
	}
	if (radeon_benchmarking) {
		radeon_benchmark(rdev);
	}
	return 0;
}
811
/*
 * radeon_device_fini - tear down what radeon_device_init() set up
 * @rdev: radeon device
 *
 * Roughly the reverse order of init: evict VRAM, run the per-ASIC
 * fini, unregister from vga_switcheroo and the VGA arbiter, then unmap
 * the I/O and MMIO BARs.
 */
void radeon_device_fini(struct radeon_device *rdev)
{
	DRM_INFO("radeon: finishing device.\n");
	rdev->shutdown = true;
	/* evict vram memory before the per-ASIC teardown */
	radeon_bo_evict_vram(rdev);
	radeon_fini(rdev);
	vga_switcheroo_unregister_client(rdev->pdev);
	vga_client_register(rdev->pdev, NULL, NULL, NULL);
	if (rdev->rio_mem)
		pci_iounmap(rdev->pdev, rdev->rio_mem);
	rdev->rio_mem = NULL;
	iounmap(rdev->rmmio);
	rdev->rmmio = NULL;
}
827
828
829
830
831
/*
 * radeon_suspend_kms - KMS suspend entry point
 * @dev: drm device
 * @state: PM event being handled
 *
 * Turns displays off, unpins scanout buffers, evicts VRAM, waits for
 * the GPU to idle, saves BIOS scratch state and suspends PM/ASIC/AGP.
 * The PCI device is only disabled and put in D3hot for a real suspend
 * event.  Returns 0 on success, -ENODEV on a bad device pointer.
 */
int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
{
	struct radeon_device *rdev;
	struct drm_crtc *crtc;
	struct drm_connector *connector;
	int r;

	if (dev == NULL || dev->dev_private == NULL) {
		return -ENODEV;
	}
	if (state.event == PM_EVENT_PRETHAW) {
		/* nothing to do before a thaw */
		return 0;
	}
	rdev = dev->dev_private;

	/* vga_switcheroo already powered us down: nothing left to suspend */
	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	/* turn off display hardware */
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
	}

	/* unpin the front buffers so they can be evicted */
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		struct radeon_framebuffer *rfb = to_radeon_framebuffer(crtc->fb);
		struct radeon_bo *robj;

		if (rfb == NULL || rfb->obj == NULL) {
			continue;
		}
		robj = rfb->obj->driver_private;
		/* the fbdev framebuffer is left pinned across suspend */
		if (!radeon_fbdev_robj_is_fb(rdev, robj)) {
			r = radeon_bo_reserve(robj, false);
			if (r == 0) {
				radeon_bo_unpin(robj);
				radeon_bo_unreserve(robj);
			}
		}
	}
	/* evict vram memory */
	radeon_bo_evict_vram(rdev);
	/* wait for the GPU to finish the current batch */
	radeon_fence_wait_last(rdev);

	radeon_save_bios_scratch_regs(rdev);

	radeon_pm_suspend(rdev);
	radeon_suspend(rdev);
	radeon_hpd_fini(rdev);
	/* evict again - suspend above may have migrated BOs back in */
	radeon_bo_evict_vram(rdev);

	radeon_agp_suspend(rdev);

	pci_save_state(dev->pdev);
	if (state.event == PM_EVENT_SUSPEND) {
		/* real suspend (not freeze): power the PCI device down */
		pci_disable_device(dev->pdev);
		pci_set_power_state(dev->pdev, PCI_D3hot);
	}
	console_lock();
	radeon_fbdev_set_suspend(rdev, 1);
	console_unlock();
	return 0;
}
899
/*
 * radeon_resume_kms - KMS resume entry point
 * @dev: drm device
 *
 * Re-enables the PCI device, resumes AGP, the ASIC, power management
 * and BIOS scratch state, then re-initializes hotplug detection,
 * forces the mode back and turns connectors on.  Returns 0 on success
 * or -1 when the PCI device could not be re-enabled.
 */
int radeon_resume_kms(struct drm_device *dev)
{
	struct drm_connector *connector;
	struct radeon_device *rdev = dev->dev_private;

	/* vga_switcheroo has the card off: nothing to resume */
	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	console_lock();
	pci_set_power_state(dev->pdev, PCI_D0);
	pci_restore_state(dev->pdev);
	if (pci_enable_device(dev->pdev)) {
		console_unlock();
		return -1;
	}
	pci_set_master(dev->pdev);
	/* resume AGP if in use */
	radeon_agp_resume(rdev);
	radeon_resume(rdev);
	radeon_pm_resume(rdev);
	radeon_restore_bios_scratch_regs(rdev);

	radeon_fbdev_set_suspend(rdev, 0);
	console_unlock();

	/* re-initialize hotplug detect state */
	radeon_hpd_init(rdev);
	/* force the saved mode back onto the hardware */
	drm_helper_resume_force_mode(dev);
	/* turn the displays back on */
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
	}
	return 0;
}
935
936int radeon_gpu_reset(struct radeon_device *rdev)
937{
938 int r;
939 int resched;
940
941 radeon_save_bios_scratch_regs(rdev);
942
943 resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
944 radeon_suspend(rdev);
945
946 r = radeon_asic_reset(rdev);
947 if (!r) {
948 dev_info(rdev->dev, "GPU reset succeed\n");
949 radeon_resume(rdev);
950 radeon_restore_bios_scratch_regs(rdev);
951 drm_helper_resume_force_mode(rdev->ddev);
952 ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
953 return 0;
954 }
955
956 dev_info(rdev->dev, "GPU reset failed\n");
957 return r;
958}
959
960
961
962
963
/* Bookkeeping for debugfs file sets registered by the driver:
 * each entry records one array of drm_info_list files and its length. */
struct radeon_debugfs {
	struct drm_info_list *files;
	unsigned num_files;
};
/* module-wide registry of file sets plus the number of sets in use */
static struct radeon_debugfs _radeon_debugfs[RADEON_DEBUGFS_MAX_NUM_FILES];
static unsigned _radeon_debugfs_count = 0;
970
/*
 * radeon_debugfs_add_files - register a set of debugfs files
 * @rdev: radeon device
 * @files: array of drm_info_list entries to expose
 * @nfiles: number of entries in @files
 *
 * Records the set in the module-wide table (duplicates, keyed by the
 * @files pointer, are silently accepted) and, when debugfs is built
 * in, creates the files on both the control and primary drm minors.
 * Returns 0 on success or -EINVAL when the table is full.
 */
int radeon_debugfs_add_files(struct radeon_device *rdev,
			     struct drm_info_list *files,
			     unsigned nfiles)
{
	unsigned i;

	for (i = 0; i < _radeon_debugfs_count; i++) {
		if (_radeon_debugfs[i].files == files) {
			/* this exact set was already registered */
			return 0;
		}
	}
	/* NOTE(review): this check adds the new set's file count to the
	 * number of registered sets - conservative but inconsistent units;
	 * confirm intent before changing */
	if ((_radeon_debugfs_count + nfiles) > RADEON_DEBUGFS_MAX_NUM_FILES) {
		DRM_ERROR("Reached maximum number of debugfs files.\n");
		DRM_ERROR("Report so we increase RADEON_DEBUGFS_MAX_NUM_FILES.\n");
		return -EINVAL;
	}
	_radeon_debugfs[_radeon_debugfs_count].files = files;
	_radeon_debugfs[_radeon_debugfs_count].num_files = nfiles;
	_radeon_debugfs_count++;
#if defined(CONFIG_DEBUG_FS)
	drm_debugfs_create_files(files, nfiles,
				 rdev->ddev->control->debugfs_root,
				 rdev->ddev->control);
	drm_debugfs_create_files(files, nfiles,
				 rdev->ddev->primary->debugfs_root,
				 rdev->ddev->primary);
#endif
	return 0;
}
1001
1002#if defined(CONFIG_DEBUG_FS)
/* drm per-minor debugfs init hook: files are created lazily by
 * radeon_debugfs_add_files(), so nothing to do up front. */
int radeon_debugfs_init(struct drm_minor *minor)
{
	return 0;
}
1007
1008void radeon_debugfs_cleanup(struct drm_minor *minor)
1009{
1010 unsigned i;
1011
1012 for (i = 0; i < _radeon_debugfs_count; i++) {
1013 drm_debugfs_remove_files(_radeon_debugfs[i].files,
1014 _radeon_debugfs[i].num_files, minor);
1015 }
1016}
1017#endif
1018