linux/drivers/gpu/drm/radeon/r420.c
/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include "radeon_reg.h"
#include "radeon.h"
#include "radeon_asic.h"
#include "atom.h"
#include "r100d.h"
#include "r420d.h"
#include "r420_reg_safe.h"

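/*
 * Fill in the default/low/mid/high power management profiles with the
 * power state index and clock mode index to use when the display is on
 * or off (dpms on/off).
 */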
void r420_pm_init_profile(struct radeon_device *rdev)
{
        /* default */
        rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
        rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
        rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
        rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
        /* low sh */
        rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 0;
        rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 0;
        rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
        rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
        /* mid sh */
        rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 0;
        rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 1;
        rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
        rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
        /* high sh */
        rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 0;
        rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
        rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
        rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
        /* low mh */
        rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 0;
        rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
        rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
        rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
        /* mid mh */
        rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 0;
        rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
        rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
        rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
        /* high mh */
        rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 0;
        rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
        rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
        rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
}

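/* Hook up the r420 register safe bitmap used to validate command streams. */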
static void r420_set_reg_safe(struct radeon_device *rdev)
{
        rdev->config.r300.reg_safe_bm = r420_reg_safe_bm;
        rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(r420_reg_safe_bm);
}

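/*
 * Program the GB (graphics backend) pipe configuration: read the number
 * of quad pipes from GB_PIPE_SELECT (forced to 1 on the SE variants),
 * set up GB_TILE_CONFIG/SU_REG_DEST accordingly and record the number
 * of quad and Z pipes in rdev.
 */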
void r420_pipes_init(struct radeon_device *rdev)
{
        unsigned tmp;
        unsigned gb_pipe_select;
        unsigned num_pipes;

        /* GA_ENHANCE workaround TCL deadlock issue */
        WREG32(R300_GA_ENHANCE, R300_GA_DEADLOCK_CNTL | R300_GA_FASTSYNC_CNTL |
               (1 << 2) | (1 << 3));
        /* add idle wait as per freedesktop.org bug 24041 */
        if (r100_gui_wait_for_idle(rdev)) {
                printk(KERN_WARNING "Failed to wait GUI idle while "
                       "programming pipes. Bad things might happen.\n");
        }
        /* get max number of pipes */
        gb_pipe_select = RREG32(R400_GB_PIPE_SELECT);
        num_pipes = ((gb_pipe_select >> 12) & 3) + 1;

        /* SE chips have 1 pipe */
        if ((rdev->pdev->device == 0x5e4c) ||
            (rdev->pdev->device == 0x5e4f))
                num_pipes = 1;

        rdev->num_gb_pipes = num_pipes;
        tmp = 0;
        switch (num_pipes) {
        default:
                /* force to 1 pipe */
                num_pipes = 1;
                /* fall through */
        case 1:
                tmp = (0 << 1);
                break;
        case 2:
                tmp = (3 << 1);
                break;
        case 3:
                tmp = (6 << 1);
                break;
        case 4:
                tmp = (7 << 1);
                break;
        }
        WREG32(R500_SU_REG_DEST, (1 << num_pipes) - 1);
        /* Sub pixel 1/12 so we can have 4K rendering according to doc */
        tmp |= R300_TILE_SIZE_16 | R300_ENABLE_TILING;
        WREG32(R300_GB_TILE_CONFIG, tmp);
        if (r100_gui_wait_for_idle(rdev)) {
                printk(KERN_WARNING "Failed to wait GUI idle while "
                       "programming pipes. Bad things might happen.\n");
        }

        tmp = RREG32(R300_DST_PIPE_CONFIG);
        WREG32(R300_DST_PIPE_CONFIG, tmp | R300_PIPE_AUTO_CONFIG);

        WREG32(R300_RB2D_DSTCACHE_MODE,
               RREG32(R300_RB2D_DSTCACHE_MODE) |
               R300_DC_AUTOFLUSH_ENABLE |
               R300_DC_DC_DISABLE_IGNORE_PE);

        if (r100_gui_wait_for_idle(rdev)) {
                printk(KERN_WARNING "Failed to wait GUI idle while "
                       "programming pipes. Bad things might happen.\n");
        }

        if (rdev->family == CHIP_RV530) {
                tmp = RREG32(RV530_GB_PIPE_SELECT2);
                if ((tmp & 3) == 3)
                        rdev->num_z_pipes = 2;
                else
                        rdev->num_z_pipes = 1;
        } else
                rdev->num_z_pipes = 1;

        DRM_INFO("radeon: %d quad pipes, %d z pipes initialized.\n",
                 rdev->num_gb_pipes, rdev->num_z_pipes);
}

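/*
 * Read a memory controller register through the MC_IND_INDEX/MC_IND_DATA
 * indirect access pair, serialized by mc_idx_lock.
 */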
u32 r420_mc_rreg(struct radeon_device *rdev, u32 reg)
{
        unsigned long flags;
        u32 r;

        spin_lock_irqsave(&rdev->mc_idx_lock, flags);
        WREG32(R_0001F8_MC_IND_INDEX, S_0001F8_MC_IND_ADDR(reg));
        r = RREG32(R_0001FC_MC_IND_DATA);
        spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
        return r;
}

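/*
 * Write a memory controller register through the MC_IND_INDEX/MC_IND_DATA
 * indirect access pair with MC_IND_WR_EN set, serialized by mc_idx_lock.
 */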
void r420_mc_wreg(struct radeon_device *rdev, u32 reg, u32 v)
{
        unsigned long flags;

        spin_lock_irqsave(&rdev->mc_idx_lock, flags);
        WREG32(R_0001F8_MC_IND_INDEX, S_0001F8_MC_IND_ADDR(reg) |
                S_0001F8_MC_IND_WR_EN(1));
        WREG32(R_0001FC_MC_IND_DATA, v);
        spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
}

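/* Register the RBBM and pipes debugfs files; failures are only reported. */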
static void r420_debugfs(struct radeon_device *rdev)
{
        if (r100_debugfs_rbbm_init(rdev)) {
                DRM_ERROR("Failed to register debugfs file for RBBM !\n");
        }
        if (r420_debugfs_pipes_info_init(rdev)) {
                DRM_ERROR("Failed to register debugfs file for pipes !\n");
        }
}

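/*
 * Restore clock setup after reset/resume: re-enable dynamic clock gating
 * if requested via the dynclks module parameter, and force the CP and VIP
 * (plus PX/TX on R420) source clocks on in SCLK_CNTL.
 */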
static void r420_clock_resume(struct radeon_device *rdev)
{
        u32 sclk_cntl;

        if (radeon_dynclks != -1 && radeon_dynclks)
                radeon_atom_set_clock_gating(rdev, 1);
        sclk_cntl = RREG32_PLL(R_00000D_SCLK_CNTL);
        sclk_cntl |= S_00000D_FORCE_CP(1) | S_00000D_FORCE_VIP(1);
        if (rdev->family == CHIP_R420)
                sclk_cntl |= S_00000D_FORCE_PX(1) | S_00000D_FORCE_TX(1);
        WREG32_PLL(R_00000D_SCLK_CNTL, sclk_cntl);
}

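/*
 * Queue the CP RESYNC packet that works around the R420/RV410 CP DMA
 * vs. 2D engine lockup; see the comment in the function body.
 */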
static void r420_cp_errata_init(struct radeon_device *rdev)
{
        struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];

        /* RV410 and R420 can lock up if CP DMA to host memory happens
         * while the 2D engine is busy.
         *
         * The proper workaround is to queue a RESYNC at the beginning
         * of the CP init, apparently.
         */
        radeon_scratch_get(rdev, &rdev->config.r300.resync_scratch);
        radeon_ring_lock(rdev, ring, 8);
        radeon_ring_write(ring, PACKET0(R300_CP_RESYNC_ADDR, 1));
        radeon_ring_write(ring, rdev->config.r300.resync_scratch);
        radeon_ring_write(ring, 0xDEADBEEF);
        radeon_ring_unlock_commit(rdev, ring, false);
}

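/*
 * Catch the RESYNC dispatched by r420_cp_errata_init() and free the
 * scratch register it used.
 */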
static void r420_cp_errata_fini(struct radeon_device *rdev)
{
        struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];

        /* Catch the RESYNC we dispatched all the way back,
         * at the very beginning of the CP init.
         */
        radeon_ring_lock(rdev, ring, 8);
        radeon_ring_write(ring, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
        radeon_ring_write(ring, R300_RB3D_DC_FINISH);
        radeon_ring_unlock_commit(rdev, ring, false);
        radeon_scratch_free(rdev, rdev->config.r300.resync_scratch);
}

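/*
 * Bring the asic up: common registers, MC, clocks, GART, pipes, writeback,
 * fences, IRQs, the CP ring (1MB) and the IB pool.  Shared by the init and
 * resume paths; returns 0 on success or a negative error code.
 */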
static int r420_startup(struct radeon_device *rdev)
{
        int r;

        /* set common regs */
        r100_set_common_regs(rdev);
        /* program mc */
        r300_mc_program(rdev);
        /* Resume clock */
        r420_clock_resume(rdev);
        /* Initialize GART (initialize after TTM so we can allocate
         * memory through TTM but finalize after TTM) */
        if (rdev->flags & RADEON_IS_PCIE) {
                r = rv370_pcie_gart_enable(rdev);
                if (r)
                        return r;
        }
        if (rdev->flags & RADEON_IS_PCI) {
                r = r100_pci_gart_enable(rdev);
                if (r)
                        return r;
        }
        r420_pipes_init(rdev);

        /* allocate wb buffer */
        r = radeon_wb_init(rdev);
        if (r)
                return r;

        r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
        if (r) {
                dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
                return r;
        }

        /* Enable IRQ */
        if (!rdev->irq.installed) {
                r = radeon_irq_kms_init(rdev);
                if (r)
                        return r;
        }

        r100_irq_set(rdev);
        rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
        /* 1M ring buffer */
        r = r100_cp_init(rdev, 1024 * 1024);
        if (r) {
                dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
                return r;
        }
        r420_cp_errata_init(rdev);

        r = radeon_ib_pool_init(rdev);
        if (r) {
                dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
                return r;
        }

        return 0;
}

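/*
 * Resume path: disable GART, reset the GPU, re-post the card through the
 * ATOM or combios tables, then restart the engines via r420_startup().
 */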
int r420_resume(struct radeon_device *rdev)
{
        int r;

        /* Make sure GART is not working */
        if (rdev->flags & RADEON_IS_PCIE)
                rv370_pcie_gart_disable(rdev);
        if (rdev->flags & RADEON_IS_PCI)
                r100_pci_gart_disable(rdev);
        /* Resume clock before doing reset */
        r420_clock_resume(rdev);
        /* Reset gpu before posting otherwise ATOM will enter infinite loop */
        if (radeon_asic_reset(rdev)) {
                dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
                        RREG32(R_000E40_RBBM_STATUS),
                        RREG32(R_0007C0_CP_STAT));
        }
        /* check if cards are posted or not */
        if (rdev->is_atom_bios) {
                atom_asic_init(rdev->mode_info.atom_context);
        } else {
                radeon_combios_asic_init(rdev->ddev);
        }
        /* Resume clock after posting */
        r420_clock_resume(rdev);
        /* Initialize surface registers */
        radeon_surface_init(rdev);

        rdev->accel_working = true;
        r = r420_startup(rdev);
        if (r) {
                rdev->accel_working = false;
        }
        return r;
}

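/*
 * Suspend path: quiesce power management, undo the CP errata workaround
 * and disable the CP, writeback, IRQs and GART.
 */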
int r420_suspend(struct radeon_device *rdev)
{
        radeon_pm_suspend(rdev);
        r420_cp_errata_fini(rdev);
        r100_cp_disable(rdev);
        radeon_wb_disable(rdev);
        r100_irq_disable(rdev);
        if (rdev->flags & RADEON_IS_PCIE)
                rv370_pcie_gart_disable(rdev);
        if (rdev->flags & RADEON_IS_PCI)
                r100_pci_gart_disable(rdev);
        return 0;
}

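/*
 * Driver unload: tear down everything set up by r420_init()/r420_startup(),
 * including the CP, writeback, IB pool, GEM, GART, AGP, IRQs, fences,
 * buffer objects, BIOS tables and the BIOS image.
 */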
void r420_fini(struct radeon_device *rdev)
{
        radeon_pm_fini(rdev);
        r100_cp_fini(rdev);
        radeon_wb_fini(rdev);
        radeon_ib_pool_fini(rdev);
        radeon_gem_fini(rdev);
        if (rdev->flags & RADEON_IS_PCIE)
                rv370_pcie_gart_fini(rdev);
        if (rdev->flags & RADEON_IS_PCI)
                r100_pci_gart_fini(rdev);
        radeon_agp_fini(rdev);
        radeon_irq_kms_fini(rdev);
        radeon_fence_driver_fini(rdev);
        radeon_bo_fini(rdev);
        if (rdev->is_atom_bios) {
                radeon_atombios_fini(rdev);
        } else {
                radeon_combios_fini(rdev);
        }
        kfree(rdev->bios);
        rdev->bios = NULL;
}

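/*
 * Driver load: one-time asic setup covering scratch/surface registers,
 * BIOS, GPU reset and posting, clocks, AGP, MC, debugfs, the fence driver,
 * the memory manager, GART and power management, followed by r420_startup().
 * Acceleration is disabled (but init still succeeds) if startup fails.
 */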
int r420_init(struct radeon_device *rdev)
{
        int r;

        /* Initialize scratch registers */
        radeon_scratch_init(rdev);
        /* Initialize surface registers */
        radeon_surface_init(rdev);
        /* TODO: disable VGA need to use VGA request */
        /* restore some register to sane defaults */
        r100_restore_sanity(rdev);
        /* BIOS */
        if (!radeon_get_bios(rdev)) {
                if (ASIC_IS_AVIVO(rdev))
                        return -EINVAL;
        }
        if (rdev->is_atom_bios) {
                r = radeon_atombios_init(rdev);
                if (r) {
                        return r;
                }
        } else {
                r = radeon_combios_init(rdev);
                if (r) {
                        return r;
                }
        }
        /* Reset gpu before posting otherwise ATOM will enter infinite loop */
        if (radeon_asic_reset(rdev)) {
                dev_warn(rdev->dev,
                        "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
                        RREG32(R_000E40_RBBM_STATUS),
                        RREG32(R_0007C0_CP_STAT));
        }
        /* check if cards are posted or not */
        if (radeon_boot_test_post_card(rdev) == false)
                return -EINVAL;

        /* Initialize clocks */
        radeon_get_clock_info(rdev->ddev);
        /* initialize AGP */
        if (rdev->flags & RADEON_IS_AGP) {
                r = radeon_agp_init(rdev);
                if (r) {
                        radeon_agp_disable(rdev);
                }
        }
        /* initialize memory controller */
        r300_mc_init(rdev);
        r420_debugfs(rdev);
        /* Fence driver */
        r = radeon_fence_driver_init(rdev);
        if (r) {
                return r;
        }
        /* Memory manager */
        r = radeon_bo_init(rdev);
        if (r) {
                return r;
        }
        if (rdev->family == CHIP_R420)
                r100_enable_bm(rdev);

        if (rdev->flags & RADEON_IS_PCIE) {
                r = rv370_pcie_gart_init(rdev);
                if (r)
                        return r;
        }
        if (rdev->flags & RADEON_IS_PCI) {
                r = r100_pci_gart_init(rdev);
                if (r)
                        return r;
        }
        r420_set_reg_safe(rdev);

        /* Initialize power management */
        radeon_pm_init(rdev);

        rdev->accel_working = true;
        r = r420_startup(rdev);
        if (r) {
                /* Something went wrong with the accel init, so stop accel */
                dev_err(rdev->dev, "Disabling GPU acceleration\n");
                r100_cp_fini(rdev);
                radeon_wb_fini(rdev);
                radeon_ib_pool_fini(rdev);
                radeon_irq_kms_fini(rdev);
                if (rdev->flags & RADEON_IS_PCIE)
                        rv370_pcie_gart_fini(rdev);
                if (rdev->flags & RADEON_IS_PCI)
                        r100_pci_gart_fini(rdev);
                radeon_agp_fini(rdev);
                rdev->accel_working = false;
        }
        return 0;
}

/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)
static int r420_debugfs_pipes_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        struct radeon_device *rdev = dev->dev_private;
        uint32_t tmp;

        tmp = RREG32(R400_GB_PIPE_SELECT);
        seq_printf(m, "GB_PIPE_SELECT 0x%08x\n", tmp);
        tmp = RREG32(R300_GB_TILE_CONFIG);
        seq_printf(m, "GB_TILE_CONFIG 0x%08x\n", tmp);
        tmp = RREG32(R300_DST_PIPE_CONFIG);
        seq_printf(m, "DST_PIPE_CONFIG 0x%08x\n", tmp);
        return 0;
}

static struct drm_info_list r420_pipes_info_list[] = {
        {"r420_pipes_info", r420_debugfs_pipes_info, 0, NULL},
};
#endif

int r420_debugfs_pipes_info_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
        return radeon_debugfs_add_files(rdev, r420_pipes_info_list, 1);
#else
        return 0;
#endif
}