linux/drivers/gpu/drm/amd/amdgpu/vi.c
/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/module.h>
#include "drmP.h"
#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_ih.h"
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"
#include "amdgpu_ucode.h"
#include "atom.h"

#include "gmc/gmc_8_1_d.h"
#include "gmc/gmc_8_1_sh_mask.h"

#include "oss/oss_3_0_d.h"
#include "oss/oss_3_0_sh_mask.h"

#include "bif/bif_5_0_d.h"
#include "bif/bif_5_0_sh_mask.h"

#include "gca/gfx_8_0_d.h"
#include "gca/gfx_8_0_sh_mask.h"

#include "smu/smu_7_1_1_d.h"
#include "smu/smu_7_1_1_sh_mask.h"

#include "uvd/uvd_5_0_d.h"
#include "uvd/uvd_5_0_sh_mask.h"

#include "vce/vce_3_0_d.h"
#include "vce/vce_3_0_sh_mask.h"

#include "dce/dce_10_0_d.h"
#include "dce/dce_10_0_sh_mask.h"

#include "vid.h"
#include "vi.h"
#include "vi_dpm.h"
#include "gmc_v8_0.h"
#include "gfx_v8_0.h"
#include "sdma_v2_4.h"
#include "sdma_v3_0.h"
#include "dce_v10_0.h"
#include "dce_v11_0.h"
#include "iceland_ih.h"
#include "tonga_ih.h"
#include "cz_ih.h"
#include "uvd_v5_0.h"
#include "uvd_v6_0.h"
#include "vce_v3_0.h"
/*
 * Indirect register accessors
 */
static u32 vi_pcie_rreg(struct amdgpu_device *adev, u32 reg)
{
        unsigned long flags;
        u32 r;

        spin_lock_irqsave(&adev->pcie_idx_lock, flags);
        WREG32(mmPCIE_INDEX, reg);
        (void)RREG32(mmPCIE_INDEX);
        r = RREG32(mmPCIE_DATA);
        spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
        return r;
}

static void vi_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
        unsigned long flags;

        spin_lock_irqsave(&adev->pcie_idx_lock, flags);
        WREG32(mmPCIE_INDEX, reg);
        (void)RREG32(mmPCIE_INDEX);
        WREG32(mmPCIE_DATA, v);
        (void)RREG32(mmPCIE_DATA);
        spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}
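
/*
 * A note on the index/data pattern used by all of the accessors in this
 * file: PCIE, SMC, UVD_CTX and DIDT registers are not directly mapped.
 * The register offset is written to an INDEX register and the value then
 * moves through the matching DATA register, with a spinlock keeping the
 * pair atomic.  The "(void)RREG32(mmPCIE_INDEX);" is a posting read that
 * makes sure the index write has landed before the data access.  A
 * minimal usage sketch, assuming the RREG32_PCIE()/WREG32_PCIE()
 * wrappers from amdgpu.h that expand to these hooks:
 *
 *     u32 v = RREG32_PCIE(ixPCIE_CNTL2);
 *     WREG32_PCIE(ixPCIE_CNTL2, v);
 */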

static u32 vi_smc_rreg(struct amdgpu_device *adev, u32 reg)
{
        unsigned long flags;
        u32 r;

        spin_lock_irqsave(&adev->smc_idx_lock, flags);
        WREG32(mmSMC_IND_INDEX_0, reg);
        r = RREG32(mmSMC_IND_DATA_0);
        spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
        return r;
}

static void vi_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
        unsigned long flags;

        spin_lock_irqsave(&adev->smc_idx_lock, flags);
        WREG32(mmSMC_IND_INDEX_0, reg);
        WREG32(mmSMC_IND_DATA_0, v);
        spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
}

/* smu_8_0_d.h */
#define mmMP0PUB_IND_INDEX                                                      0x180
#define mmMP0PUB_IND_DATA                                                       0x181

static u32 cz_smc_rreg(struct amdgpu_device *adev, u32 reg)
{
        unsigned long flags;
        u32 r;

        spin_lock_irqsave(&adev->smc_idx_lock, flags);
        WREG32(mmMP0PUB_IND_INDEX, reg);
        r = RREG32(mmMP0PUB_IND_DATA);
        spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
        return r;
}

static void cz_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
        unsigned long flags;

        spin_lock_irqsave(&adev->smc_idx_lock, flags);
        WREG32(mmMP0PUB_IND_INDEX, reg);
        WREG32(mmMP0PUB_IND_DATA, v);
        spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
}
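
/*
 * Carrizo (an APU) exposes its SMU through the MP0PUB_IND_INDEX/DATA
 * pair defined just above rather than through SMC_IND_INDEX_0/DATA_0,
 * hence the separate cz_smc_rreg()/cz_smc_wreg().  vi_common_early_init()
 * below installs one pair or the other into adev->smc_rreg/smc_wreg
 * based on the AMDGPU_IS_APU flag.
 */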

static u32 vi_uvd_ctx_rreg(struct amdgpu_device *adev, u32 reg)
{
        unsigned long flags;
        u32 r;

        spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
        WREG32(mmUVD_CTX_INDEX, reg & 0x1ff);
        r = RREG32(mmUVD_CTX_DATA);
        spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
        return r;
}

static void vi_uvd_ctx_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
        unsigned long flags;

        spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
        WREG32(mmUVD_CTX_INDEX, reg & 0x1ff);
        WREG32(mmUVD_CTX_DATA, v);
        spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
}

static u32 vi_didt_rreg(struct amdgpu_device *adev, u32 reg)
{
        unsigned long flags;
        u32 r;

        spin_lock_irqsave(&adev->didt_idx_lock, flags);
        WREG32(mmDIDT_IND_INDEX, reg);
        r = RREG32(mmDIDT_IND_DATA);
        spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
        return r;
}

static void vi_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
        unsigned long flags;

        spin_lock_irqsave(&adev->didt_idx_lock, flags);
        WREG32(mmDIDT_IND_INDEX, reg);
        WREG32(mmDIDT_IND_DATA, v);
        spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
}

static const u32 tonga_mgcg_cgcg_init[] =
{
        mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
        mmPCIE_INDEX, 0xffffffff, 0x0140001c,
        mmPCIE_DATA, 0x000f0000, 0x00000000,
        mmSMC_IND_INDEX_4, 0xffffffff, 0xC060000C,
        mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
        mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
        mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};

static const u32 iceland_mgcg_cgcg_init[] =
{
        mmPCIE_INDEX, 0xffffffff, ixPCIE_CNTL2,
        mmPCIE_DATA, 0x000f0000, 0x00000000,
        mmSMC_IND_INDEX_4, 0xffffffff, ixCGTT_ROM_CLK_CTRL0,
        mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
        mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};

static const u32 cz_mgcg_cgcg_init[] =
{
        mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
        mmPCIE_INDEX, 0xffffffff, 0x0140001c,
        mmPCIE_DATA, 0x000f0000, 0x00000000,
        mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
        mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};
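
/*
 * The *_mgcg_cgcg_init tables above are consumed three entries at a
 * time: { register offset, AND mask, OR value }.  A sketch of what
 * amdgpu_program_register_sequence() does with them (simplified from
 * the common amdgpu code, not a verbatim copy):
 *
 *     for (i = 0; i < array_size; i += 3) {
 *             tmp = RREG32(registers[i]);
 *             tmp &= ~registers[i + 1];
 *             tmp |= registers[i + 2];
 *             WREG32(registers[i], tmp);
 *     }
 */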

static void vi_init_golden_registers(struct amdgpu_device *adev)
{
        /* Some of the registers might be dependent on GRBM_GFX_INDEX */
        mutex_lock(&adev->grbm_idx_mutex);

        switch (adev->asic_type) {
        case CHIP_TOPAZ:
                amdgpu_program_register_sequence(adev,
                                                 iceland_mgcg_cgcg_init,
                                                 (const u32)ARRAY_SIZE(iceland_mgcg_cgcg_init));
                break;
        case CHIP_TONGA:
                amdgpu_program_register_sequence(adev,
                                                 tonga_mgcg_cgcg_init,
                                                 (const u32)ARRAY_SIZE(tonga_mgcg_cgcg_init));
                break;
        case CHIP_CARRIZO:
                amdgpu_program_register_sequence(adev,
                                                 cz_mgcg_cgcg_init,
                                                 (const u32)ARRAY_SIZE(cz_mgcg_cgcg_init));
                break;
        default:
                break;
        }
        mutex_unlock(&adev->grbm_idx_mutex);
}

/**
 * vi_get_xclk - get the xclk
 *
 * @adev: amdgpu_device pointer
 *
 * Returns the reference clock used by the gfx engine
 * (VI).
 */
static u32 vi_get_xclk(struct amdgpu_device *adev)
{
        u32 reference_clock = adev->clock.spll.reference_freq;
        u32 tmp;

        if (adev->flags & AMDGPU_IS_APU)
                return reference_clock;

        tmp = RREG32_SMC(ixCG_CLKPIN_CNTL_2);
        if (REG_GET_FIELD(tmp, CG_CLKPIN_CNTL_2, MUX_TCLK_TO_XCLK))
                return 1000;

        tmp = RREG32_SMC(ixCG_CLKPIN_CNTL);
        if (REG_GET_FIELD(tmp, CG_CLKPIN_CNTL, XTALIN_DIVIDE))
                return reference_clock / 4;

        return reference_clock;
}
/**
 * vi_srbm_select - select specific register instances
 *
 * @adev: amdgpu_device pointer
 * @me: selected ME (micro engine)
 * @pipe: pipe
 * @queue: queue
 * @vmid: VMID
 *
 * Switches the currently active register instances.  Some
 * registers are instanced per VMID, others are instanced per
 * me/pipe/queue combination.
 */
void vi_srbm_select(struct amdgpu_device *adev,
                     u32 me, u32 pipe, u32 queue, u32 vmid)
{
        u32 srbm_gfx_cntl = 0;

        srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, PIPEID, pipe);
        srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, MEID, me);
        srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, VMID, vmid);
        srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, QUEUEID, queue);
        WREG32(mmSRBM_GFX_CNTL, srbm_gfx_cntl);
}
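
/*
 * A typical caller pattern for vi_srbm_select() (a sketch of how the
 * gfx code uses it, not a helper defined in this file): select one
 * me/pipe/queue/vmid instance, program the instanced registers, then
 * restore the default instance so later register accesses are not
 * silently redirected:
 *
 *     mutex_lock(&adev->srbm_mutex);
 *     vi_srbm_select(adev, me, pipe, queue, 0);
 *     ... program per-queue registers ...
 *     vi_srbm_select(adev, 0, 0, 0, 0);
 *     mutex_unlock(&adev->srbm_mutex);
 */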

static void vi_vga_set_state(struct amdgpu_device *adev, bool state)
{
        /* todo */
}

static bool vi_read_disabled_bios(struct amdgpu_device *adev)
{
        u32 bus_cntl;
        u32 d1vga_control = 0;
        u32 d2vga_control = 0;
        u32 vga_render_control = 0;
        u32 rom_cntl;
        bool r;

        bus_cntl = RREG32(mmBUS_CNTL);
        if (adev->mode_info.num_crtc) {
                d1vga_control = RREG32(mmD1VGA_CONTROL);
                d2vga_control = RREG32(mmD2VGA_CONTROL);
                vga_render_control = RREG32(mmVGA_RENDER_CONTROL);
        }
        rom_cntl = RREG32_SMC(ixROM_CNTL);

        /* enable the rom */
        WREG32(mmBUS_CNTL, (bus_cntl & ~BUS_CNTL__BIOS_ROM_DIS_MASK));
        if (adev->mode_info.num_crtc) {
                /* Disable VGA mode */
                WREG32(mmD1VGA_CONTROL,
                       (d1vga_control & ~(D1VGA_CONTROL__D1VGA_MODE_ENABLE_MASK |
                                          D1VGA_CONTROL__D1VGA_TIMING_SELECT_MASK)));
                WREG32(mmD2VGA_CONTROL,
                       (d2vga_control & ~(D2VGA_CONTROL__D2VGA_MODE_ENABLE_MASK |
                                          D2VGA_CONTROL__D2VGA_TIMING_SELECT_MASK)));
                WREG32(mmVGA_RENDER_CONTROL,
                       (vga_render_control & ~VGA_RENDER_CONTROL__VGA_VSTATUS_CNTL_MASK));
        }
        WREG32_SMC(ixROM_CNTL, rom_cntl | ROM_CNTL__SCK_OVERWRITE_MASK);

        r = amdgpu_read_bios(adev);

        /* restore regs */
        WREG32(mmBUS_CNTL, bus_cntl);
        if (adev->mode_info.num_crtc) {
                WREG32(mmD1VGA_CONTROL, d1vga_control);
                WREG32(mmD2VGA_CONTROL, d2vga_control);
                WREG32(mmVGA_RENDER_CONTROL, vga_render_control);
        }
        WREG32_SMC(ixROM_CNTL, rom_cntl);
        return r;
}

static struct amdgpu_allowed_register_entry tonga_allowed_read_registers[] = {
        {mmGB_MACROTILE_MODE7, true},
};

static struct amdgpu_allowed_register_entry cz_allowed_read_registers[] = {
        {mmGB_TILE_MODE7, true},
        {mmGB_TILE_MODE12, true},
        {mmGB_TILE_MODE17, true},
        {mmGB_TILE_MODE23, true},
        {mmGB_MACROTILE_MODE7, true},
};

static struct amdgpu_allowed_register_entry vi_allowed_read_registers[] = {
        {mmGRBM_STATUS, false},
        {mmGB_ADDR_CONFIG, false},
        {mmMC_ARB_RAMCFG, false},
        {mmGB_TILE_MODE0, false},
        {mmGB_TILE_MODE1, false},
        {mmGB_TILE_MODE2, false},
        {mmGB_TILE_MODE3, false},
        {mmGB_TILE_MODE4, false},
        {mmGB_TILE_MODE5, false},
        {mmGB_TILE_MODE6, false},
        {mmGB_TILE_MODE7, false},
        {mmGB_TILE_MODE8, false},
        {mmGB_TILE_MODE9, false},
        {mmGB_TILE_MODE10, false},
        {mmGB_TILE_MODE11, false},
        {mmGB_TILE_MODE12, false},
        {mmGB_TILE_MODE13, false},
        {mmGB_TILE_MODE14, false},
        {mmGB_TILE_MODE15, false},
        {mmGB_TILE_MODE16, false},
        {mmGB_TILE_MODE17, false},
        {mmGB_TILE_MODE18, false},
        {mmGB_TILE_MODE19, false},
        {mmGB_TILE_MODE20, false},
        {mmGB_TILE_MODE21, false},
        {mmGB_TILE_MODE22, false},
        {mmGB_TILE_MODE23, false},
        {mmGB_TILE_MODE24, false},
        {mmGB_TILE_MODE25, false},
        {mmGB_TILE_MODE26, false},
        {mmGB_TILE_MODE27, false},
        {mmGB_TILE_MODE28, false},
        {mmGB_TILE_MODE29, false},
        {mmGB_TILE_MODE30, false},
        {mmGB_TILE_MODE31, false},
        {mmGB_MACROTILE_MODE0, false},
        {mmGB_MACROTILE_MODE1, false},
        {mmGB_MACROTILE_MODE2, false},
        {mmGB_MACROTILE_MODE3, false},
        {mmGB_MACROTILE_MODE4, false},
        {mmGB_MACROTILE_MODE5, false},
        {mmGB_MACROTILE_MODE6, false},
        {mmGB_MACROTILE_MODE7, false},
        {mmGB_MACROTILE_MODE8, false},
        {mmGB_MACROTILE_MODE9, false},
        {mmGB_MACROTILE_MODE10, false},
        {mmGB_MACROTILE_MODE11, false},
        {mmGB_MACROTILE_MODE12, false},
        {mmGB_MACROTILE_MODE13, false},
        {mmGB_MACROTILE_MODE14, false},
        {mmGB_MACROTILE_MODE15, false},
        {mmCC_RB_BACKEND_DISABLE, false, true},
        {mmGC_USER_RB_BACKEND_DISABLE, false, true},
        {mmGB_BACKEND_MAP, false, false},
        {mmPA_SC_RASTER_CONFIG, false, true},
        {mmPA_SC_RASTER_CONFIG_1, false, true},
};

static uint32_t vi_read_indexed_register(struct amdgpu_device *adev, u32 se_num,
                                         u32 sh_num, u32 reg_offset)
{
        uint32_t val;

        mutex_lock(&adev->grbm_idx_mutex);
        if (se_num != 0xffffffff || sh_num != 0xffffffff)
                gfx_v8_0_select_se_sh(adev, se_num, sh_num);

        val = RREG32(reg_offset);

        if (se_num != 0xffffffff || sh_num != 0xffffffff)
                gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
        mutex_unlock(&adev->grbm_idx_mutex);
        return val;
}
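
/*
 * A se_num/sh_num of 0xffffffff means "broadcast" to
 * gfx_v8_0_select_se_sh(), so vi_read_indexed_register() only narrows
 * the GRBM index when a specific shader engine/array is requested, and
 * always restores the broadcast setting before dropping the mutex.
 */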

static int vi_read_register(struct amdgpu_device *adev, u32 se_num,
                            u32 sh_num, u32 reg_offset, u32 *value)
{
        struct amdgpu_allowed_register_entry *asic_register_table = NULL;
        struct amdgpu_allowed_register_entry *asic_register_entry;
        uint32_t size, i;

        *value = 0;
        switch (adev->asic_type) {
        case CHIP_TOPAZ:
                asic_register_table = tonga_allowed_read_registers;
                size = ARRAY_SIZE(tonga_allowed_read_registers);
                break;
        case CHIP_TONGA:
        case CHIP_CARRIZO:
                asic_register_table = cz_allowed_read_registers;
                size = ARRAY_SIZE(cz_allowed_read_registers);
                break;
        default:
                return -EINVAL;
        }

        if (asic_register_table) {
                for (i = 0; i < size; i++) {
                        asic_register_entry = asic_register_table + i;
                        if (reg_offset != asic_register_entry->reg_offset)
                                continue;
                        if (!asic_register_entry->untouched)
                                *value = asic_register_entry->grbm_indexed ?
                                        vi_read_indexed_register(adev, se_num,
                                                                 sh_num, reg_offset) :
                                        RREG32(reg_offset);
                        return 0;
                }
        }

        for (i = 0; i < ARRAY_SIZE(vi_allowed_read_registers); i++) {
                if (reg_offset != vi_allowed_read_registers[i].reg_offset)
                        continue;

                if (!vi_allowed_read_registers[i].untouched)
                        *value = vi_allowed_read_registers[i].grbm_indexed ?
                                vi_read_indexed_register(adev, se_num,
                                                         sh_num, reg_offset) :
                                RREG32(reg_offset);
                return 0;
        }
        return -EINVAL;
}
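
/*
 * Lookup order in vi_read_register(): the per-ASIC table is searched
 * first, then the common vi_allowed_read_registers[] table; entries
 * flagged "untouched" report 0 rather than touching the hardware.
 * Callers are expected to reach this through the asic_funcs
 * read_register hook.  A usage sketch, assuming the
 * amdgpu_asic_read_register() wrapper macro from amdgpu.h:
 *
 *     u32 val;
 *     if (!amdgpu_asic_read_register(adev, 0xffffffff, 0xffffffff,
 *                                    mmGB_ADDR_CONFIG, &val))
 *             ... val holds a whitelisted register value ...
 */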

static void vi_print_gpu_status_regs(struct amdgpu_device *adev)
{
        dev_info(adev->dev, "  GRBM_STATUS=0x%08X\n",
                RREG32(mmGRBM_STATUS));
        dev_info(adev->dev, "  GRBM_STATUS2=0x%08X\n",
                RREG32(mmGRBM_STATUS2));
        dev_info(adev->dev, "  GRBM_STATUS_SE0=0x%08X\n",
                RREG32(mmGRBM_STATUS_SE0));
        dev_info(adev->dev, "  GRBM_STATUS_SE1=0x%08X\n",
                RREG32(mmGRBM_STATUS_SE1));
        dev_info(adev->dev, "  GRBM_STATUS_SE2=0x%08X\n",
                RREG32(mmGRBM_STATUS_SE2));
        dev_info(adev->dev, "  GRBM_STATUS_SE3=0x%08X\n",
                RREG32(mmGRBM_STATUS_SE3));
        dev_info(adev->dev, "  SRBM_STATUS=0x%08X\n",
                RREG32(mmSRBM_STATUS));
        dev_info(adev->dev, "  SRBM_STATUS2=0x%08X\n",
                RREG32(mmSRBM_STATUS2));
        dev_info(adev->dev, "  SDMA0_STATUS_REG   = 0x%08X\n",
                RREG32(mmSDMA0_STATUS_REG + SDMA0_REGISTER_OFFSET));
        dev_info(adev->dev, "  SDMA1_STATUS_REG   = 0x%08X\n",
                 RREG32(mmSDMA0_STATUS_REG + SDMA1_REGISTER_OFFSET));
        dev_info(adev->dev, "  CP_STAT = 0x%08x\n", RREG32(mmCP_STAT));
        dev_info(adev->dev, "  CP_STALLED_STAT1 = 0x%08x\n",
                 RREG32(mmCP_STALLED_STAT1));
        dev_info(adev->dev, "  CP_STALLED_STAT2 = 0x%08x\n",
                 RREG32(mmCP_STALLED_STAT2));
        dev_info(adev->dev, "  CP_STALLED_STAT3 = 0x%08x\n",
                 RREG32(mmCP_STALLED_STAT3));
        dev_info(adev->dev, "  CP_CPF_BUSY_STAT = 0x%08x\n",
                 RREG32(mmCP_CPF_BUSY_STAT));
        dev_info(adev->dev, "  CP_CPF_STALLED_STAT1 = 0x%08x\n",
                 RREG32(mmCP_CPF_STALLED_STAT1));
        dev_info(adev->dev, "  CP_CPF_STATUS = 0x%08x\n", RREG32(mmCP_CPF_STATUS));
        dev_info(adev->dev, "  CP_CPC_BUSY_STAT = 0x%08x\n", RREG32(mmCP_CPC_BUSY_STAT));
        dev_info(adev->dev, "  CP_CPC_STALLED_STAT1 = 0x%08x\n",
                 RREG32(mmCP_CPC_STALLED_STAT1));
        dev_info(adev->dev, "  CP_CPC_STATUS = 0x%08x\n", RREG32(mmCP_CPC_STATUS));
}
 528
 529/**
 530 * vi_gpu_check_soft_reset - check which blocks are busy
 531 *
 532 * @adev: amdgpu_device pointer
 533 *
 534 * Check which blocks are busy and return the relevant reset
 535 * mask to be used by vi_gpu_soft_reset().
 536 * Returns a mask of the blocks to be reset.
 537 */
 538u32 vi_gpu_check_soft_reset(struct amdgpu_device *adev)
 539{
 540        u32 reset_mask = 0;
 541        u32 tmp;
 542
 543        /* GRBM_STATUS */
 544        tmp = RREG32(mmGRBM_STATUS);
 545        if (tmp & (GRBM_STATUS__PA_BUSY_MASK | GRBM_STATUS__SC_BUSY_MASK |
 546                   GRBM_STATUS__BCI_BUSY_MASK | GRBM_STATUS__SX_BUSY_MASK |
 547                   GRBM_STATUS__TA_BUSY_MASK | GRBM_STATUS__VGT_BUSY_MASK |
 548                   GRBM_STATUS__DB_BUSY_MASK | GRBM_STATUS__CB_BUSY_MASK |
 549                   GRBM_STATUS__GDS_BUSY_MASK | GRBM_STATUS__SPI_BUSY_MASK |
 550                   GRBM_STATUS__IA_BUSY_MASK | GRBM_STATUS__IA_BUSY_NO_DMA_MASK))
 551                reset_mask |= AMDGPU_RESET_GFX;
 552
 553        if (tmp & (GRBM_STATUS__CP_BUSY_MASK | GRBM_STATUS__CP_COHERENCY_BUSY_MASK))
 554                reset_mask |= AMDGPU_RESET_CP;
 555
 556        /* GRBM_STATUS2 */
 557        tmp = RREG32(mmGRBM_STATUS2);
 558        if (tmp & GRBM_STATUS2__RLC_BUSY_MASK)
 559                reset_mask |= AMDGPU_RESET_RLC;
 560
 561        if (tmp & (GRBM_STATUS2__CPF_BUSY_MASK |
 562                   GRBM_STATUS2__CPC_BUSY_MASK |
 563                   GRBM_STATUS2__CPG_BUSY_MASK))
 564                reset_mask |= AMDGPU_RESET_CP;
 565
 566        /* SRBM_STATUS2 */
 567        tmp = RREG32(mmSRBM_STATUS2);
 568        if (tmp & SRBM_STATUS2__SDMA_BUSY_MASK)
 569                reset_mask |= AMDGPU_RESET_DMA;
 570
 571        if (tmp & SRBM_STATUS2__SDMA1_BUSY_MASK)
 572                reset_mask |= AMDGPU_RESET_DMA1;
 573
 574        /* SRBM_STATUS */
 575        tmp = RREG32(mmSRBM_STATUS);
 576
 577        if (tmp & SRBM_STATUS__IH_BUSY_MASK)
 578                reset_mask |= AMDGPU_RESET_IH;
 579
 580        if (tmp & SRBM_STATUS__SEM_BUSY_MASK)
 581                reset_mask |= AMDGPU_RESET_SEM;
 582
 583        if (tmp & SRBM_STATUS__GRBM_RQ_PENDING_MASK)
 584                reset_mask |= AMDGPU_RESET_GRBM;
 585
 586        if (adev->asic_type != CHIP_TOPAZ) {
 587                if (tmp & (SRBM_STATUS__UVD_RQ_PENDING_MASK |
 588                           SRBM_STATUS__UVD_BUSY_MASK))
 589                        reset_mask |= AMDGPU_RESET_UVD;
 590        }
 591
 592        if (tmp & SRBM_STATUS__VMC_BUSY_MASK)
 593                reset_mask |= AMDGPU_RESET_VMC;
 594
 595        if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
 596                   SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK))
 597                reset_mask |= AMDGPU_RESET_MC;
 598
 599        /* SDMA0_STATUS_REG */
 600        tmp = RREG32(mmSDMA0_STATUS_REG + SDMA0_REGISTER_OFFSET);
 601        if (!(tmp & SDMA0_STATUS_REG__IDLE_MASK))
 602                reset_mask |= AMDGPU_RESET_DMA;
 603
 604        /* SDMA1_STATUS_REG */
 605        tmp = RREG32(mmSDMA0_STATUS_REG + SDMA1_REGISTER_OFFSET);
 606        if (!(tmp & SDMA0_STATUS_REG__IDLE_MASK))
 607                reset_mask |= AMDGPU_RESET_DMA1;
 608#if 0
 609        /* VCE_STATUS */
 610        if (adev->asic_type != CHIP_TOPAZ) {
 611                tmp = RREG32(mmVCE_STATUS);
 612                if (tmp & VCE_STATUS__VCPU_REPORT_RB0_BUSY_MASK)
 613                        reset_mask |= AMDGPU_RESET_VCE;
 614                if (tmp & VCE_STATUS__VCPU_REPORT_RB1_BUSY_MASK)
 615                        reset_mask |= AMDGPU_RESET_VCE1;
 616
 617        }
 618
 619        if (adev->asic_type != CHIP_TOPAZ) {
 620                if (amdgpu_display_is_display_hung(adev))
 621                        reset_mask |= AMDGPU_RESET_DISPLAY;
 622        }
 623#endif
 624
        /* Skip MC reset as it's most likely not hung, just busy */
        if (reset_mask & AMDGPU_RESET_MC) {
                DRM_DEBUG("MC busy: 0x%08X, clearing.\n", reset_mask);
                reset_mask &= ~AMDGPU_RESET_MC;
        }

        return reset_mask;
}
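
/*
 * The returned mask is interpreted bit by bit in vi_gpu_soft_reset();
 * for example, a hang with both SDMA engines busy comes back as
 * (AMDGPU_RESET_DMA | AMDGPU_RESET_DMA1) and only those engines get
 * their SOFT_RESET bits pulsed.  MC busy is deliberately dropped from
 * the mask above because a busy memory controller is usually not a
 * hung one.
 */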

/**
 * vi_gpu_soft_reset - soft reset GPU
 *
 * @adev: amdgpu_device pointer
 * @reset_mask: mask of which blocks to reset
 *
 * Soft reset the blocks specified in @reset_mask.
 */
static void vi_gpu_soft_reset(struct amdgpu_device *adev, u32 reset_mask)
{
        struct amdgpu_mode_mc_save save;
        u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
        u32 tmp;

        if (reset_mask == 0)
                return;

        dev_info(adev->dev, "GPU softreset: 0x%08X\n", reset_mask);

        vi_print_gpu_status_regs(adev);
        dev_info(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n",
                 RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR));
        dev_info(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
                 RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS));

        /* disable CG/PG */

        /* stop the rlc */
        //XXX
        //gfx_v8_0_rlc_stop(adev);

        /* Disable GFX parsing/prefetching */
        tmp = RREG32(mmCP_ME_CNTL);
        tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, 1);
        tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, 1);
        tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, 1);
        WREG32(mmCP_ME_CNTL, tmp);

        /* Disable MEC parsing/prefetching */
        tmp = RREG32(mmCP_MEC_CNTL);
        tmp = REG_SET_FIELD(tmp, CP_MEC_CNTL, MEC_ME1_HALT, 1);
        tmp = REG_SET_FIELD(tmp, CP_MEC_CNTL, MEC_ME2_HALT, 1);
        WREG32(mmCP_MEC_CNTL, tmp);

        if (reset_mask & AMDGPU_RESET_DMA) {
                /* sdma0 */
                tmp = RREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET);
                tmp = REG_SET_FIELD(tmp, SDMA0_F32_CNTL, HALT, 1);
                WREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET, tmp);
        }
        if (reset_mask & AMDGPU_RESET_DMA1) {
                /* sdma1 */
                tmp = RREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET);
                tmp = REG_SET_FIELD(tmp, SDMA0_F32_CNTL, HALT, 1);
                WREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET, tmp);
        }

        gmc_v8_0_mc_stop(adev, &save);
        if (amdgpu_asic_wait_for_mc_idle(adev))
                dev_warn(adev->dev, "Wait for MC idle timed out!\n");

        if (reset_mask & (AMDGPU_RESET_GFX | AMDGPU_RESET_COMPUTE | AMDGPU_RESET_CP)) {
                grbm_soft_reset =
                        REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CP, 1);
                grbm_soft_reset =
                        REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_GFX, 1);
        }

        if (reset_mask & AMDGPU_RESET_CP) {
                grbm_soft_reset =
                        REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CP, 1);
                srbm_soft_reset =
                        REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_GRBM, 1);
        }

        if (reset_mask & AMDGPU_RESET_DMA)
                srbm_soft_reset =
                        REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_SDMA, 1);

        if (reset_mask & AMDGPU_RESET_DMA1)
                srbm_soft_reset =
                        REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_SDMA1, 1);

        if (reset_mask & AMDGPU_RESET_DISPLAY)
                srbm_soft_reset =
                        REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_DC, 1);

        if (reset_mask & AMDGPU_RESET_RLC)
                grbm_soft_reset =
                        REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);

        if (reset_mask & AMDGPU_RESET_SEM)
                srbm_soft_reset =
                        REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_SEM, 1);

        if (reset_mask & AMDGPU_RESET_IH)
                srbm_soft_reset =
                        REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_IH, 1);

        if (reset_mask & AMDGPU_RESET_GRBM)
                srbm_soft_reset =
                        REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_GRBM, 1);

        if (reset_mask & AMDGPU_RESET_VMC)
                srbm_soft_reset =
                        REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VMC, 1);

        if (reset_mask & AMDGPU_RESET_UVD)
                srbm_soft_reset =
                        REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_UVD, 1);

        if (reset_mask & AMDGPU_RESET_VCE)
                srbm_soft_reset =
                        REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE0, 1);

        if (reset_mask & AMDGPU_RESET_VCE1)
                srbm_soft_reset =
                        REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1);

        if (!(adev->flags & AMDGPU_IS_APU)) {
                if (reset_mask & AMDGPU_RESET_MC)
                        srbm_soft_reset =
                                REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_MC, 1);
        }

        if (grbm_soft_reset) {
                tmp = RREG32(mmGRBM_SOFT_RESET);
                tmp |= grbm_soft_reset;
                dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
                WREG32(mmGRBM_SOFT_RESET, tmp);
                tmp = RREG32(mmGRBM_SOFT_RESET);

                udelay(50);

                tmp &= ~grbm_soft_reset;
                WREG32(mmGRBM_SOFT_RESET, tmp);
                tmp = RREG32(mmGRBM_SOFT_RESET);
        }

        if (srbm_soft_reset) {
                tmp = RREG32(mmSRBM_SOFT_RESET);
                tmp |= srbm_soft_reset;
                dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
                WREG32(mmSRBM_SOFT_RESET, tmp);
                tmp = RREG32(mmSRBM_SOFT_RESET);

                udelay(50);

                tmp &= ~srbm_soft_reset;
                WREG32(mmSRBM_SOFT_RESET, tmp);
                tmp = RREG32(mmSRBM_SOFT_RESET);
        }

        /* Wait a little for things to settle down */
        udelay(50);

        gmc_v8_0_mc_resume(adev, &save);
        udelay(50);

        vi_print_gpu_status_regs(adev);
}
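
/*
 * The soft reset above follows the usual pulse sequence: OR the
 * SOFT_RESET_* bits in, read the register back to post the write, hold
 * for ~50us, clear the bits, and read back again.  The surrounding
 * gmc_v8_0_mc_stop()/gmc_v8_0_mc_resume() pair keeps the memory
 * controller quiescent while the engines are being reset.
 */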

static void vi_gpu_pci_config_reset(struct amdgpu_device *adev)
{
        struct amdgpu_mode_mc_save save;
        u32 tmp, i;

        dev_info(adev->dev, "GPU pci config reset\n");

        /* disable dpm? */

        /* disable cg/pg */

        /* Disable GFX parsing/prefetching */
        tmp = RREG32(mmCP_ME_CNTL);
        tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, 1);
        tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, 1);
        tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, 1);
        WREG32(mmCP_ME_CNTL, tmp);

        /* Disable MEC parsing/prefetching */
        tmp = RREG32(mmCP_MEC_CNTL);
        tmp = REG_SET_FIELD(tmp, CP_MEC_CNTL, MEC_ME1_HALT, 1);
        tmp = REG_SET_FIELD(tmp, CP_MEC_CNTL, MEC_ME2_HALT, 1);
        WREG32(mmCP_MEC_CNTL, tmp);

        /* sdma0 */
        tmp = RREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET);
        tmp = REG_SET_FIELD(tmp, SDMA0_F32_CNTL, HALT, 1);
        WREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET, tmp);

        /* sdma1 */
        tmp = RREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET);
        tmp = REG_SET_FIELD(tmp, SDMA0_F32_CNTL, HALT, 1);
        WREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET, tmp);

        /* XXX other engines? */

        /* halt the rlc, disable cp internal ints */
        //XXX
        //gfx_v8_0_rlc_stop(adev);

        udelay(50);

        /* disable mem access */
        gmc_v8_0_mc_stop(adev, &save);
        if (amdgpu_asic_wait_for_mc_idle(adev))
                dev_warn(adev->dev, "Wait for MC idle timed out!\n");

        /* disable BM */
        pci_clear_master(adev->pdev);
        /* reset */
        amdgpu_pci_config_reset(adev);

        udelay(100);

        /* wait for asic to come out of reset */
        for (i = 0; i < adev->usec_timeout; i++) {
                if (RREG32(mmCONFIG_MEMSIZE) != 0xffffffff)
                        break;
                udelay(1);
        }
}

static void vi_set_bios_scratch_engine_hung(struct amdgpu_device *adev, bool hung)
{
        u32 tmp = RREG32(mmBIOS_SCRATCH_3);

        if (hung)
                tmp |= ATOM_S3_ASIC_GUI_ENGINE_HUNG;
        else
                tmp &= ~ATOM_S3_ASIC_GUI_ENGINE_HUNG;

        WREG32(mmBIOS_SCRATCH_3, tmp);
}

/**
 * vi_asic_reset - soft reset GPU
 *
 * @adev: amdgpu_device pointer
 *
 * Look up which blocks are hung and attempt
 * to reset them.
 * Returns 0 for success.
 */
static int vi_asic_reset(struct amdgpu_device *adev)
{
        u32 reset_mask;

        reset_mask = vi_gpu_check_soft_reset(adev);

        if (reset_mask)
                vi_set_bios_scratch_engine_hung(adev, true);

        /* try soft reset */
        vi_gpu_soft_reset(adev, reset_mask);

        reset_mask = vi_gpu_check_soft_reset(adev);

        /* try pci config reset */
        if (reset_mask && amdgpu_hard_reset)
                vi_gpu_pci_config_reset(adev);

        reset_mask = vi_gpu_check_soft_reset(adev);

        if (!reset_mask)
                vi_set_bios_scratch_engine_hung(adev, false);

        return 0;
}
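
/*
 * Reset escalation in vi_asic_reset(): try a targeted soft reset first,
 * re-check the busy mask, and only fall back to the heavier PCI config
 * reset when blocks are still hung and the amdgpu_hard_reset module
 * parameter allows it.  The BIOS scratch "engine hung" bit brackets the
 * whole sequence so AtomBIOS can tell a reset is in flight.
 */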

static int vi_set_uvd_clock(struct amdgpu_device *adev, u32 clock,
                        u32 cntl_reg, u32 status_reg)
{
        int r, i;
        struct atom_clock_dividers dividers;
        uint32_t tmp;

        r = amdgpu_atombios_get_clock_dividers(adev,
                                               COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
                                               clock, false, &dividers);
        if (r)
                return r;

        tmp = RREG32_SMC(cntl_reg);
        tmp &= ~(CG_DCLK_CNTL__DCLK_DIR_CNTL_EN_MASK |
                CG_DCLK_CNTL__DCLK_DIVIDER_MASK);
        tmp |= dividers.post_divider;
        WREG32_SMC(cntl_reg, tmp);

        for (i = 0; i < 100; i++) {
                if (RREG32_SMC(status_reg) & CG_DCLK_STATUS__DCLK_STATUS_MASK)
                        break;
                mdelay(10);
        }
        if (i == 100)
                return -ETIMEDOUT;

        return 0;
}

static int vi_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
{
        int r;

        r = vi_set_uvd_clock(adev, vclk, ixCG_VCLK_CNTL, ixCG_VCLK_STATUS);
        if (r)
                return r;

        r = vi_set_uvd_clock(adev, dclk, ixCG_DCLK_CNTL, ixCG_DCLK_STATUS);

        return r;
}
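
/*
 * Both UVD clocks are programmed the same way: ask AtomBIOS for a
 * divider setup via amdgpu_atombios_get_clock_dividers(), write the
 * post divider into the CG_*CLK_CNTL register, then poll the matching
 * CG_*CLK_STATUS for up to ~1s (100 x 10ms) until the clock reports
 * stable.  A failure on either the VCLK or the DCLK leg propagates to
 * the caller.
 */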

static int vi_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
{
        /* todo */

        return 0;
}

static void vi_pcie_gen3_enable(struct amdgpu_device *adev)
{
        u32 mask;
        int ret;

        if (amdgpu_pcie_gen2 == 0)
                return;

        if (adev->flags & AMDGPU_IS_APU)
                return;

        ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask);
        if (ret != 0)
                return;

        if (!(mask & (DRM_PCIE_SPEED_50 | DRM_PCIE_SPEED_80)))
                return;

        /* todo */
}

static void vi_program_aspm(struct amdgpu_device *adev)
{
        if (amdgpu_aspm == 0)
                return;

        /* todo */
}

static void vi_enable_doorbell_aperture(struct amdgpu_device *adev,
                                        bool enable)
{
        u32 tmp;

        /* not necessary on CZ */
        if (adev->flags & AMDGPU_IS_APU)
                return;

        tmp = RREG32(mmBIF_DOORBELL_APER_EN);
        if (enable)
                tmp = REG_SET_FIELD(tmp, BIF_DOORBELL_APER_EN, BIF_DOORBELL_APER_EN, 1);
        else
                tmp = REG_SET_FIELD(tmp, BIF_DOORBELL_APER_EN, BIF_DOORBELL_APER_EN, 0);

        WREG32(mmBIF_DOORBELL_APER_EN, tmp);
}

/* topaz has no DCE, UVD, VCE */
static const struct amdgpu_ip_block_version topaz_ip_blocks[] =
{
        /* ORDER MATTERS! */
        {
                .type = AMD_IP_BLOCK_TYPE_COMMON,
                .major = 2,
                .minor = 0,
                .rev = 0,
                .funcs = &vi_common_ip_funcs,
        },
        {
                .type = AMD_IP_BLOCK_TYPE_GMC,
                .major = 8,
                .minor = 0,
                .rev = 0,
                .funcs = &gmc_v8_0_ip_funcs,
        },
        {
                .type = AMD_IP_BLOCK_TYPE_IH,
                .major = 2,
                .minor = 4,
                .rev = 0,
                .funcs = &iceland_ih_ip_funcs,
        },
        {
                .type = AMD_IP_BLOCK_TYPE_SMC,
                .major = 7,
                .minor = 1,
                .rev = 0,
                .funcs = &iceland_dpm_ip_funcs,
        },
        {
                .type = AMD_IP_BLOCK_TYPE_GFX,
                .major = 8,
                .minor = 0,
                .rev = 0,
                .funcs = &gfx_v8_0_ip_funcs,
        },
        {
                .type = AMD_IP_BLOCK_TYPE_SDMA,
                .major = 2,
                .minor = 4,
                .rev = 0,
                .funcs = &sdma_v2_4_ip_funcs,
        },
};

static const struct amdgpu_ip_block_version tonga_ip_blocks[] =
{
        /* ORDER MATTERS! */
        {
                .type = AMD_IP_BLOCK_TYPE_COMMON,
                .major = 2,
                .minor = 0,
                .rev = 0,
                .funcs = &vi_common_ip_funcs,
        },
        {
                .type = AMD_IP_BLOCK_TYPE_GMC,
                .major = 8,
                .minor = 0,
                .rev = 0,
                .funcs = &gmc_v8_0_ip_funcs,
        },
        {
                .type = AMD_IP_BLOCK_TYPE_IH,
                .major = 3,
                .minor = 0,
                .rev = 0,
                .funcs = &tonga_ih_ip_funcs,
        },
        {
                .type = AMD_IP_BLOCK_TYPE_SMC,
                .major = 7,
                .minor = 1,
                .rev = 0,
                .funcs = &tonga_dpm_ip_funcs,
        },
        {
                .type = AMD_IP_BLOCK_TYPE_DCE,
                .major = 10,
                .minor = 0,
                .rev = 0,
                .funcs = &dce_v10_0_ip_funcs,
        },
        {
                .type = AMD_IP_BLOCK_TYPE_GFX,
                .major = 8,
                .minor = 0,
                .rev = 0,
                .funcs = &gfx_v8_0_ip_funcs,
        },
        {
                .type = AMD_IP_BLOCK_TYPE_SDMA,
                .major = 3,
                .minor = 0,
                .rev = 0,
                .funcs = &sdma_v3_0_ip_funcs,
        },
        {
                .type = AMD_IP_BLOCK_TYPE_UVD,
                .major = 5,
                .minor = 0,
                .rev = 0,
                .funcs = &uvd_v5_0_ip_funcs,
        },
        {
                .type = AMD_IP_BLOCK_TYPE_VCE,
                .major = 3,
                .minor = 0,
                .rev = 0,
                .funcs = &vce_v3_0_ip_funcs,
        },
};

static const struct amdgpu_ip_block_version cz_ip_blocks[] =
{
        /* ORDER MATTERS! */
        {
                .type = AMD_IP_BLOCK_TYPE_COMMON,
                .major = 2,
                .minor = 0,
                .rev = 0,
                .funcs = &vi_common_ip_funcs,
        },
        {
                .type = AMD_IP_BLOCK_TYPE_GMC,
                .major = 8,
                .minor = 0,
                .rev = 0,
                .funcs = &gmc_v8_0_ip_funcs,
        },
        {
                .type = AMD_IP_BLOCK_TYPE_IH,
                .major = 3,
                .minor = 0,
                .rev = 0,
                .funcs = &cz_ih_ip_funcs,
        },
        {
                .type = AMD_IP_BLOCK_TYPE_SMC,
                .major = 8,
                .minor = 0,
                .rev = 0,
                .funcs = &cz_dpm_ip_funcs,
        },
        {
                .type = AMD_IP_BLOCK_TYPE_DCE,
                .major = 11,
                .minor = 0,
                .rev = 0,
                .funcs = &dce_v11_0_ip_funcs,
        },
        {
                .type = AMD_IP_BLOCK_TYPE_GFX,
                .major = 8,
                .minor = 0,
                .rev = 0,
                .funcs = &gfx_v8_0_ip_funcs,
        },
        {
                .type = AMD_IP_BLOCK_TYPE_SDMA,
                .major = 3,
                .minor = 0,
                .rev = 0,
                .funcs = &sdma_v3_0_ip_funcs,
        },
        {
                .type = AMD_IP_BLOCK_TYPE_UVD,
                .major = 6,
                .minor = 0,
                .rev = 0,
                .funcs = &uvd_v6_0_ip_funcs,
        },
        {
                .type = AMD_IP_BLOCK_TYPE_VCE,
                .major = 3,
                .minor = 0,
                .rev = 0,
                .funcs = &vce_v3_0_ip_funcs,
        },
};
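
/*
 * The ip_blocks arrays above are position sensitive ("ORDER MATTERS!"):
 * the amdgpu core walks adev->ip_blocks front to back for init and back
 * to front for teardown, so COMMON and GMC must come before the engines
 * that depend on them.  A sketch of the consuming loop (simplified, not
 * a verbatim copy of the core code):
 *
 *     for (i = 0; i < adev->num_ip_blocks; i++) {
 *             r = adev->ip_blocks[i].funcs->hw_init((void *)adev);
 *             if (r)
 *                     return r;
 *     }
 */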

int vi_set_ip_blocks(struct amdgpu_device *adev)
{
        switch (adev->asic_type) {
        case CHIP_TOPAZ:
                adev->ip_blocks = topaz_ip_blocks;
                adev->num_ip_blocks = ARRAY_SIZE(topaz_ip_blocks);
                break;
        case CHIP_TONGA:
                adev->ip_blocks = tonga_ip_blocks;
                adev->num_ip_blocks = ARRAY_SIZE(tonga_ip_blocks);
                break;
        case CHIP_CARRIZO:
                adev->ip_blocks = cz_ip_blocks;
                adev->num_ip_blocks = ARRAY_SIZE(cz_ip_blocks);
                break;
        default:
                /* FIXME: not supported yet */
                return -EINVAL;
        }

        return 0;
}

static uint32_t vi_get_rev_id(struct amdgpu_device *adev)
{
        if (adev->asic_type == CHIP_TOPAZ)
                return (RREG32(mmPCIE_EFUSE4) & PCIE_EFUSE4__STRAP_BIF_ATI_REV_ID_MASK)
                        >> PCIE_EFUSE4__STRAP_BIF_ATI_REV_ID__SHIFT;
        else
                return (RREG32(mmCC_DRM_ID_STRAPS) & CC_DRM_ID_STRAPS__ATI_REV_ID_MASK)
                        >> CC_DRM_ID_STRAPS__ATI_REV_ID__SHIFT;
}

static const struct amdgpu_asic_funcs vi_asic_funcs =
{
        .read_disabled_bios = &vi_read_disabled_bios,
        .read_register = &vi_read_register,
        .reset = &vi_asic_reset,
        .set_vga_state = &vi_vga_set_state,
        .get_xclk = &vi_get_xclk,
        .set_uvd_clocks = &vi_set_uvd_clocks,
        .set_vce_clocks = &vi_set_vce_clocks,
        .get_cu_info = &gfx_v8_0_get_cu_info,
        /* these should be moved to their own ip modules */
        .get_gpu_clock_counter = &gfx_v8_0_get_gpu_clock_counter,
        .wait_for_mc_idle = &gmc_v8_0_mc_wait_for_idle,
};
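
/*
 * These hooks are reached through the amdgpu_asic_*() wrapper macros in
 * amdgpu.h; amdgpu_asic_reset(adev), for instance, dispatches to
 * vi_asic_reset() above.  The gfx/gmc helpers at the end of the table
 * are parked here until they move into their own IP modules, as the
 * inline comment notes.
 */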

static int vi_common_early_init(void *handle)
{
        bool smc_enabled = false;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        if (adev->flags & AMDGPU_IS_APU) {
                adev->smc_rreg = &cz_smc_rreg;
                adev->smc_wreg = &cz_smc_wreg;
        } else {
                adev->smc_rreg = &vi_smc_rreg;
                adev->smc_wreg = &vi_smc_wreg;
        }
        adev->pcie_rreg = &vi_pcie_rreg;
        adev->pcie_wreg = &vi_pcie_wreg;
        adev->uvd_ctx_rreg = &vi_uvd_ctx_rreg;
        adev->uvd_ctx_wreg = &vi_uvd_ctx_wreg;
        adev->didt_rreg = &vi_didt_rreg;
        adev->didt_wreg = &vi_didt_wreg;

        adev->asic_funcs = &vi_asic_funcs;

        if (amdgpu_get_ip_block(adev, AMD_IP_BLOCK_TYPE_SMC) &&
                (amdgpu_ip_block_mask & (1 << AMD_IP_BLOCK_TYPE_SMC)))
                smc_enabled = true;

        adev->rev_id = vi_get_rev_id(adev);
        adev->external_rev_id = 0xFF;
        switch (adev->asic_type) {
        case CHIP_TOPAZ:
                adev->has_uvd = false;
                adev->cg_flags = 0;
                adev->pg_flags = 0;
                adev->external_rev_id = 0x1;
                if (amdgpu_smc_load_fw && smc_enabled)
                        adev->firmware.smu_load = true;
                break;
        case CHIP_TONGA:
                adev->has_uvd = true;
                adev->cg_flags = 0;
                adev->pg_flags = 0;
                adev->external_rev_id = adev->rev_id + 0x14;
                if (amdgpu_smc_load_fw && smc_enabled)
                        adev->firmware.smu_load = true;
                break;
        case CHIP_CARRIZO:
                adev->has_uvd = true;
                adev->cg_flags = 0;
                adev->pg_flags = AMDGPU_PG_SUPPORT_UVD | AMDGPU_PG_SUPPORT_VCE;
                adev->external_rev_id = adev->rev_id + 0x1;
                if (amdgpu_smc_load_fw && smc_enabled)
                        adev->firmware.smu_load = true;
                break;
        default:
                /* FIXME: not supported yet */
                return -EINVAL;
        }

        return 0;
}

static int vi_common_sw_init(void *handle)
{
        return 0;
}

static int vi_common_sw_fini(void *handle)
{
        return 0;
}

static int vi_common_hw_init(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        /* move the golden regs per IP block */
        vi_init_golden_registers(adev);
        /* enable pcie gen2/3 link */
        vi_pcie_gen3_enable(adev);
        /* enable aspm */
        vi_program_aspm(adev);
        /* enable the doorbell aperture */
        vi_enable_doorbell_aperture(adev, true);

        return 0;
}

static int vi_common_hw_fini(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        /* disable the doorbell aperture */
        vi_enable_doorbell_aperture(adev, false);

        return 0;
}

static int vi_common_suspend(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        return vi_common_hw_fini(adev);
}

static int vi_common_resume(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        return vi_common_hw_init(adev);
}

static bool vi_common_is_idle(void *handle)
{
        return true;
}

static int vi_common_wait_for_idle(void *handle)
{
        return 0;
}

static void vi_common_print_status(void *handle)
{
        return;
}

static int vi_common_soft_reset(void *handle)
{
        return 0;
}

static int vi_common_set_clockgating_state(void *handle,
                                            enum amd_clockgating_state state)
{
        return 0;
}

static int vi_common_set_powergating_state(void *handle,
                                            enum amd_powergating_state state)
{
        return 0;
}

const struct amd_ip_funcs vi_common_ip_funcs = {
        .early_init = vi_common_early_init,
        .late_init = NULL,
        .sw_init = vi_common_sw_init,
        .sw_fini = vi_common_sw_fini,
        .hw_init = vi_common_hw_init,
        .hw_fini = vi_common_hw_fini,
        .suspend = vi_common_suspend,
        .resume = vi_common_resume,
        .is_idle = vi_common_is_idle,
        .wait_for_idle = vi_common_wait_for_idle,
        .soft_reset = vi_common_soft_reset,
        .print_status = vi_common_print_status,
        .set_clockgating_state = vi_common_set_clockgating_state,
        .set_powergating_state = vi_common_set_powergating_state,
};