/* linux/drivers/gpu/drm/amd/amdgpu/vi.c */
   1/*
   2 * Copyright 2014 Advanced Micro Devices, Inc.
   3 *
   4 * Permission is hereby granted, free of charge, to any person obtaining a
   5 * copy of this software and associated documentation files (the "Software"),
   6 * to deal in the Software without restriction, including without limitation
   7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   8 * and/or sell copies of the Software, and to permit persons to whom the
   9 * Software is furnished to do so, subject to the following conditions:
  10 *
  11 * The above copyright notice and this permission notice shall be included in
  12 * all copies or substantial portions of the Software.
  13 *
  14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  20 * OTHER DEALINGS IN THE SOFTWARE.
  21 *
  22 */
  23#include <linux/slab.h>
  24#include <drm/drmP.h>
  25#include "amdgpu.h"
  26#include "amdgpu_atombios.h"
  27#include "amdgpu_ih.h"
  28#include "amdgpu_uvd.h"
  29#include "amdgpu_vce.h"
  30#include "amdgpu_ucode.h"
  31#include "atom.h"
  32#include "amd_pcie.h"
  33
  34#include "gmc/gmc_8_1_d.h"
  35#include "gmc/gmc_8_1_sh_mask.h"
  36
  37#include "oss/oss_3_0_d.h"
  38#include "oss/oss_3_0_sh_mask.h"
  39
  40#include "bif/bif_5_0_d.h"
  41#include "bif/bif_5_0_sh_mask.h"
  42
  43#include "gca/gfx_8_0_d.h"
  44#include "gca/gfx_8_0_sh_mask.h"
  45
  46#include "smu/smu_7_1_1_d.h"
  47#include "smu/smu_7_1_1_sh_mask.h"
  48
  49#include "uvd/uvd_5_0_d.h"
  50#include "uvd/uvd_5_0_sh_mask.h"
  51
  52#include "vce/vce_3_0_d.h"
  53#include "vce/vce_3_0_sh_mask.h"
  54
  55#include "dce/dce_10_0_d.h"
  56#include "dce/dce_10_0_sh_mask.h"
  57
  58#include "vid.h"
  59#include "vi.h"
  60#include "vi_dpm.h"
  61#include "gmc_v8_0.h"
  62#include "gmc_v7_0.h"
  63#include "gfx_v8_0.h"
  64#include "sdma_v2_4.h"
  65#include "sdma_v3_0.h"
  66#include "dce_v10_0.h"
  67#include "dce_v11_0.h"
  68#include "iceland_ih.h"
  69#include "tonga_ih.h"
  70#include "cz_ih.h"
  71#include "uvd_v5_0.h"
  72#include "uvd_v6_0.h"
  73#include "vce_v3_0.h"
  74#include "amdgpu_powerplay.h"
  75#if defined(CONFIG_DRM_AMD_ACP)
  76#include "amdgpu_acp.h"
  77#endif
  78#include "dce_virtual.h"
  79#include "mxgpu_vi.h"
  80#include "amdgpu_dm.h"
  81
  82/*
  83 * Indirect registers accessor
  84 */
/* Read an indirect PCIE register: select the offset via mmPCIE_INDEX,
 * then fetch the value from mmPCIE_DATA.  The shared index/data pair is
 * serialized by pcie_idx_lock.
 */
static u32 vi_pcie_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32(mmPCIE_INDEX, reg);
	(void)RREG32(mmPCIE_INDEX);	/* readback to make sure the index write landed */
	r = RREG32(mmPCIE_DATA);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
	return r;
}
  97
/* Write an indirect PCIE register through the mmPCIE_INDEX/mmPCIE_DATA
 * pair, under pcie_idx_lock.  Both writes are followed by a readback so
 * each one reaches the hardware before the lock is dropped.
 */
static void vi_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32(mmPCIE_INDEX, reg);
	(void)RREG32(mmPCIE_INDEX);	/* flush the index write */
	WREG32(mmPCIE_DATA, v);
	(void)RREG32(mmPCIE_DATA);	/* flush the data write */
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}
 109
/* Read an SMC indirect register (dGPU path) through the
 * SMC_IND_INDEX_11/SMC_IND_DATA_11 pair, serialized by smc_idx_lock.
 */
static u32 vi_smc_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32(mmSMC_IND_INDEX_11, (reg));
	r = RREG32(mmSMC_IND_DATA_11);
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
	return r;
}
 121
/* Write an SMC indirect register (dGPU path) through the
 * SMC_IND_INDEX_11/SMC_IND_DATA_11 pair, serialized by smc_idx_lock.
 */
static void vi_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32(mmSMC_IND_INDEX_11, (reg));
	WREG32(mmSMC_IND_DATA_11, (v));
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
}
 131
/* Local copies of the APU (smu 8.0) index/data register offsets,
 * from smu_8_0_d.h */
#define mmMP0PUB_IND_INDEX                                                      0x180
#define mmMP0PUB_IND_DATA                                                       0x181

/* Read an SMC indirect register on APUs (Carrizo/Stoney) via the
 * MP0PUB index/data pair, serialized by smc_idx_lock.
 */
static u32 cz_smc_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32(mmMP0PUB_IND_INDEX, (reg));
	r = RREG32(mmMP0PUB_IND_DATA);
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
	return r;
}
 147
/* Write an SMC indirect register on APUs (Carrizo/Stoney) via the
 * MP0PUB index/data pair, serialized by smc_idx_lock.
 */
static void cz_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32(mmMP0PUB_IND_INDEX, (reg));
	WREG32(mmMP0PUB_IND_DATA, (v));
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
}
 157
/* Read a UVD context register through the UVD_CTX index/data pair.
 * The index space is 9 bits wide, hence the 0x1ff mask.
 */
static u32 vi_uvd_ctx_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
	WREG32(mmUVD_CTX_INDEX, ((reg) & 0x1ff));
	r = RREG32(mmUVD_CTX_DATA);
	spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
	return r;
}
 169
/* Write a UVD context register through the UVD_CTX index/data pair.
 * The index space is 9 bits wide, hence the 0x1ff mask.
 */
static void vi_uvd_ctx_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
	WREG32(mmUVD_CTX_INDEX, ((reg) & 0x1ff));
	WREG32(mmUVD_CTX_DATA, (v));
	spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
}
 179
/* Read a DIDT (di/dt throttling block) indirect register through the
 * DIDT_IND index/data pair, serialized by didt_idx_lock.
 */
static u32 vi_didt_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(mmDIDT_IND_INDEX, (reg));
	r = RREG32(mmDIDT_IND_DATA);
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
	return r;
}
 191
/* Write a DIDT (di/dt throttling block) indirect register through the
 * DIDT_IND index/data pair, serialized by didt_idx_lock.
 */
static void vi_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(mmDIDT_IND_INDEX, (reg));
	WREG32(mmDIDT_IND_DATA, (v));
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
}
 201
/* Read a GC CAC indirect register through the GC_CAC_IND index/data
 * pair, serialized by gc_cac_idx_lock.
 */
static u32 vi_gc_cac_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
	WREG32(mmGC_CAC_IND_INDEX, (reg));
	r = RREG32(mmGC_CAC_IND_DATA);
	spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
	return r;
}
 213
/* Write a GC CAC indirect register through the GC_CAC_IND index/data
 * pair, serialized by gc_cac_idx_lock.
 */
static void vi_gc_cac_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
	WREG32(mmGC_CAC_IND_INDEX, (reg));
	WREG32(mmGC_CAC_IND_DATA, (v));
	spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
}
 223
 224
/* Tonga clock-gating golden settings: {register, mask, value} triples
 * consumed by amdgpu_program_register_sequence(). */
static const u32 tonga_mgcg_cgcg_init[] =
{
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
	mmPCIE_INDEX, 0xffffffff, 0x0140001c,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmSMC_IND_INDEX_4, 0xffffffff, 0xC060000C,
	mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
	mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};
 235
/* Fiji clock-gating golden settings: {register, mask, value} triples
 * consumed by amdgpu_program_register_sequence(). */
static const u32 fiji_mgcg_cgcg_init[] =
{
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
	mmPCIE_INDEX, 0xffffffff, 0x0140001c,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmSMC_IND_INDEX_4, 0xffffffff, 0xC060000C,
	mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
	mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};
 246
/* Iceland/Topaz clock-gating golden settings: {register, mask, value}
 * triples consumed by amdgpu_program_register_sequence().  Note the
 * PCIE/SMC index registers are loaded with indirect offsets here. */
static const u32 iceland_mgcg_cgcg_init[] =
{
	mmPCIE_INDEX, 0xffffffff, ixPCIE_CNTL2,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmSMC_IND_INDEX_4, 0xffffffff, ixCGTT_ROM_CLK_CTRL0,
	mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};
 255
/* Carrizo clock-gating golden settings: {register, mask, value} triples
 * consumed by amdgpu_program_register_sequence(). */
static const u32 cz_mgcg_cgcg_init[] =
{
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
	mmPCIE_INDEX, 0xffffffff, 0x0140001c,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};
 264
/* Stoney clock-gating golden settings: {register, mask, value} triples
 * consumed by amdgpu_program_register_sequence(). */
static const u32 stoney_mgcg_cgcg_init[] =
{
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xffffffff, 0x00000104,
	mmHDP_HOST_PATH_CNTL, 0xffffffff, 0x0f000027,
};
 271
/*
 * vi_init_golden_registers - apply the per-ASIC golden register settings
 *
 * Under SR-IOV the VF-specific sequence from mxgpu_vi is used instead of
 * the bare-metal tables.  Polaris parts currently have no table and fall
 * through to the default case.  Held under grbm_idx_mutex because some
 * of the programmed registers depend on the current GRBM_GFX_INDEX.
 */
static void vi_init_golden_registers(struct amdgpu_device *adev)
{
	/* Some of the registers might be dependent on GRBM_GFX_INDEX */
	mutex_lock(&adev->grbm_idx_mutex);

	if (amdgpu_sriov_vf(adev)) {
		xgpu_vi_init_golden_registers(adev);
		mutex_unlock(&adev->grbm_idx_mutex);
		return;
	}

	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		amdgpu_program_register_sequence(adev,
						 iceland_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(iceland_mgcg_cgcg_init));
		break;
	case CHIP_FIJI:
		amdgpu_program_register_sequence(adev,
						 fiji_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(fiji_mgcg_cgcg_init));
		break;
	case CHIP_TONGA:
		amdgpu_program_register_sequence(adev,
						 tonga_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(tonga_mgcg_cgcg_init));
		break;
	case CHIP_CARRIZO:
		amdgpu_program_register_sequence(adev,
						 cz_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(cz_mgcg_cgcg_init));
		break;
	case CHIP_STONEY:
		amdgpu_program_register_sequence(adev,
						 stoney_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(stoney_mgcg_cgcg_init));
		break;
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
	default:
		/* no golden settings for these parts */
		break;
	}
	mutex_unlock(&adev->grbm_idx_mutex);
}
 317
/**
 * vi_get_xclk - get the xclk
 *
 * @adev: amdgpu_device pointer
 *
 * Returns the reference clock used by the gfx engine
 * (VI).
 */
static u32 vi_get_xclk(struct amdgpu_device *adev)
{
	u32 reference_clock = adev->clock.spll.reference_freq;
	u32 tmp;

	/* APUs use the SPLL reference clock directly */
	if (adev->flags & AMD_IS_APU)
		return reference_clock;

	tmp = RREG32_SMC(ixCG_CLKPIN_CNTL_2);
	if (REG_GET_FIELD(tmp, CG_CLKPIN_CNTL_2, MUX_TCLK_TO_XCLK))
		return 1000;	/* fixed value when TCLK is muxed onto XCLK */

	tmp = RREG32_SMC(ixCG_CLKPIN_CNTL);
	if (REG_GET_FIELD(tmp, CG_CLKPIN_CNTL, XTALIN_DIVIDE))
		return reference_clock / 4;	/* crystal input is divided down */

	return reference_clock;
}
 344
/**
 * vi_srbm_select - select specific register instances
 *
 * @adev: amdgpu_device pointer
 * @me: selected ME (micro engine)
 * @pipe: pipe
 * @queue: queue
 * @vmid: VMID
 *
 * Switches the currently active registers instances.  Some
 * registers are instanced per VMID, others are instanced per
 * me/pipe/queue combination.
 */
void vi_srbm_select(struct amdgpu_device *adev,
		     u32 me, u32 pipe, u32 queue, u32 vmid)
{
	/* build the full selector first, then latch it with a single write
	 * so the hardware never sees a partially-updated combination */
	u32 srbm_gfx_cntl = 0;
	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, PIPEID, pipe);
	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, MEID, me);
	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, VMID, vmid);
	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, QUEUEID, queue);
	WREG32(mmSRBM_GFX_CNTL, srbm_gfx_cntl);
}
 368
/* Stub: toggling VGA state is not implemented for VI yet. */
static void vi_vga_set_state(struct amdgpu_device *adev, bool state)
{
	/* todo */
}
 373
/*
 * vi_read_disabled_bios - read the video BIOS with the ROM force-enabled
 *
 * Saves the current BUS_CNTL, VGA control and ROM_CNTL state, enables
 * ROM access and disables VGA mode, reads the BIOS via
 * amdgpu_read_bios(), then restores every saved register.  The VGA
 * registers are only touched when a display block is present
 * (num_crtc != 0).  Returns the amdgpu_read_bios() result.
 */
static bool vi_read_disabled_bios(struct amdgpu_device *adev)
{
	u32 bus_cntl;
	u32 d1vga_control = 0;
	u32 d2vga_control = 0;
	u32 vga_render_control = 0;
	u32 rom_cntl;
	bool r;

	/* save current state */
	bus_cntl = RREG32(mmBUS_CNTL);
	if (adev->mode_info.num_crtc) {
		d1vga_control = RREG32(mmD1VGA_CONTROL);
		d2vga_control = RREG32(mmD2VGA_CONTROL);
		vga_render_control = RREG32(mmVGA_RENDER_CONTROL);
	}
	rom_cntl = RREG32_SMC(ixROM_CNTL);

	/* enable the rom */
	WREG32(mmBUS_CNTL, (bus_cntl & ~BUS_CNTL__BIOS_ROM_DIS_MASK));
	if (adev->mode_info.num_crtc) {
		/* Disable VGA mode */
		WREG32(mmD1VGA_CONTROL,
		       (d1vga_control & ~(D1VGA_CONTROL__D1VGA_MODE_ENABLE_MASK |
					  D1VGA_CONTROL__D1VGA_TIMING_SELECT_MASK)));
		WREG32(mmD2VGA_CONTROL,
		       (d2vga_control & ~(D2VGA_CONTROL__D2VGA_MODE_ENABLE_MASK |
					  D2VGA_CONTROL__D2VGA_TIMING_SELECT_MASK)));
		WREG32(mmVGA_RENDER_CONTROL,
		       (vga_render_control & ~VGA_RENDER_CONTROL__VGA_VSTATUS_CNTL_MASK));
	}
	WREG32_SMC(ixROM_CNTL, rom_cntl | ROM_CNTL__SCK_OVERWRITE_MASK);

	r = amdgpu_read_bios(adev);

	/* restore regs */
	WREG32(mmBUS_CNTL, bus_cntl);
	if (adev->mode_info.num_crtc) {
		WREG32(mmD1VGA_CONTROL, d1vga_control);
		WREG32(mmD2VGA_CONTROL, d2vga_control);
		WREG32(mmVGA_RENDER_CONTROL, vga_render_control);
	}
	WREG32_SMC(ixROM_CNTL, rom_cntl);
	return r;
}
 418
/*
 * vi_read_bios_from_rom - fetch the vbios image through the SMC ROM port
 *
 * Streams length_bytes (rounded up to a dword multiple — the caller's
 * buffer must accommodate that) from the ROM via the SMC indirect
 * ROM_INDEX/ROM_DATA registers.  Not applicable to APUs, whose vbios
 * lives inside the system BIOS image.  Returns false on invalid
 * arguments or on APUs, true once the copy completed.
 */
static bool vi_read_bios_from_rom(struct amdgpu_device *adev,
				  u8 *bios, u32 length_bytes)
{
	u32 *dw_ptr;
	unsigned long flags;
	u32 i, length_dw;

	if (bios == NULL)
		return false;
	if (length_bytes == 0)
		return false;
	/* APU vbios image is part of sbios image */
	if (adev->flags & AMD_IS_APU)
		return false;

	dw_ptr = (u32 *)bios;
	length_dw = ALIGN(length_bytes, 4) / 4;
	/* take the smc lock since we are using the smc index */
	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	/* set rom index to 0 */
	WREG32(mmSMC_IND_INDEX_11, ixROM_INDEX);
	WREG32(mmSMC_IND_DATA_11, 0);
	/* set index to data for continuous read */
	WREG32(mmSMC_IND_INDEX_11, ixROM_DATA);
	for (i = 0; i < length_dw; i++)
		dw_ptr[i] = RREG32(mmSMC_IND_DATA_11);
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);

	return true;
}
 449
/*
 * vi_detect_hw_virtualization - classify the virtualization environment
 *
 * Decodes mmBIF_IOV_FUNC_IDENTIFIER into adev->virt.caps: bit0 marks a
 * VF, bit31 marks IOV enabled.  A zero reading inside a virtual machine
 * means GPU passthrough instead of SR-IOV.
 */
static void vi_detect_hw_virtualization(struct amdgpu_device *adev)
{
	uint32_t reg = RREG32(mmBIF_IOV_FUNC_IDENTIFIER);
	/* bit0: 0 means pf and 1 means vf */
	/* bit31: 0 means disable IOV and 1 means enable */
	if (reg & 1)
		adev->virt.caps |= AMDGPU_SRIOV_CAPS_IS_VF;

	if (reg & 0x80000000)
		adev->virt.caps |= AMDGPU_SRIOV_CAPS_ENABLE_IOV;

	if (reg == 0) {
		if (is_virtual_machine()) /* passthrough mode excludes sr-iov mode */
			adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
	}
}
 466
/* Whitelist of registers userspace may read via vi_read_register().
 * Entries whose second field is true are GRBM-indexed (per-SE/SH);
 * the field defaults to false when omitted. */
static const struct amdgpu_allowed_register_entry vi_allowed_read_registers[] = {
	{mmGRBM_STATUS},
	{mmGRBM_STATUS2},
	{mmGRBM_STATUS_SE0},
	{mmGRBM_STATUS_SE1},
	{mmGRBM_STATUS_SE2},
	{mmGRBM_STATUS_SE3},
	{mmSRBM_STATUS},
	{mmSRBM_STATUS2},
	{mmSRBM_STATUS3},
	{mmSDMA0_STATUS_REG + SDMA0_REGISTER_OFFSET},
	{mmSDMA0_STATUS_REG + SDMA1_REGISTER_OFFSET},
	{mmCP_STAT},
	{mmCP_STALLED_STAT1},
	{mmCP_STALLED_STAT2},
	{mmCP_STALLED_STAT3},
	{mmCP_CPF_BUSY_STAT},
	{mmCP_CPF_STALLED_STAT1},
	{mmCP_CPF_STATUS},
	{mmCP_CPC_BUSY_STAT},
	{mmCP_CPC_STALLED_STAT1},
	{mmCP_CPC_STATUS},
	{mmGB_ADDR_CONFIG},
	{mmMC_ARB_RAMCFG},
	{mmGB_TILE_MODE0},
	{mmGB_TILE_MODE1},
	{mmGB_TILE_MODE2},
	{mmGB_TILE_MODE3},
	{mmGB_TILE_MODE4},
	{mmGB_TILE_MODE5},
	{mmGB_TILE_MODE6},
	{mmGB_TILE_MODE7},
	{mmGB_TILE_MODE8},
	{mmGB_TILE_MODE9},
	{mmGB_TILE_MODE10},
	{mmGB_TILE_MODE11},
	{mmGB_TILE_MODE12},
	{mmGB_TILE_MODE13},
	{mmGB_TILE_MODE14},
	{mmGB_TILE_MODE15},
	{mmGB_TILE_MODE16},
	{mmGB_TILE_MODE17},
	{mmGB_TILE_MODE18},
	{mmGB_TILE_MODE19},
	{mmGB_TILE_MODE20},
	{mmGB_TILE_MODE21},
	{mmGB_TILE_MODE22},
	{mmGB_TILE_MODE23},
	{mmGB_TILE_MODE24},
	{mmGB_TILE_MODE25},
	{mmGB_TILE_MODE26},
	{mmGB_TILE_MODE27},
	{mmGB_TILE_MODE28},
	{mmGB_TILE_MODE29},
	{mmGB_TILE_MODE30},
	{mmGB_TILE_MODE31},
	{mmGB_MACROTILE_MODE0},
	{mmGB_MACROTILE_MODE1},
	{mmGB_MACROTILE_MODE2},
	{mmGB_MACROTILE_MODE3},
	{mmGB_MACROTILE_MODE4},
	{mmGB_MACROTILE_MODE5},
	{mmGB_MACROTILE_MODE6},
	{mmGB_MACROTILE_MODE7},
	{mmGB_MACROTILE_MODE8},
	{mmGB_MACROTILE_MODE9},
	{mmGB_MACROTILE_MODE10},
	{mmGB_MACROTILE_MODE11},
	{mmGB_MACROTILE_MODE12},
	{mmGB_MACROTILE_MODE13},
	{mmGB_MACROTILE_MODE14},
	{mmGB_MACROTILE_MODE15},
	{mmCC_RB_BACKEND_DISABLE, true},
	{mmGC_USER_RB_BACKEND_DISABLE, true},
	{mmGB_BACKEND_MAP, false},
	{mmPA_SC_RASTER_CONFIG, true},
	{mmPA_SC_RASTER_CONFIG_1, true},
};
 545
/*
 * vi_get_register_value - fetch a register value, preferring driver caches
 *
 * For GRBM-indexed registers, per-SE/SH values already cached in
 * adev->gfx.config.rb_config are returned directly; anything else is
 * read via MMIO with GRBM_GFX_INDEX temporarily pointed at the
 * requested se/sh instance (0xffffffff means broadcast/"don't care").
 * For non-indexed registers, cached config values (gb_addr_config,
 * tile/macrotile mode arrays, ...) are returned where available,
 * falling back to a plain MMIO read.
 */
static uint32_t vi_get_register_value(struct amdgpu_device *adev,
				      bool indexed, u32 se_num,
				      u32 sh_num, u32 reg_offset)
{
	if (indexed) {
		uint32_t val;
		/* broadcast selectors map to cache instance 0 */
		unsigned se_idx = (se_num == 0xffffffff) ? 0 : se_num;
		unsigned sh_idx = (sh_num == 0xffffffff) ? 0 : sh_num;

		switch (reg_offset) {
		case mmCC_RB_BACKEND_DISABLE:
			return adev->gfx.config.rb_config[se_idx][sh_idx].rb_backend_disable;
		case mmGC_USER_RB_BACKEND_DISABLE:
			return adev->gfx.config.rb_config[se_idx][sh_idx].user_rb_backend_disable;
		case mmPA_SC_RASTER_CONFIG:
			return adev->gfx.config.rb_config[se_idx][sh_idx].raster_config;
		case mmPA_SC_RASTER_CONFIG_1:
			return adev->gfx.config.rb_config[se_idx][sh_idx].raster_config_1;
		}

		/* not cached: select the instance, read, restore broadcast */
		mutex_lock(&adev->grbm_idx_mutex);
		if (se_num != 0xffffffff || sh_num != 0xffffffff)
			amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);

		val = RREG32(reg_offset);

		if (se_num != 0xffffffff || sh_num != 0xffffffff)
			amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
		mutex_unlock(&adev->grbm_idx_mutex);
		return val;
	} else {
		unsigned idx;

		switch (reg_offset) {
		case mmGB_ADDR_CONFIG:
			return adev->gfx.config.gb_addr_config;
		case mmMC_ARB_RAMCFG:
			return adev->gfx.config.mc_arb_ramcfg;
		case mmGB_TILE_MODE0:
		case mmGB_TILE_MODE1:
		case mmGB_TILE_MODE2:
		case mmGB_TILE_MODE3:
		case mmGB_TILE_MODE4:
		case mmGB_TILE_MODE5:
		case mmGB_TILE_MODE6:
		case mmGB_TILE_MODE7:
		case mmGB_TILE_MODE8:
		case mmGB_TILE_MODE9:
		case mmGB_TILE_MODE10:
		case mmGB_TILE_MODE11:
		case mmGB_TILE_MODE12:
		case mmGB_TILE_MODE13:
		case mmGB_TILE_MODE14:
		case mmGB_TILE_MODE15:
		case mmGB_TILE_MODE16:
		case mmGB_TILE_MODE17:
		case mmGB_TILE_MODE18:
		case mmGB_TILE_MODE19:
		case mmGB_TILE_MODE20:
		case mmGB_TILE_MODE21:
		case mmGB_TILE_MODE22:
		case mmGB_TILE_MODE23:
		case mmGB_TILE_MODE24:
		case mmGB_TILE_MODE25:
		case mmGB_TILE_MODE26:
		case mmGB_TILE_MODE27:
		case mmGB_TILE_MODE28:
		case mmGB_TILE_MODE29:
		case mmGB_TILE_MODE30:
		case mmGB_TILE_MODE31:
			/* tile mode registers are contiguous; index off MODE0 */
			idx = (reg_offset - mmGB_TILE_MODE0);
			return adev->gfx.config.tile_mode_array[idx];
		case mmGB_MACROTILE_MODE0:
		case mmGB_MACROTILE_MODE1:
		case mmGB_MACROTILE_MODE2:
		case mmGB_MACROTILE_MODE3:
		case mmGB_MACROTILE_MODE4:
		case mmGB_MACROTILE_MODE5:
		case mmGB_MACROTILE_MODE6:
		case mmGB_MACROTILE_MODE7:
		case mmGB_MACROTILE_MODE8:
		case mmGB_MACROTILE_MODE9:
		case mmGB_MACROTILE_MODE10:
		case mmGB_MACROTILE_MODE11:
		case mmGB_MACROTILE_MODE12:
		case mmGB_MACROTILE_MODE13:
		case mmGB_MACROTILE_MODE14:
		case mmGB_MACROTILE_MODE15:
			idx = (reg_offset - mmGB_MACROTILE_MODE0);
			return adev->gfx.config.macrotile_mode_array[idx];
		default:
			return RREG32(reg_offset);
		}
	}
}
 641
 642static int vi_read_register(struct amdgpu_device *adev, u32 se_num,
 643                            u32 sh_num, u32 reg_offset, u32 *value)
 644{
 645        uint32_t i;
 646
 647        *value = 0;
 648        for (i = 0; i < ARRAY_SIZE(vi_allowed_read_registers); i++) {
 649                bool indexed = vi_allowed_read_registers[i].grbm_indexed;
 650
 651                if (reg_offset != vi_allowed_read_registers[i].reg_offset)
 652                        continue;
 653
 654                *value = vi_get_register_value(adev, indexed, se_num, sh_num,
 655                                               reg_offset);
 656                return 0;
 657        }
 658        return -EINVAL;
 659}
 660
/*
 * vi_gpu_pci_config_reset - reset the GPU through PCI config space
 *
 * Disables bus mastering, triggers the config-space reset, then polls
 * CONFIG_MEMSIZE until it reads something other than 0xffffffff (the
 * all-ones value returned while the ASIC is still in reset).  On
 * success bus mastering is re-enabled and has_hw_reset is recorded;
 * returns -EINVAL if the ASIC never comes back within usec_timeout.
 */
static int vi_gpu_pci_config_reset(struct amdgpu_device *adev)
{
	u32 i;

	dev_info(adev->dev, "GPU pci config reset\n");

	/* disable BM */
	pci_clear_master(adev->pdev);
	/* reset */
	amdgpu_pci_config_reset(adev);

	udelay(100);

	/* wait for asic to come out of reset */
	for (i = 0; i < adev->usec_timeout; i++) {
		if (RREG32(mmCONFIG_MEMSIZE) != 0xffffffff) {
			/* enable BM */
			pci_set_master(adev->pdev);
			adev->has_hw_reset = true;
			return 0;
		}
		udelay(1);
	}
	return -EINVAL;
}
 686
/**
 * vi_asic_reset - soft reset GPU
 *
 * @adev: amdgpu_device pointer
 *
 * Look up which blocks are hung and attempt
 * to reset them.
 * Returns 0 for success.
 */
static int vi_asic_reset(struct amdgpu_device *adev)
{
	int r;

	/* flag the engine as hung in the atombios scratch regs for the
	 * duration of the reset */
	amdgpu_atombios_scratch_regs_engine_hung(adev, true);

	r = vi_gpu_pci_config_reset(adev);

	amdgpu_atombios_scratch_regs_engine_hung(adev, false);

	return r;
}
 708
/* Report the ASIC memory size as exposed by the CONFIG_MEMSIZE register. */
static u32 vi_get_config_memsize(struct amdgpu_device *adev)
{
	return RREG32(mmCONFIG_MEMSIZE);
}
 713
/*
 * vi_set_uvd_clock - program one UVD clock (VCLK or DCLK)
 *
 * Looks up atom dividers for the requested frequency, programs the
 * divider into cntl_reg, then polls status_reg (up to ~1s) for the
 * clock-changed status bit.  Returns 0 on success, atom lookup errors,
 * or -ETIMEDOUT if the status bit never sets.
 *
 * NOTE(review): the CG_DCLK_* field macros are used for both the DCLK
 * and VCLK control/status registers — presumably the bitfield layout is
 * identical; confirm against the register headers.
 */
static int vi_set_uvd_clock(struct amdgpu_device *adev, u32 clock,
			u32 cntl_reg, u32 status_reg)
{
	int r, i;
	struct atom_clock_dividers dividers;
	uint32_t tmp;

	r = amdgpu_atombios_get_clock_dividers(adev,
					       COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
					       clock, false, &dividers);
	if (r)
		return r;

	/* replace the divider, keep the other control bits */
	tmp = RREG32_SMC(cntl_reg);
	tmp &= ~(CG_DCLK_CNTL__DCLK_DIR_CNTL_EN_MASK |
		CG_DCLK_CNTL__DCLK_DIVIDER_MASK);
	tmp |= dividers.post_divider;
	WREG32_SMC(cntl_reg, tmp);

	for (i = 0; i < 100; i++) {
		if (RREG32_SMC(status_reg) & CG_DCLK_STATUS__DCLK_STATUS_MASK)
			break;
		mdelay(10);
	}
	if (i == 100)
		return -ETIMEDOUT;

	return 0;
}
 743
 744static int vi_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
 745{
 746        int r;
 747
 748        r = vi_set_uvd_clock(adev, vclk, ixCG_VCLK_CNTL, ixCG_VCLK_STATUS);
 749        if (r)
 750                return r;
 751
 752        r = vi_set_uvd_clock(adev, dclk, ixCG_DCLK_CNTL, ixCG_DCLK_STATUS);
 753        if (r)
 754                return r;
 755
 756        return 0;
 757}
 758
/*
 * vi_set_vce_clocks - program the VCE ECLK
 *
 * Looks up atom dividers for ecclk, waits for the ECLK status bit to be
 * set before touching the divider, programs CG_ECLK_CNTL, then waits
 * for the status bit again.  Each wait polls up to 100 * 10ms; returns
 * -ETIMEDOUT if either wait expires, 0 on success.  Note that evclk is
 * currently unused — only ecclk is programmed here.
 */
static int vi_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
{
	int r, i;
	struct atom_clock_dividers dividers;
	u32 tmp;

	r = amdgpu_atombios_get_clock_dividers(adev,
					       COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
					       ecclk, false, &dividers);
	if (r)
		return r;

	/* make sure the clock is stable before reprogramming it */
	for (i = 0; i < 100; i++) {
		if (RREG32_SMC(ixCG_ECLK_STATUS) & CG_ECLK_STATUS__ECLK_STATUS_MASK)
			break;
		mdelay(10);
	}
	if (i == 100)
		return -ETIMEDOUT;

	/* replace the divider, keep the other control bits */
	tmp = RREG32_SMC(ixCG_ECLK_CNTL);
	tmp &= ~(CG_ECLK_CNTL__ECLK_DIR_CNTL_EN_MASK |
		CG_ECLK_CNTL__ECLK_DIVIDER_MASK);
	tmp |= dividers.post_divider;
	WREG32_SMC(ixCG_ECLK_CNTL, tmp);

	/* wait for the new setting to take effect */
	for (i = 0; i < 100; i++) {
		if (RREG32_SMC(ixCG_ECLK_STATUS) & CG_ECLK_STATUS__ECLK_STATUS_MASK)
			break;
		mdelay(10);
	}
	if (i == 100)
		return -ETIMEDOUT;

	return 0;
}
 795
 796static void vi_pcie_gen3_enable(struct amdgpu_device *adev)
 797{
 798        if (pci_is_root_bus(adev->pdev->bus))
 799                return;
 800
 801        if (amdgpu_pcie_gen2 == 0)
 802                return;
 803
 804        if (adev->flags & AMD_IS_APU)
 805                return;
 806
 807        if (!(adev->pm.pcie_gen_mask & (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
 808                                        CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)))
 809                return;
 810
 811        /* todo */
 812}
 813
/* Program PCIe ASPM; skipped when disabled via the amdgpu_aspm module
 * parameter.  The programming itself is not implemented yet (todo). */
static void vi_program_aspm(struct amdgpu_device *adev)
{

	if (amdgpu_aspm == 0)
		return;

	/* todo */
}
 822
 823static void vi_enable_doorbell_aperture(struct amdgpu_device *adev,
 824                                        bool enable)
 825{
 826        u32 tmp;
 827
 828        /* not necessary on CZ */
 829        if (adev->flags & AMD_IS_APU)
 830                return;
 831
 832        tmp = RREG32(mmBIF_DOORBELL_APER_EN);
 833        if (enable)
 834                tmp = REG_SET_FIELD(tmp, BIF_DOORBELL_APER_EN, BIF_DOORBELL_APER_EN, 1);
 835        else
 836                tmp = REG_SET_FIELD(tmp, BIF_DOORBELL_APER_EN, BIF_DOORBELL_APER_EN, 0);
 837
 838        WREG32(mmBIF_DOORBELL_APER_EN, tmp);
 839}
 840
/* SMC fuse location of the ATI revision id on APUs */
#define ATI_REV_ID_FUSE_MACRO__ADDRESS      0xC0014044
#define ATI_REV_ID_FUSE_MACRO__SHIFT        9
#define ATI_REV_ID_FUSE_MACRO__MASK         0x00001E00

/*
 * vi_get_rev_id - read the ASIC revision id
 *
 * APUs carry the revision in an SMC fuse; dGPUs expose it through the
 * PCIE_EFUSE4 strap register.
 */
static uint32_t vi_get_rev_id(struct amdgpu_device *adev)
{
	if (adev->flags & AMD_IS_APU)
		return (RREG32_SMC(ATI_REV_ID_FUSE_MACRO__ADDRESS) & ATI_REV_ID_FUSE_MACRO__MASK)
			>> ATI_REV_ID_FUSE_MACRO__SHIFT;
	else
		return (RREG32(mmPCIE_EFUSE4) & PCIE_EFUSE4__STRAP_BIF_ATI_REV_ID_MASK)
			>> PCIE_EFUSE4__STRAP_BIF_ATI_REV_ID__SHIFT;
}
 854
/* ASIC-level callbacks registered with the amdgpu core for VI parts */
static const struct amdgpu_asic_funcs vi_asic_funcs =
{
	.read_disabled_bios = &vi_read_disabled_bios,
	.read_bios_from_rom = &vi_read_bios_from_rom,
	.read_register = &vi_read_register,
	.reset = &vi_asic_reset,
	.set_vga_state = &vi_vga_set_state,
	.get_xclk = &vi_get_xclk,
	.set_uvd_clocks = &vi_set_uvd_clocks,
	.set_vce_clocks = &vi_set_vce_clocks,
	.get_config_memsize = &vi_get_config_memsize,
};
 867
 868#define CZ_REV_BRISTOL(rev)      \
 869        ((rev >= 0xC8 && rev <= 0xCE) || (rev >= 0xE1 && rev <= 0xE6))
 870
/*
 * vi_common_early_init - install register accessors and per-ASIC feature flags
 *
 * Selects SMC register access helpers (APU vs discrete), wires up the
 * indirect register accessors, reads the hardware revision, and fills in
 * the clockgating (cg_flags) / powergating (pg_flags) capability masks
 * plus external_rev_id for each supported ASIC.  Returns -EINVAL for
 * unknown ASIC types.
 */
static int vi_common_early_init(void *handle)
{
	bool smc_enabled = false;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* APUs route SMC register access through the CZ helpers. */
	if (adev->flags & AMD_IS_APU) {
		adev->smc_rreg = &cz_smc_rreg;
		adev->smc_wreg = &cz_smc_wreg;
	} else {
		adev->smc_rreg = &vi_smc_rreg;
		adev->smc_wreg = &vi_smc_wreg;
	}
	adev->pcie_rreg = &vi_pcie_rreg;
	adev->pcie_wreg = &vi_pcie_wreg;
	adev->uvd_ctx_rreg = &vi_uvd_ctx_rreg;
	adev->uvd_ctx_wreg = &vi_uvd_ctx_wreg;
	adev->didt_rreg = &vi_didt_rreg;
	adev->didt_wreg = &vi_didt_wreg;
	adev->gc_cac_rreg = &vi_gc_cac_rreg;
	adev->gc_cac_wreg = &vi_gc_cac_wreg;

	adev->asic_funcs = &vi_asic_funcs;

	/* NOTE(review): smc_enabled is computed here but not consumed anywhere
	 * in this function - looks like leftover state; confirm before removal.
	 */
	if (amdgpu_get_ip_block(adev, AMD_IP_BLOCK_TYPE_SMC) &&
		(amdgpu_ip_block_mask & (1 << AMD_IP_BLOCK_TYPE_SMC)))
		smc_enabled = true;

	adev->rev_id = vi_get_rev_id(adev);
	adev->external_rev_id = 0xFF;	/* sentinel; overwritten per-ASIC below */
	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		/* Topaz: no clock- or powergating features advertised. */
		adev->cg_flags = 0;
		adev->pg_flags = 0;
		adev->external_rev_id = 0x1;
		break;
	case CHIP_FIJI:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGTS |
			AMD_CG_SUPPORT_GFX_CGTS_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_UVD_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x3c;
		break;
	case CHIP_TONGA:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_DRM_LS |
			AMD_CG_SUPPORT_UVD_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x14;
		break;
	case CHIP_POLARIS11:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_DRM_LS |
			AMD_CG_SUPPORT_UVD_MGCG |
			AMD_CG_SUPPORT_VCE_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x5A;
		break;
	case CHIP_POLARIS10:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_DRM_LS |
			AMD_CG_SUPPORT_UVD_MGCG |
			AMD_CG_SUPPORT_VCE_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x50;
		break;
	case CHIP_POLARIS12:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_DRM_LS |
			AMD_CG_SUPPORT_UVD_MGCG |
			AMD_CG_SUPPORT_VCE_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x64;
		break;
	case CHIP_CARRIZO:
		adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG |
			AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGTS |
			AMD_CG_SUPPORT_GFX_CGTS_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_VCE_MGCG;
		/* rev0 hardware requires workarounds to support PG */
		adev->pg_flags = 0;
		/* Powergating is only enabled on rev != 0 silicon and on
		 * Bristol Ridge (identified via PCI revision range).
		 */
		if (adev->rev_id != 0x00 || CZ_REV_BRISTOL(adev->pdev->revision)) {
			adev->pg_flags |= AMD_PG_SUPPORT_GFX_SMG |
				AMD_PG_SUPPORT_GFX_PIPELINE |
				AMD_PG_SUPPORT_CP |
				AMD_PG_SUPPORT_UVD |
				AMD_PG_SUPPORT_VCE;
		}
		adev->external_rev_id = adev->rev_id + 0x1;
		break;
	case CHIP_STONEY:
		adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG |
			AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGTS |
			AMD_CG_SUPPORT_GFX_CGTS_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_VCE_MGCG;
		adev->pg_flags = AMD_PG_SUPPORT_GFX_PG |
			AMD_PG_SUPPORT_GFX_SMG |
			AMD_PG_SUPPORT_GFX_PIPELINE |
			AMD_PG_SUPPORT_CP |
			AMD_PG_SUPPORT_UVD |
			AMD_PG_SUPPORT_VCE;
		adev->external_rev_id = adev->rev_id + 0x61;
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	if (amdgpu_sriov_vf(adev)) {
		amdgpu_virt_init_setting(adev);
		xgpu_vi_mailbox_set_irq_funcs(adev);
	}

	/* vi use smc load by default */
	adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);

	amdgpu_get_pcie_info(adev);

	return 0;
}
1081
/*
 * vi_common_late_init - late init for the common IP block
 *
 * Only SR-IOV virtual functions have work to do here: hooking up the
 * host<->guest mailbox interrupt.  Always returns 0.
 */
static int vi_common_late_init(void *handle)
{
	struct amdgpu_device *adev = handle;

	if (!amdgpu_sriov_vf(adev))
		return 0;

	xgpu_vi_mailbox_get_irq(adev);
	return 0;
}
1091
/*
 * vi_common_sw_init - software init for the common IP block
 *
 * For SR-IOV virtual functions, registers the mailbox interrupt id;
 * nothing to do otherwise.  Always returns 0.
 */
static int vi_common_sw_init(void *handle)
{
	struct amdgpu_device *adev = handle;

	if (!amdgpu_sriov_vf(adev))
		return 0;

	xgpu_vi_mailbox_add_irq_id(adev);
	return 0;
}
1101
/* No common software state to tear down on VI. */
static int vi_common_sw_fini(void *handle)
{
	return 0;
}
1106
/*
 * vi_common_hw_init - program common hardware state
 *
 * Applies golden register settings, brings up the PCIe gen2/3 link and
 * ASPM, and opens the doorbell aperture.  Always returns 0.
 * NOTE(review): the sequence below appears order-dependent (golden
 * registers before link training) - preserve ordering when modifying.
 */
static int vi_common_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* move the golden regs per IP block */
	vi_init_golden_registers(adev);
	/* enable pcie gen2/3 link */
	vi_pcie_gen3_enable(adev);
	/* enable aspm */
	vi_program_aspm(adev);
	/* enable the doorbell aperture */
	vi_enable_doorbell_aperture(adev, true);

	return 0;
}
1122
/*
 * vi_common_hw_fini - tear down common hardware state
 *
 * Closes the doorbell aperture and, for SR-IOV virtual functions,
 * releases the mailbox interrupt.  Always returns 0.
 */
static int vi_common_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* disable the doorbell aperture */
	vi_enable_doorbell_aperture(adev, false);

	if (amdgpu_sriov_vf(adev))
		xgpu_vi_mailbox_put_irq(adev);

	return 0;
}
1135
/* Suspend of the common block is identical to hw_fini. */
static int vi_common_suspend(void *handle)
{
	return vi_common_hw_fini(handle);
}
1142
/* Resume of the common block is identical to hw_init. */
static int vi_common_resume(void *handle)
{
	return vi_common_hw_init(handle);
}
1149
/* The common block has no busy state to report; always idle. */
static bool vi_common_is_idle(void *handle)
{
	return true;
}
1154
/* Nothing to wait for; the common block is always idle. */
static int vi_common_wait_for_idle(void *handle)
{
	return 0;
}
1159
/* No soft reset is implemented for the common block. */
static int vi_common_soft_reset(void *handle)
{
	return 0;
}
1164
1165static void vi_update_bif_medium_grain_light_sleep(struct amdgpu_device *adev,
1166                                                   bool enable)
1167{
1168        uint32_t temp, data;
1169
1170        temp = data = RREG32_PCIE(ixPCIE_CNTL2);
1171
1172        if (enable && (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS))
1173                data |= PCIE_CNTL2__SLV_MEM_LS_EN_MASK |
1174                                PCIE_CNTL2__MST_MEM_LS_EN_MASK |
1175                                PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK;
1176        else
1177                data &= ~(PCIE_CNTL2__SLV_MEM_LS_EN_MASK |
1178                                PCIE_CNTL2__MST_MEM_LS_EN_MASK |
1179                                PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK);
1180
1181        if (temp != data)
1182                WREG32_PCIE(ixPCIE_CNTL2, data);
1183}
1184
1185static void vi_update_hdp_medium_grain_clock_gating(struct amdgpu_device *adev,
1186                                                    bool enable)
1187{
1188        uint32_t temp, data;
1189
1190        temp = data = RREG32(mmHDP_HOST_PATH_CNTL);
1191
1192        if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG))
1193                data &= ~HDP_HOST_PATH_CNTL__CLOCK_GATING_DIS_MASK;
1194        else
1195                data |= HDP_HOST_PATH_CNTL__CLOCK_GATING_DIS_MASK;
1196
1197        if (temp != data)
1198                WREG32(mmHDP_HOST_PATH_CNTL, data);
1199}
1200
1201static void vi_update_hdp_light_sleep(struct amdgpu_device *adev,
1202                                      bool enable)
1203{
1204        uint32_t temp, data;
1205
1206        temp = data = RREG32(mmHDP_MEM_POWER_LS);
1207
1208        if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
1209                data |= HDP_MEM_POWER_LS__LS_ENABLE_MASK;
1210        else
1211                data &= ~HDP_MEM_POWER_LS__LS_ENABLE_MASK;
1212
1213        if (temp != data)
1214                WREG32(mmHDP_MEM_POWER_LS, data);
1215}
1216
1217static void vi_update_drm_light_sleep(struct amdgpu_device *adev,
1218                                      bool enable)
1219{
1220        uint32_t temp, data;
1221
1222        temp = data = RREG32(0x157a);
1223
1224        if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DRM_LS))
1225                data |= 1;
1226        else
1227                data &= ~1;
1228
1229        if (temp != data)
1230                WREG32(0x157a, data);
1231}
1232
1233
1234static void vi_update_rom_medium_grain_clock_gating(struct amdgpu_device *adev,
1235                                                    bool enable)
1236{
1237        uint32_t temp, data;
1238
1239        temp = data = RREG32_SMC(ixCGTT_ROM_CLK_CTRL0);
1240
1241        if (enable && (adev->cg_flags & AMD_CG_SUPPORT_ROM_MGCG))
1242                data &= ~(CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
1243                                CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK);
1244        else
1245                data |= CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
1246                                CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK;
1247
1248        if (temp != data)
1249                WREG32_SMC(ixCGTT_ROM_CLK_CTRL0, data);
1250}
1251
1252static int vi_common_set_clockgating_state_by_smu(void *handle,
1253                                           enum amd_clockgating_state state)
1254{
1255        uint32_t msg_id, pp_state = 0;
1256        uint32_t pp_support_state = 0;
1257        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1258
1259        if (adev->cg_flags & (AMD_CG_SUPPORT_MC_LS | AMD_CG_SUPPORT_MC_MGCG)) {
1260                if (adev->cg_flags & AMD_CG_SUPPORT_MC_LS) {
1261                        pp_support_state = AMD_CG_SUPPORT_MC_LS;
1262                        pp_state = PP_STATE_LS;
1263                }
1264                if (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG) {
1265                        pp_support_state |= AMD_CG_SUPPORT_MC_MGCG;
1266                        pp_state |= PP_STATE_CG;
1267                }
1268                if (state == AMD_CG_STATE_UNGATE)
1269                        pp_state = 0;
1270                msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
1271                               PP_BLOCK_SYS_MC,
1272                               pp_support_state,
1273                               pp_state);
1274                if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
1275                        amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
1276        }
1277
1278        if (adev->cg_flags & (AMD_CG_SUPPORT_SDMA_LS | AMD_CG_SUPPORT_SDMA_MGCG)) {
1279                if (adev->cg_flags & AMD_CG_SUPPORT_SDMA_LS) {
1280                        pp_support_state = AMD_CG_SUPPORT_SDMA_LS;
1281                        pp_state = PP_STATE_LS;
1282                }
1283                if (adev->cg_flags & AMD_CG_SUPPORT_SDMA_MGCG) {
1284                        pp_support_state |= AMD_CG_SUPPORT_SDMA_MGCG;
1285                        pp_state |= PP_STATE_CG;
1286                }
1287                if (state == AMD_CG_STATE_UNGATE)
1288                        pp_state = 0;
1289                msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
1290                               PP_BLOCK_SYS_SDMA,
1291                               pp_support_state,
1292                               pp_state);
1293                if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
1294                        amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
1295        }
1296
1297        if (adev->cg_flags & (AMD_CG_SUPPORT_HDP_LS | AMD_CG_SUPPORT_HDP_MGCG)) {
1298                if (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS) {
1299                        pp_support_state = AMD_CG_SUPPORT_HDP_LS;
1300                        pp_state = PP_STATE_LS;
1301                }
1302                if (adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG) {
1303                        pp_support_state |= AMD_CG_SUPPORT_HDP_MGCG;
1304                        pp_state |= PP_STATE_CG;
1305                }
1306                if (state == AMD_CG_STATE_UNGATE)
1307                        pp_state = 0;
1308                msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
1309                               PP_BLOCK_SYS_HDP,
1310                               pp_support_state,
1311                               pp_state);
1312                if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
1313                        amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
1314        }
1315
1316
1317        if (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS) {
1318                if (state == AMD_CG_STATE_UNGATE)
1319                        pp_state = 0;
1320                else
1321                        pp_state = PP_STATE_LS;
1322
1323                msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
1324                               PP_BLOCK_SYS_BIF,
1325                               PP_STATE_SUPPORT_LS,
1326                                pp_state);
1327                if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
1328                        amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
1329        }
1330        if (adev->cg_flags & AMD_CG_SUPPORT_BIF_MGCG) {
1331                if (state == AMD_CG_STATE_UNGATE)
1332                        pp_state = 0;
1333                else
1334                        pp_state = PP_STATE_CG;
1335
1336                msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
1337                               PP_BLOCK_SYS_BIF,
1338                               PP_STATE_SUPPORT_CG,
1339                               pp_state);
1340                if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
1341                        amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
1342        }
1343
1344        if (adev->cg_flags & AMD_CG_SUPPORT_DRM_LS) {
1345
1346                if (state == AMD_CG_STATE_UNGATE)
1347                        pp_state = 0;
1348                else
1349                        pp_state = PP_STATE_LS;
1350
1351                msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
1352                               PP_BLOCK_SYS_DRM,
1353                               PP_STATE_SUPPORT_LS,
1354                               pp_state);
1355                if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
1356                        amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
1357        }
1358
1359        if (adev->cg_flags & AMD_CG_SUPPORT_ROM_MGCG) {
1360
1361                if (state == AMD_CG_STATE_UNGATE)
1362                        pp_state = 0;
1363                else
1364                        pp_state = PP_STATE_CG;
1365
1366                msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
1367                               PP_BLOCK_SYS_ROM,
1368                               PP_STATE_SUPPORT_CG,
1369                               pp_state);
1370                if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
1371                        amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
1372        }
1373        return 0;
1374}
1375
1376static int vi_common_set_clockgating_state(void *handle,
1377                                           enum amd_clockgating_state state)
1378{
1379        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1380
1381        if (amdgpu_sriov_vf(adev))
1382                return 0;
1383
1384        switch (adev->asic_type) {
1385        case CHIP_FIJI:
1386                vi_update_bif_medium_grain_light_sleep(adev,
1387                                state == AMD_CG_STATE_GATE);
1388                vi_update_hdp_medium_grain_clock_gating(adev,
1389                                state == AMD_CG_STATE_GATE);
1390                vi_update_hdp_light_sleep(adev,
1391                                state == AMD_CG_STATE_GATE);
1392                vi_update_rom_medium_grain_clock_gating(adev,
1393                                state == AMD_CG_STATE_GATE);
1394                break;
1395        case CHIP_CARRIZO:
1396        case CHIP_STONEY:
1397                vi_update_bif_medium_grain_light_sleep(adev,
1398                                state == AMD_CG_STATE_GATE);
1399                vi_update_hdp_medium_grain_clock_gating(adev,
1400                                state == AMD_CG_STATE_GATE);
1401                vi_update_hdp_light_sleep(adev,
1402                                state == AMD_CG_STATE_GATE);
1403                vi_update_drm_light_sleep(adev,
1404                                state == AMD_CG_STATE_GATE);
1405                break;
1406        case CHIP_TONGA:
1407        case CHIP_POLARIS10:
1408        case CHIP_POLARIS11:
1409        case CHIP_POLARIS12:
1410                vi_common_set_clockgating_state_by_smu(adev, state);
1411        default:
1412                break;
1413        }
1414        return 0;
1415}
1416
/* Powergating for the common block is a no-op on VI. */
static int vi_common_set_powergating_state(void *handle,
					    enum amd_powergating_state state)
{
	return 0;
}
1422
1423static void vi_common_get_clockgating_state(void *handle, u32 *flags)
1424{
1425        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1426        int data;
1427
1428        if (amdgpu_sriov_vf(adev))
1429                *flags = 0;
1430
1431        /* AMD_CG_SUPPORT_BIF_LS */
1432        data = RREG32_PCIE(ixPCIE_CNTL2);
1433        if (data & PCIE_CNTL2__SLV_MEM_LS_EN_MASK)
1434                *flags |= AMD_CG_SUPPORT_BIF_LS;
1435
1436        /* AMD_CG_SUPPORT_HDP_LS */
1437        data = RREG32(mmHDP_MEM_POWER_LS);
1438        if (data & HDP_MEM_POWER_LS__LS_ENABLE_MASK)
1439                *flags |= AMD_CG_SUPPORT_HDP_LS;
1440
1441        /* AMD_CG_SUPPORT_HDP_MGCG */
1442        data = RREG32(mmHDP_HOST_PATH_CNTL);
1443        if (!(data & HDP_HOST_PATH_CNTL__CLOCK_GATING_DIS_MASK))
1444                *flags |= AMD_CG_SUPPORT_HDP_MGCG;
1445
1446        /* AMD_CG_SUPPORT_ROM_MGCG */
1447        data = RREG32_SMC(ixCGTT_ROM_CLK_CTRL0);
1448        if (!(data & CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK))
1449                *flags |= AMD_CG_SUPPORT_ROM_MGCG;
1450}
1451
/* IP-block callback table for the VI "common" block. */
static const struct amd_ip_funcs vi_common_ip_funcs = {
	.name = "vi_common",
	.early_init = vi_common_early_init,
	.late_init = vi_common_late_init,
	.sw_init = vi_common_sw_init,
	.sw_fini = vi_common_sw_fini,
	.hw_init = vi_common_hw_init,
	.hw_fini = vi_common_hw_fini,
	.suspend = vi_common_suspend,
	.resume = vi_common_resume,
	.is_idle = vi_common_is_idle,
	.wait_for_idle = vi_common_wait_for_idle,
	.soft_reset = vi_common_soft_reset,
	.set_clockgating_state = vi_common_set_clockgating_state,
	.set_powergating_state = vi_common_set_powergating_state,
	.get_clockgating_state = vi_common_get_clockgating_state,
};
1469
/* Version descriptor registered first for every VI ASIC in vi_set_ip_blocks(). */
static const struct amdgpu_ip_block_version vi_common_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_COMMON,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &vi_common_ip_funcs,
};
1478
/*
 * vi_set_ip_blocks - register the IP blocks for a VI-family ASIC
 *
 * Detects virtualization, installs SR-IOV ops when running as a VF, then
 * adds the per-ASIC list of IP blocks.  Registration order is the
 * hw_init order (common -> gmc -> ih -> powerplay -> display -> gfx ->
 * sdma -> media), so preserve it when editing.  Returns 0 on success or
 * -EINVAL for unsupported ASICs.
 */
int vi_set_ip_blocks(struct amdgpu_device *adev)
{
	/* in early init stage, vbios code won't work */
	vi_detect_hw_virtualization(adev);

	if (amdgpu_sriov_vf(adev))
		adev->virt.ops = &xgpu_vi_virt_ops;

	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		/* topaz has no DCE, UVD, VCE */
		amdgpu_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_ip_block_add(adev, &gmc_v7_4_ip_block);
		amdgpu_ip_block_add(adev, &iceland_ih_ip_block);
		amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
		if (adev->enable_virtual_display)
			amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
		amdgpu_ip_block_add(adev, &gfx_v8_0_ip_block);
		amdgpu_ip_block_add(adev, &sdma_v2_4_ip_block);
		break;
	case CHIP_FIJI:
		amdgpu_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_ip_block_add(adev, &gmc_v8_5_ip_block);
		amdgpu_ip_block_add(adev, &tonga_ih_ip_block);
		amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
		/* display: virtual for VF/virtual-display, DC when built in
		 * and supported, legacy DCE otherwise */
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_ip_block_add(adev, &dm_ip_block);
#endif
		else
			amdgpu_ip_block_add(adev, &dce_v10_1_ip_block);
		amdgpu_ip_block_add(adev, &gfx_v8_0_ip_block);
		amdgpu_ip_block_add(adev, &sdma_v3_0_ip_block);
		/* UVD/VCE are handled by the host under SR-IOV */
		if (!amdgpu_sriov_vf(adev)) {
			amdgpu_ip_block_add(adev, &uvd_v6_0_ip_block);
			amdgpu_ip_block_add(adev, &vce_v3_0_ip_block);
		}
		break;
	case CHIP_TONGA:
		amdgpu_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_ip_block_add(adev, &gmc_v8_0_ip_block);
		amdgpu_ip_block_add(adev, &tonga_ih_ip_block);
		amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_ip_block_add(adev, &dm_ip_block);
#endif
		else
			amdgpu_ip_block_add(adev, &dce_v10_0_ip_block);
		amdgpu_ip_block_add(adev, &gfx_v8_0_ip_block);
		amdgpu_ip_block_add(adev, &sdma_v3_0_ip_block);
		if (!amdgpu_sriov_vf(adev)) {
			amdgpu_ip_block_add(adev, &uvd_v5_0_ip_block);
			amdgpu_ip_block_add(adev, &vce_v3_0_ip_block);
		}
		break;
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
		amdgpu_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_ip_block_add(adev, &gmc_v8_1_ip_block);
		amdgpu_ip_block_add(adev, &tonga_ih_ip_block);
		amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
		if (adev->enable_virtual_display)
			amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_ip_block_add(adev, &dm_ip_block);
#endif
		else
			amdgpu_ip_block_add(adev, &dce_v11_2_ip_block);
		amdgpu_ip_block_add(adev, &gfx_v8_0_ip_block);
		amdgpu_ip_block_add(adev, &sdma_v3_1_ip_block);
		amdgpu_ip_block_add(adev, &uvd_v6_3_ip_block);
		amdgpu_ip_block_add(adev, &vce_v3_4_ip_block);
		break;
	case CHIP_CARRIZO:
		amdgpu_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_ip_block_add(adev, &gmc_v8_0_ip_block);
		amdgpu_ip_block_add(adev, &cz_ih_ip_block);
		amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
		if (adev->enable_virtual_display)
			amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_ip_block_add(adev, &dm_ip_block);
#endif
		else
			amdgpu_ip_block_add(adev, &dce_v11_0_ip_block);
		amdgpu_ip_block_add(adev, &gfx_v8_0_ip_block);
		amdgpu_ip_block_add(adev, &sdma_v3_0_ip_block);
		amdgpu_ip_block_add(adev, &uvd_v6_0_ip_block);
		amdgpu_ip_block_add(adev, &vce_v3_1_ip_block);
#if defined(CONFIG_DRM_AMD_ACP)
		amdgpu_ip_block_add(adev, &acp_ip_block);
#endif
		break;
	case CHIP_STONEY:
		amdgpu_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_ip_block_add(adev, &gmc_v8_0_ip_block);
		amdgpu_ip_block_add(adev, &cz_ih_ip_block);
		amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
		if (adev->enable_virtual_display)
			amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_ip_block_add(adev, &dm_ip_block);
#endif
		else
			amdgpu_ip_block_add(adev, &dce_v11_0_ip_block);
		amdgpu_ip_block_add(adev, &gfx_v8_1_ip_block);
		amdgpu_ip_block_add(adev, &sdma_v3_0_ip_block);
		amdgpu_ip_block_add(adev, &uvd_v6_2_ip_block);
		amdgpu_ip_block_add(adev, &vce_v3_4_ip_block);
#if defined(CONFIG_DRM_AMD_ACP)
		amdgpu_ip_block_add(adev, &acp_ip_block);
#endif
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	return 0;
}
1608