linux/drivers/gpu/drm/amd/amdgpu/soc15.c
<<
>>
Prefs
   1/*
   2 * Copyright 2016 Advanced Micro Devices, Inc.
   3 *
   4 * Permission is hereby granted, free of charge, to any person obtaining a
   5 * copy of this software and associated documentation files (the "Software"),
   6 * to deal in the Software without restriction, including without limitation
   7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   8 * and/or sell copies of the Software, and to permit persons to whom the
   9 * Software is furnished to do so, subject to the following conditions:
  10 *
  11 * The above copyright notice and this permission notice shall be included in
  12 * all copies or substantial portions of the Software.
  13 *
  14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  20 * OTHER DEALINGS IN THE SOFTWARE.
  21 *
  22 */
  23#include <linux/firmware.h>
  24#include <linux/slab.h>
  25#include <linux/module.h>
  26#include <drm/drmP.h>
  27#include "amdgpu.h"
  28#include "amdgpu_atomfirmware.h"
  29#include "amdgpu_ih.h"
  30#include "amdgpu_uvd.h"
  31#include "amdgpu_vce.h"
  32#include "amdgpu_ucode.h"
  33#include "amdgpu_psp.h"
  34#include "atom.h"
  35#include "amd_pcie.h"
  36
  37#include "vega10/soc15ip.h"
  38#include "vega10/UVD/uvd_7_0_offset.h"
  39#include "vega10/GC/gc_9_0_offset.h"
  40#include "vega10/GC/gc_9_0_sh_mask.h"
  41#include "vega10/SDMA0/sdma0_4_0_offset.h"
  42#include "vega10/SDMA1/sdma1_4_0_offset.h"
  43#include "vega10/HDP/hdp_4_0_offset.h"
  44#include "vega10/HDP/hdp_4_0_sh_mask.h"
  45#include "vega10/MP/mp_9_0_offset.h"
  46#include "vega10/MP/mp_9_0_sh_mask.h"
  47#include "vega10/SMUIO/smuio_9_0_offset.h"
  48#include "vega10/SMUIO/smuio_9_0_sh_mask.h"
  49
  50#include "soc15.h"
  51#include "soc15_common.h"
  52#include "gfx_v9_0.h"
  53#include "gmc_v9_0.h"
  54#include "gfxhub_v1_0.h"
  55#include "mmhub_v1_0.h"
  56#include "vega10_ih.h"
  57#include "sdma_v4_0.h"
  58#include "uvd_v7_0.h"
  59#include "vce_v4_0.h"
  60#include "vcn_v1_0.h"
  61#include "amdgpu_powerplay.h"
  62#include "dce_virtual.h"
  63#include "mxgpu_ai.h"
  64
  65MODULE_FIRMWARE("amdgpu/vega10_smc.bin");
  66
  67#define mmFabricConfigAccessControl                                                                    0x0410
  68#define mmFabricConfigAccessControl_BASE_IDX                                                           0
  69#define mmFabricConfigAccessControl_DEFAULT                                      0x00000000
  70//FabricConfigAccessControl
  71#define FabricConfigAccessControl__CfgRegInstAccEn__SHIFT                                                     0x0
  72#define FabricConfigAccessControl__CfgRegInstAccRegLock__SHIFT                                                0x1
  73#define FabricConfigAccessControl__CfgRegInstID__SHIFT                                                        0x10
  74#define FabricConfigAccessControl__CfgRegInstAccEn_MASK                                                       0x00000001L
  75#define FabricConfigAccessControl__CfgRegInstAccRegLock_MASK                                                  0x00000002L
  76#define FabricConfigAccessControl__CfgRegInstID_MASK                                                          0x00FF0000L
  77
  78
  79#define mmDF_PIE_AON0_DfGlobalClkGater                                                                 0x00fc
  80#define mmDF_PIE_AON0_DfGlobalClkGater_BASE_IDX                                                        0
  81//DF_PIE_AON0_DfGlobalClkGater
  82#define DF_PIE_AON0_DfGlobalClkGater__MGCGMode__SHIFT                                                         0x0
  83#define DF_PIE_AON0_DfGlobalClkGater__MGCGMode_MASK                                                           0x0000000FL
  84
/*
 * Values programmed into DF_PIE_AON0_DfGlobalClkGater.MGCGMode by
 * soc15_update_df_medium_grain_clock_gating() below; the non-zero
 * entries select progressively longer clock-gating entry delays.
 */
enum {
	DF_MGCG_DISABLE = 0,
	DF_MGCG_ENABLE_00_CYCLE_DELAY =1,
	DF_MGCG_ENABLE_01_CYCLE_DELAY =2,
	DF_MGCG_ENABLE_15_CYCLE_DELAY =13,
	DF_MGCG_ENABLE_31_CYCLE_DELAY =14,
	DF_MGCG_ENABLE_63_CYCLE_DELAY =15
};
  93
  94#define mmMP0_MISC_CGTT_CTRL0                                                                   0x01b9
  95#define mmMP0_MISC_CGTT_CTRL0_BASE_IDX                                                          0
  96#define mmMP0_MISC_LIGHT_SLEEP_CTRL                                                             0x01ba
  97#define mmMP0_MISC_LIGHT_SLEEP_CTRL_BASE_IDX                                                    0
  98
  99/*
 100 * Indirect registers accessor
 101 */
 102static u32 soc15_pcie_rreg(struct amdgpu_device *adev, u32 reg)
 103{
 104        unsigned long flags, address, data;
 105        u32 r;
 106        struct nbio_pcie_index_data *nbio_pcie_id;
 107
 108        if (adev->flags & AMD_IS_APU)
 109                nbio_pcie_id = &nbio_v7_0_pcie_index_data;
 110        else
 111                nbio_pcie_id = &nbio_v6_1_pcie_index_data;
 112
 113        address = nbio_pcie_id->index_offset;
 114        data = nbio_pcie_id->data_offset;
 115
 116        spin_lock_irqsave(&adev->pcie_idx_lock, flags);
 117        WREG32(address, reg);
 118        (void)RREG32(address);
 119        r = RREG32(data);
 120        spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
 121        return r;
 122}
 123
 124static void soc15_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
 125{
 126        unsigned long flags, address, data;
 127        struct nbio_pcie_index_data *nbio_pcie_id;
 128
 129        if (adev->flags & AMD_IS_APU)
 130                nbio_pcie_id = &nbio_v7_0_pcie_index_data;
 131        else
 132                nbio_pcie_id = &nbio_v6_1_pcie_index_data;
 133
 134        address = nbio_pcie_id->index_offset;
 135        data = nbio_pcie_id->data_offset;
 136
 137        spin_lock_irqsave(&adev->pcie_idx_lock, flags);
 138        WREG32(address, reg);
 139        (void)RREG32(address);
 140        WREG32(data, v);
 141        (void)RREG32(data);
 142        spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
 143}
 144
 145static u32 soc15_uvd_ctx_rreg(struct amdgpu_device *adev, u32 reg)
 146{
 147        unsigned long flags, address, data;
 148        u32 r;
 149
 150        address = SOC15_REG_OFFSET(UVD, 0, mmUVD_CTX_INDEX);
 151        data = SOC15_REG_OFFSET(UVD, 0, mmUVD_CTX_DATA);
 152
 153        spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
 154        WREG32(address, ((reg) & 0x1ff));
 155        r = RREG32(data);
 156        spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
 157        return r;
 158}
 159
 160static void soc15_uvd_ctx_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
 161{
 162        unsigned long flags, address, data;
 163
 164        address = SOC15_REG_OFFSET(UVD, 0, mmUVD_CTX_INDEX);
 165        data = SOC15_REG_OFFSET(UVD, 0, mmUVD_CTX_DATA);
 166
 167        spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
 168        WREG32(address, ((reg) & 0x1ff));
 169        WREG32(data, (v));
 170        spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
 171}
 172
 173static u32 soc15_didt_rreg(struct amdgpu_device *adev, u32 reg)
 174{
 175        unsigned long flags, address, data;
 176        u32 r;
 177
 178        address = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_INDEX);
 179        data = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_DATA);
 180
 181        spin_lock_irqsave(&adev->didt_idx_lock, flags);
 182        WREG32(address, (reg));
 183        r = RREG32(data);
 184        spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
 185        return r;
 186}
 187
 188static void soc15_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
 189{
 190        unsigned long flags, address, data;
 191
 192        address = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_INDEX);
 193        data = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_DATA);
 194
 195        spin_lock_irqsave(&adev->didt_idx_lock, flags);
 196        WREG32(address, (reg));
 197        WREG32(data, (v));
 198        spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
 199}
 200
 201static u32 soc15_get_config_memsize(struct amdgpu_device *adev)
 202{
 203        if (adev->flags & AMD_IS_APU)
 204                return nbio_v7_0_get_memsize(adev);
 205        else
 206                return nbio_v6_1_get_memsize(adev);
 207}
 208
/* Per-ASIC "golden" register sequences; both are currently empty. */
static const u32 vega10_golden_init[] =
{
};

static const u32 raven_golden_init[] =
{
};
 216
/*
 * Program the per-ASIC golden register sequence.  Held under
 * grbm_idx_mutex because some registers in a sequence may be banked by
 * GRBM_GFX_INDEX.  Unknown ASIC types are silently ignored.
 */
static void soc15_init_golden_registers(struct amdgpu_device *adev)
{
	/* Some of the registers might be dependent on GRBM_GFX_INDEX */
	mutex_lock(&adev->grbm_idx_mutex);

	switch (adev->asic_type) {
	case CHIP_VEGA10:
		amdgpu_program_register_sequence(adev,
						 vega10_golden_init,
						 (const u32)ARRAY_SIZE(vega10_golden_init));
		break;
	case CHIP_RAVEN:
		amdgpu_program_register_sequence(adev,
						 raven_golden_init,
						 (const u32)ARRAY_SIZE(raven_golden_init));
		break;
	default:
		break;
	}
	mutex_unlock(&adev->grbm_idx_mutex);
}
 238static u32 soc15_get_xclk(struct amdgpu_device *adev)
 239{
 240        if (adev->asic_type == CHIP_VEGA10)
 241                return adev->clock.spll.reference_freq/4;
 242        else
 243                return adev->clock.spll.reference_freq;
 244}
 245
 246
/*
 * soc15_grbm_select - steer subsequent GC register accesses to a specific
 * ME/pipe/queue/VMID by packing all four fields into GRBM_GFX_CNTL.
 * Callers are expected to serialize against each other (typically under
 * grbm_idx_mutex) — no locking is done here.
 */
void soc15_grbm_select(struct amdgpu_device *adev,
		     u32 me, u32 pipe, u32 queue, u32 vmid)
{
	u32 grbm_gfx_cntl = 0;
	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, PIPEID, pipe);
	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, MEID, me);
	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, VMID, vmid);
	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, QUEUEID, queue);

	WREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_GFX_CNTL), grbm_gfx_cntl);
}
 258
/* Enable/disable VGA access — not yet implemented for SOC15. */
static void soc15_vga_set_state(struct amdgpu_device *adev, bool state)
{
	/* todo */
}
 263
/* Read the vbios while the ASIC is posted-off — not yet implemented. */
static bool soc15_read_disabled_bios(struct amdgpu_device *adev)
{
	/* todo */
	return false;
}
 269
 270static bool soc15_read_bios_from_rom(struct amdgpu_device *adev,
 271                                     u8 *bios, u32 length_bytes)
 272{
 273        u32 *dw_ptr;
 274        u32 i, length_dw;
 275
 276        if (bios == NULL)
 277                return false;
 278        if (length_bytes == 0)
 279                return false;
 280        /* APU vbios image is part of sbios image */
 281        if (adev->flags & AMD_IS_APU)
 282                return false;
 283
 284        dw_ptr = (u32 *)bios;
 285        length_dw = ALIGN(length_bytes, 4) / 4;
 286
 287        /* set rom index to 0 */
 288        WREG32(SOC15_REG_OFFSET(SMUIO, 0, mmROM_INDEX), 0);
 289        /* read out the rom data */
 290        for (i = 0; i < length_dw; i++)
 291                dw_ptr[i] = RREG32(SOC15_REG_OFFSET(SMUIO, 0, mmROM_DATA));
 292
 293        return true;
 294}
 295
/*
 * Whitelist of registers userspace may read through soc15_read_register().
 * Only the first struct member (the register offset) is initialized;
 * remaining members take their zero defaults.
 */
static struct amdgpu_allowed_register_entry soc15_allowed_read_registers[] = {
	{ SOC15_REG_OFFSET(GC, 0, mmGRBM_STATUS)},
	{ SOC15_REG_OFFSET(GC, 0, mmGRBM_STATUS2)},
	{ SOC15_REG_OFFSET(GC, 0, mmGRBM_STATUS_SE0)},
	{ SOC15_REG_OFFSET(GC, 0, mmGRBM_STATUS_SE1)},
	{ SOC15_REG_OFFSET(GC, 0, mmGRBM_STATUS_SE2)},
	{ SOC15_REG_OFFSET(GC, 0, mmGRBM_STATUS_SE3)},
	{ SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_STATUS_REG)},
	{ SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_STATUS_REG)},
	{ SOC15_REG_OFFSET(GC, 0, mmCP_STAT)},
	{ SOC15_REG_OFFSET(GC, 0, mmCP_STALLED_STAT1)},
	{ SOC15_REG_OFFSET(GC, 0, mmCP_STALLED_STAT2)},
	{ SOC15_REG_OFFSET(GC, 0, mmCP_STALLED_STAT3)},
	{ SOC15_REG_OFFSET(GC, 0, mmCP_CPF_BUSY_STAT)},
	{ SOC15_REG_OFFSET(GC, 0, mmCP_CPF_STALLED_STAT1)},
	{ SOC15_REG_OFFSET(GC, 0, mmCP_CPF_STATUS)},
	{ SOC15_REG_OFFSET(GC, 0, mmCP_CPC_STALLED_STAT1)},
	{ SOC15_REG_OFFSET(GC, 0, mmCP_CPC_STATUS)},
	{ SOC15_REG_OFFSET(GC, 0, mmGB_ADDR_CONFIG)},
};
 316
 317static uint32_t soc15_read_indexed_register(struct amdgpu_device *adev, u32 se_num,
 318                                         u32 sh_num, u32 reg_offset)
 319{
 320        uint32_t val;
 321
 322        mutex_lock(&adev->grbm_idx_mutex);
 323        if (se_num != 0xffffffff || sh_num != 0xffffffff)
 324                amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);
 325
 326        val = RREG32(reg_offset);
 327
 328        if (se_num != 0xffffffff || sh_num != 0xffffffff)
 329                amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
 330        mutex_unlock(&adev->grbm_idx_mutex);
 331        return val;
 332}
 333
 334static uint32_t soc15_get_register_value(struct amdgpu_device *adev,
 335                                         bool indexed, u32 se_num,
 336                                         u32 sh_num, u32 reg_offset)
 337{
 338        if (indexed) {
 339                return soc15_read_indexed_register(adev, se_num, sh_num, reg_offset);
 340        } else {
 341                switch (reg_offset) {
 342                case SOC15_REG_OFFSET(GC, 0, mmGB_ADDR_CONFIG):
 343                        return adev->gfx.config.gb_addr_config;
 344                default:
 345                        return RREG32(reg_offset);
 346                }
 347        }
 348}
 349
 350static int soc15_read_register(struct amdgpu_device *adev, u32 se_num,
 351                            u32 sh_num, u32 reg_offset, u32 *value)
 352{
 353        uint32_t i;
 354
 355        *value = 0;
 356        for (i = 0; i < ARRAY_SIZE(soc15_allowed_read_registers); i++) {
 357                if (reg_offset != soc15_allowed_read_registers[i].reg_offset)
 358                        continue;
 359
 360                *value = soc15_get_register_value(adev,
 361                                                  soc15_allowed_read_registers[i].grbm_indexed,
 362                                                  se_num, sh_num, reg_offset);
 363                return 0;
 364        }
 365        return -EINVAL;
 366}
 367
/*
 * Reset the GPU through PCI config space, then poll the NBIO memsize
 * register until it reads something other than the all-ones "still in
 * reset" pattern, or until adev->usec_timeout expires (timeout is not
 * reported to the caller).
 */
static void soc15_gpu_pci_config_reset(struct amdgpu_device *adev)
{
	u32 i;

	dev_info(adev->dev, "GPU pci config reset\n");

	/* disable BM */
	pci_clear_master(adev->pdev);
	/* reset */
	amdgpu_pci_config_reset(adev);

	udelay(100);

	/* wait for asic to come out of reset */
	for (i = 0; i < adev->usec_timeout; i++) {
		u32 memsize = (adev->flags & AMD_IS_APU) ?
			nbio_v7_0_get_memsize(adev) :
			nbio_v6_1_get_memsize(adev);
		/* 0xffffffff means the ASIC is not responding yet */
		if (memsize != 0xffffffff)
			break;
		udelay(1);
	}

}
 392
/*
 * Full ASIC reset entry point.  The scratch-register "engine hung" flag
 * is raised around the PCI config reset so the vbios/firmware can tell a
 * reset is in progress.  Always returns 0.
 */
static int soc15_asic_reset(struct amdgpu_device *adev)
{
	amdgpu_atomfirmware_scratch_regs_engine_hung(adev, true);

	soc15_gpu_pci_config_reset(adev);

	amdgpu_atomfirmware_scratch_regs_engine_hung(adev, false);

	return 0;
}
 403
 404/*static int soc15_set_uvd_clock(struct amdgpu_device *adev, u32 clock,
 405                        u32 cntl_reg, u32 status_reg)
 406{
 407        return 0;
 408}*/
 409
/*
 * Set the UVD vclk/dclk frequencies.  The implementation is stubbed out
 * (see the commented scaffold); currently a no-op that reports success.
 */
static int soc15_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
{
	/*int r;

	r = soc15_set_uvd_clock(adev, vclk, ixCG_VCLK_CNTL, ixCG_VCLK_STATUS);
	if (r)
		return r;

	r = soc15_set_uvd_clock(adev, dclk, ixCG_DCLK_CNTL, ixCG_DCLK_STATUS);
	*/
	return 0;
}
 422
 423static int soc15_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
 424{
 425        /* todo */
 426
 427        return 0;
 428}
 429
 430static void soc15_pcie_gen3_enable(struct amdgpu_device *adev)
 431{
 432        if (pci_is_root_bus(adev->pdev->bus))
 433                return;
 434
 435        if (amdgpu_pcie_gen2 == 0)
 436                return;
 437
 438        if (adev->flags & AMD_IS_APU)
 439                return;
 440
 441        if (!(adev->pm.pcie_gen_mask & (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
 442                                        CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)))
 443                return;
 444
 445        /* todo */
 446}
 447
 448static void soc15_program_aspm(struct amdgpu_device *adev)
 449{
 450
 451        if (amdgpu_aspm == 0)
 452                return;
 453
 454        /* todo */
 455}
 456
 457static void soc15_enable_doorbell_aperture(struct amdgpu_device *adev,
 458                                        bool enable)
 459{
 460        if (adev->flags & AMD_IS_APU) {
 461                nbio_v7_0_enable_doorbell_aperture(adev, enable);
 462        } else {
 463                nbio_v6_1_enable_doorbell_aperture(adev, enable);
 464                nbio_v6_1_enable_doorbell_selfring_aperture(adev, enable);
 465        }
 466}
 467
/* Common IP block descriptor shared by Vega10 and Raven (see
 * soc15_set_ip_blocks below); wires up soc15_common_ip_funcs. */
static const struct amdgpu_ip_block_version vega10_common_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_COMMON,
	.major = 2,
	.minor = 0,
	.rev = 0,
	.funcs = &soc15_common_ip_funcs,
};
 476
/*
 * Register the set of IP blocks for this ASIC.  The add order matters:
 * it is the order in which the blocks are initialized later.  Returns
 * -EINVAL for ASIC types this file does not handle.
 */
int soc15_set_ip_blocks(struct amdgpu_device *adev)
{
	nbio_v6_1_detect_hw_virt(adev);

	if (amdgpu_sriov_vf(adev))
		adev->virt.ops = &xgpu_ai_virt_ops;

	switch (adev->asic_type) {
	case CHIP_VEGA10:
		amdgpu_ip_block_add(adev, &vega10_common_ip_block);
		amdgpu_ip_block_add(adev, &gmc_v9_0_ip_block);
		amdgpu_ip_block_add(adev, &vega10_ih_ip_block);
		/* PSP only when firmware loading uses it (2) or auto (-1) */
		if (amdgpu_fw_load_type == 2 || amdgpu_fw_load_type == -1)
			amdgpu_ip_block_add(adev, &psp_v3_1_ip_block);
		if (!amdgpu_sriov_vf(adev))
			amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
		amdgpu_ip_block_add(adev, &gfx_v9_0_ip_block);
		amdgpu_ip_block_add(adev, &sdma_v4_0_ip_block);
		amdgpu_ip_block_add(adev, &uvd_v7_0_ip_block);
		amdgpu_ip_block_add(adev, &vce_v4_0_ip_block);
		break;
	case CHIP_RAVEN:
		amdgpu_ip_block_add(adev, &vega10_common_ip_block);
		amdgpu_ip_block_add(adev, &gmc_v9_0_ip_block);
		amdgpu_ip_block_add(adev, &vega10_ih_ip_block);
		amdgpu_ip_block_add(adev, &psp_v10_0_ip_block);
		amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
		amdgpu_ip_block_add(adev, &gfx_v9_0_ip_block);
		amdgpu_ip_block_add(adev, &sdma_v4_0_ip_block);
		/* Raven uses VCN instead of separate UVD/VCE blocks */
		amdgpu_ip_block_add(adev, &vcn_v1_0_ip_block);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
 518
 519static uint32_t soc15_get_rev_id(struct amdgpu_device *adev)
 520{
 521        if (adev->flags & AMD_IS_APU)
 522                return nbio_v7_0_get_rev_id(adev);
 523        else
 524                return nbio_v6_1_get_rev_id(adev);
 525}
 526
 527
/* Placeholder until the MC IP implements a real wait-for-idle. */
int gmc_v9_0_mc_wait_for_idle(struct amdgpu_device *adev)
{
	/* to be implemented in MC IP*/
	return 0;
}
 533
/* ASIC-level callback table installed in soc15_common_early_init(). */
static const struct amdgpu_asic_funcs soc15_asic_funcs =
{
	.read_disabled_bios = &soc15_read_disabled_bios,
	.read_bios_from_rom = &soc15_read_bios_from_rom,
	.read_register = &soc15_read_register,
	.reset = &soc15_asic_reset,
	.set_vga_state = &soc15_vga_set_state,
	.get_xclk = &soc15_get_xclk,
	.set_uvd_clocks = &soc15_set_uvd_clocks,
	.set_vce_clocks = &soc15_set_vce_clocks,
	.get_config_memsize = &soc15_get_config_memsize,
};
 546
 547static int soc15_common_early_init(void *handle)
 548{
 549        bool psp_enabled = false;
 550        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 551
 552        adev->smc_rreg = NULL;
 553        adev->smc_wreg = NULL;
 554        adev->pcie_rreg = &soc15_pcie_rreg;
 555        adev->pcie_wreg = &soc15_pcie_wreg;
 556        adev->uvd_ctx_rreg = &soc15_uvd_ctx_rreg;
 557        adev->uvd_ctx_wreg = &soc15_uvd_ctx_wreg;
 558        adev->didt_rreg = &soc15_didt_rreg;
 559        adev->didt_wreg = &soc15_didt_wreg;
 560
 561        adev->asic_funcs = &soc15_asic_funcs;
 562
 563        if (amdgpu_get_ip_block(adev, AMD_IP_BLOCK_TYPE_PSP) &&
 564                (amdgpu_ip_block_mask & (1 << AMD_IP_BLOCK_TYPE_PSP)))
 565                psp_enabled = true;
 566
 567        /*
 568         * nbio need be used for both sdma and gfx9, but only
 569         * initializes once
 570         */
 571        switch(adev->asic_type) {
 572        case CHIP_VEGA10:
 573                nbio_v6_1_init(adev);
 574                break;
 575        case CHIP_RAVEN:
 576                nbio_v7_0_init(adev);
 577                break;
 578        default:
 579                return -EINVAL;
 580        }
 581
 582        adev->rev_id = soc15_get_rev_id(adev);
 583        adev->external_rev_id = 0xFF;
 584        switch (adev->asic_type) {
 585        case CHIP_VEGA10:
 586                adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
 587                        AMD_CG_SUPPORT_GFX_MGLS |
 588                        AMD_CG_SUPPORT_GFX_RLC_LS |
 589                        AMD_CG_SUPPORT_GFX_CP_LS |
 590                        AMD_CG_SUPPORT_GFX_3D_CGCG |
 591                        AMD_CG_SUPPORT_GFX_3D_CGLS |
 592                        AMD_CG_SUPPORT_GFX_CGCG |
 593                        AMD_CG_SUPPORT_GFX_CGLS |
 594                        AMD_CG_SUPPORT_BIF_MGCG |
 595                        AMD_CG_SUPPORT_BIF_LS |
 596                        AMD_CG_SUPPORT_HDP_LS |
 597                        AMD_CG_SUPPORT_DRM_MGCG |
 598                        AMD_CG_SUPPORT_DRM_LS |
 599                        AMD_CG_SUPPORT_ROM_MGCG |
 600                        AMD_CG_SUPPORT_DF_MGCG |
 601                        AMD_CG_SUPPORT_SDMA_MGCG |
 602                        AMD_CG_SUPPORT_SDMA_LS |
 603                        AMD_CG_SUPPORT_MC_MGCG |
 604                        AMD_CG_SUPPORT_MC_LS;
 605                adev->pg_flags = 0;
 606                adev->external_rev_id = 0x1;
 607                break;
 608        case CHIP_RAVEN:
 609                adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
 610                        AMD_CG_SUPPORT_GFX_MGLS |
 611                        AMD_CG_SUPPORT_GFX_RLC_LS |
 612                        AMD_CG_SUPPORT_GFX_CP_LS |
 613                        AMD_CG_SUPPORT_GFX_3D_CGCG |
 614                        AMD_CG_SUPPORT_GFX_3D_CGLS |
 615                        AMD_CG_SUPPORT_GFX_CGCG |
 616                        AMD_CG_SUPPORT_GFX_CGLS |
 617                        AMD_CG_SUPPORT_BIF_MGCG |
 618                        AMD_CG_SUPPORT_BIF_LS |
 619                        AMD_CG_SUPPORT_HDP_MGCG |
 620                        AMD_CG_SUPPORT_HDP_LS |
 621                        AMD_CG_SUPPORT_DRM_MGCG |
 622                        AMD_CG_SUPPORT_DRM_LS |
 623                        AMD_CG_SUPPORT_ROM_MGCG |
 624                        AMD_CG_SUPPORT_MC_MGCG |
 625                        AMD_CG_SUPPORT_MC_LS |
 626                        AMD_CG_SUPPORT_SDMA_MGCG |
 627                        AMD_CG_SUPPORT_SDMA_LS;
 628                adev->pg_flags = AMD_PG_SUPPORT_SDMA |
 629                                 AMD_PG_SUPPORT_MMHUB;
 630                adev->external_rev_id = 0x1;
 631                break;
 632        default:
 633                /* FIXME: not supported yet */
 634                return -EINVAL;
 635        }
 636
 637        if (amdgpu_sriov_vf(adev)) {
 638                amdgpu_virt_init_setting(adev);
 639                xgpu_ai_mailbox_set_irq_funcs(adev);
 640        }
 641
 642        adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);
 643
 644        amdgpu_get_pcie_info(adev);
 645
 646        return 0;
 647}
 648
 649static int soc15_common_late_init(void *handle)
 650{
 651        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 652
 653        if (amdgpu_sriov_vf(adev))
 654                xgpu_ai_mailbox_get_irq(adev);
 655
 656        return 0;
 657}
 658
 659static int soc15_common_sw_init(void *handle)
 660{
 661        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 662
 663        if (amdgpu_sriov_vf(adev))
 664                xgpu_ai_mailbox_add_irq_id(adev);
 665
 666        return 0;
 667}
 668
 669static int soc15_common_sw_fini(void *handle)
 670{
 671        return 0;
 672}
 673
/*
 * HW init for the common block: golden registers, PCIe link speed,
 * ASPM, then open the doorbell aperture.  Always returns 0.
 */
static int soc15_common_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* move the golden regs per IP block */
	soc15_init_golden_registers(adev);
	/* enable pcie gen2/3 link */
	soc15_pcie_gen3_enable(adev);
	/* enable aspm */
	soc15_program_aspm(adev);
	/* enable the doorbell aperture */
	soc15_enable_doorbell_aperture(adev, true);

	return 0;
}
 689
/*
 * HW fini: close the doorbell aperture; SR-IOV guests also release the
 * host mailbox interrupt.  Always returns 0.
 */
static int soc15_common_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* disable the doorbell aperture */
	soc15_enable_doorbell_aperture(adev, false);
	if (amdgpu_sriov_vf(adev))
		xgpu_ai_mailbox_put_irq(adev);

	return 0;
}
 701
 702static int soc15_common_suspend(void *handle)
 703{
 704        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 705
 706        return soc15_common_hw_fini(adev);
 707}
 708
 709static int soc15_common_resume(void *handle)
 710{
 711        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 712
 713        return soc15_common_hw_init(adev);
 714}
 715
/* The common block has no busy state to report; always idle. */
static bool soc15_common_is_idle(void *handle)
{
	return true;
}
 720
/* Nothing to wait on for the common block; succeeds immediately. */
static int soc15_common_wait_for_idle(void *handle)
{
	return 0;
}
 725
/* No soft-reset action for the common block; succeeds immediately. */
static int soc15_common_soft_reset(void *handle)
{
	return 0;
}
 730
 731static void soc15_update_hdp_light_sleep(struct amdgpu_device *adev, bool enable)
 732{
 733        uint32_t def, data;
 734
 735        def = data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS));
 736
 737        if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
 738                data |= HDP_MEM_POWER_LS__LS_ENABLE_MASK;
 739        else
 740                data &= ~HDP_MEM_POWER_LS__LS_ENABLE_MASK;
 741
 742        if (def != data)
 743                WREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS), data);
 744}
 745
 746static void soc15_update_drm_clock_gating(struct amdgpu_device *adev, bool enable)
 747{
 748        uint32_t def, data;
 749
 750        def = data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_CGTT_CTRL0));
 751
 752        if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DRM_MGCG))
 753                data &= ~(0x01000000 |
 754                          0x02000000 |
 755                          0x04000000 |
 756                          0x08000000 |
 757                          0x10000000 |
 758                          0x20000000 |
 759                          0x40000000 |
 760                          0x80000000);
 761        else
 762                data |= (0x01000000 |
 763                         0x02000000 |
 764                         0x04000000 |
 765                         0x08000000 |
 766                         0x10000000 |
 767                         0x20000000 |
 768                         0x40000000 |
 769                         0x80000000);
 770
 771        if (def != data)
 772                WREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_CGTT_CTRL0), data);
 773}
 774
 775static void soc15_update_drm_light_sleep(struct amdgpu_device *adev, bool enable)
 776{
 777        uint32_t def, data;
 778
 779        def = data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_LIGHT_SLEEP_CTRL));
 780
 781        if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DRM_LS))
 782                data |= 1;
 783        else
 784                data &= ~1;
 785
 786        if (def != data)
 787                WREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_LIGHT_SLEEP_CTRL), data);
 788}
 789
 790static void soc15_update_rom_medium_grain_clock_gating(struct amdgpu_device *adev,
 791                                                       bool enable)
 792{
 793        uint32_t def, data;
 794
 795        def = data = RREG32(SOC15_REG_OFFSET(SMUIO, 0, mmCGTT_ROM_CLK_CTRL0));
 796
 797        if (enable && (adev->cg_flags & AMD_CG_SUPPORT_ROM_MGCG))
 798                data &= ~(CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
 799                        CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK);
 800        else
 801                data |= CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
 802                        CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK;
 803
 804        if (def != data)
 805                WREG32(SOC15_REG_OFFSET(SMUIO, 0, mmCGTT_ROM_CLK_CTRL0), data);
 806}
 807
 808static void soc15_update_df_medium_grain_clock_gating(struct amdgpu_device *adev,
 809                                                       bool enable)
 810{
 811        uint32_t data;
 812
 813        /* Put DF on broadcast mode */
 814        data = RREG32(SOC15_REG_OFFSET(DF, 0, mmFabricConfigAccessControl));
 815        data &= ~FabricConfigAccessControl__CfgRegInstAccEn_MASK;
 816        WREG32(SOC15_REG_OFFSET(DF, 0, mmFabricConfigAccessControl), data);
 817
 818        if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DF_MGCG)) {
 819                data = RREG32(SOC15_REG_OFFSET(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater));
 820                data &= ~DF_PIE_AON0_DfGlobalClkGater__MGCGMode_MASK;
 821                data |= DF_MGCG_ENABLE_15_CYCLE_DELAY;
 822                WREG32(SOC15_REG_OFFSET(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater), data);
 823        } else {
 824                data = RREG32(SOC15_REG_OFFSET(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater));
 825                data &= ~DF_PIE_AON0_DfGlobalClkGater__MGCGMode_MASK;
 826                data |= DF_MGCG_DISABLE;
 827                WREG32(SOC15_REG_OFFSET(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater), data);
 828        }
 829
 830        WREG32(SOC15_REG_OFFSET(DF, 0, mmFabricConfigAccessControl),
 831               mmFabricConfigAccessControl_DEFAULT);
 832}
 833
 834static int soc15_common_set_clockgating_state(void *handle,
 835                                            enum amd_clockgating_state state)
 836{
 837        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 838
 839        if (amdgpu_sriov_vf(adev))
 840                return 0;
 841
 842        switch (adev->asic_type) {
 843        case CHIP_VEGA10:
 844                nbio_v6_1_update_medium_grain_clock_gating(adev,
 845                                state == AMD_CG_STATE_GATE ? true : false);
 846                nbio_v6_1_update_medium_grain_light_sleep(adev,
 847                                state == AMD_CG_STATE_GATE ? true : false);
 848                soc15_update_hdp_light_sleep(adev,
 849                                state == AMD_CG_STATE_GATE ? true : false);
 850                soc15_update_drm_clock_gating(adev,
 851                                state == AMD_CG_STATE_GATE ? true : false);
 852                soc15_update_drm_light_sleep(adev,
 853                                state == AMD_CG_STATE_GATE ? true : false);
 854                soc15_update_rom_medium_grain_clock_gating(adev,
 855                                state == AMD_CG_STATE_GATE ? true : false);
 856                soc15_update_df_medium_grain_clock_gating(adev,
 857                                state == AMD_CG_STATE_GATE ? true : false);
 858                break;
 859        case CHIP_RAVEN:
 860                nbio_v7_0_update_medium_grain_clock_gating(adev,
 861                                state == AMD_CG_STATE_GATE ? true : false);
 862                nbio_v6_1_update_medium_grain_light_sleep(adev,
 863                                state == AMD_CG_STATE_GATE ? true : false);
 864                soc15_update_hdp_light_sleep(adev,
 865                                state == AMD_CG_STATE_GATE ? true : false);
 866                soc15_update_drm_clock_gating(adev,
 867                                state == AMD_CG_STATE_GATE ? true : false);
 868                soc15_update_drm_light_sleep(adev,
 869                                state == AMD_CG_STATE_GATE ? true : false);
 870                soc15_update_rom_medium_grain_clock_gating(adev,
 871                                state == AMD_CG_STATE_GATE ? true : false);
 872                break;
 873        default:
 874                break;
 875        }
 876        return 0;
 877}
 878
 879static void soc15_common_get_clockgating_state(void *handle, u32 *flags)
 880{
 881        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 882        int data;
 883
 884        if (amdgpu_sriov_vf(adev))
 885                *flags = 0;
 886
 887        nbio_v6_1_get_clockgating_state(adev, flags);
 888
 889        /* AMD_CG_SUPPORT_HDP_LS */
 890        data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS));
 891        if (data & HDP_MEM_POWER_LS__LS_ENABLE_MASK)
 892                *flags |= AMD_CG_SUPPORT_HDP_LS;
 893
 894        /* AMD_CG_SUPPORT_DRM_MGCG */
 895        data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_CGTT_CTRL0));
 896        if (!(data & 0x01000000))
 897                *flags |= AMD_CG_SUPPORT_DRM_MGCG;
 898
 899        /* AMD_CG_SUPPORT_DRM_LS */
 900        data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_LIGHT_SLEEP_CTRL));
 901        if (data & 0x1)
 902                *flags |= AMD_CG_SUPPORT_DRM_LS;
 903
 904        /* AMD_CG_SUPPORT_ROM_MGCG */
 905        data = RREG32(SOC15_REG_OFFSET(SMUIO, 0, mmCGTT_ROM_CLK_CTRL0));
 906        if (!(data & CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK))
 907                *flags |= AMD_CG_SUPPORT_ROM_MGCG;
 908
 909        /* AMD_CG_SUPPORT_DF_MGCG */
 910        data = RREG32(SOC15_REG_OFFSET(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater));
 911        if (data & DF_MGCG_ENABLE_15_CYCLE_DELAY)
 912                *flags |= AMD_CG_SUPPORT_DF_MGCG;
 913}
 914
/* Stub: common power gating is not implemented for SOC15 yet; always
 * reports success so the IP block state machine can proceed.
 */
static int soc15_common_set_powergating_state(void *handle,
					    enum amd_powergating_state state)
{
	/* todo: implement common powergating control for SOC15 */
	return 0;
}
 921
 922const struct amd_ip_funcs soc15_common_ip_funcs = {
 923        .name = "soc15_common",
 924        .early_init = soc15_common_early_init,
 925        .late_init = soc15_common_late_init,
 926        .sw_init = soc15_common_sw_init,
 927        .sw_fini = soc15_common_sw_fini,
 928        .hw_init = soc15_common_hw_init,
 929        .hw_fini = soc15_common_hw_fini,
 930        .suspend = soc15_common_suspend,
 931        .resume = soc15_common_resume,
 932        .is_idle = soc15_common_is_idle,
 933        .wait_for_idle = soc15_common_wait_for_idle,
 934        .soft_reset = soc15_common_soft_reset,
 935        .set_clockgating_state = soc15_common_set_clockgating_state,
 936        .set_powergating_state = soc15_common_set_powergating_state,
 937        .get_clockgating_state= soc15_common_get_clockgating_state,
 938};
 939