/* linux/drivers/gpu/drm/amd/amdgpu/soc15.c */
   1/*
   2 * Copyright 2016 Advanced Micro Devices, Inc.
   3 *
   4 * Permission is hereby granted, free of charge, to any person obtaining a
   5 * copy of this software and associated documentation files (the "Software"),
   6 * to deal in the Software without restriction, including without limitation
   7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   8 * and/or sell copies of the Software, and to permit persons to whom the
   9 * Software is furnished to do so, subject to the following conditions:
  10 *
  11 * The above copyright notice and this permission notice shall be included in
  12 * all copies or substantial portions of the Software.
  13 *
  14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  20 * OTHER DEALINGS IN THE SOFTWARE.
  21 *
  22 */
  23#include <linux/firmware.h>
  24#include <linux/slab.h>
  25#include <linux/module.h>
  26#include <drm/drmP.h>
  27#include "amdgpu.h"
  28#include "amdgpu_atombios.h"
  29#include "amdgpu_ih.h"
  30#include "amdgpu_uvd.h"
  31#include "amdgpu_vce.h"
  32#include "amdgpu_ucode.h"
  33#include "amdgpu_psp.h"
  34#include "atom.h"
  35#include "amd_pcie.h"
  36
  37#include "uvd/uvd_7_0_offset.h"
  38#include "gc/gc_9_0_offset.h"
  39#include "gc/gc_9_0_sh_mask.h"
  40#include "sdma0/sdma0_4_0_offset.h"
  41#include "sdma1/sdma1_4_0_offset.h"
  42#include "hdp/hdp_4_0_offset.h"
  43#include "hdp/hdp_4_0_sh_mask.h"
  44#include "smuio/smuio_9_0_offset.h"
  45#include "smuio/smuio_9_0_sh_mask.h"
  46
  47#include "soc15.h"
  48#include "soc15_common.h"
  49#include "gfx_v9_0.h"
  50#include "gmc_v9_0.h"
  51#include "gfxhub_v1_0.h"
  52#include "mmhub_v1_0.h"
  53#include "df_v1_7.h"
  54#include "df_v3_6.h"
  55#include "vega10_ih.h"
  56#include "sdma_v4_0.h"
  57#include "uvd_v7_0.h"
  58#include "vce_v4_0.h"
  59#include "vcn_v1_0.h"
  60#include "dce_virtual.h"
  61#include "mxgpu_ai.h"
  62
  63#define mmMP0_MISC_CGTT_CTRL0                                                                   0x01b9
  64#define mmMP0_MISC_CGTT_CTRL0_BASE_IDX                                                          0
  65#define mmMP0_MISC_LIGHT_SLEEP_CTRL                                                             0x01ba
  66#define mmMP0_MISC_LIGHT_SLEEP_CTRL_BASE_IDX                                                    0
  67
  68/*
  69 * Indirect registers accessor
  70 */
  71static u32 soc15_pcie_rreg(struct amdgpu_device *adev, u32 reg)
  72{
  73        unsigned long flags, address, data;
  74        u32 r;
  75        address = adev->nbio_funcs->get_pcie_index_offset(adev);
  76        data = adev->nbio_funcs->get_pcie_data_offset(adev);
  77
  78        spin_lock_irqsave(&adev->pcie_idx_lock, flags);
  79        WREG32(address, reg);
  80        (void)RREG32(address);
  81        r = RREG32(data);
  82        spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
  83        return r;
  84}
  85
  86static void soc15_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
  87{
  88        unsigned long flags, address, data;
  89
  90        address = adev->nbio_funcs->get_pcie_index_offset(adev);
  91        data = adev->nbio_funcs->get_pcie_data_offset(adev);
  92
  93        spin_lock_irqsave(&adev->pcie_idx_lock, flags);
  94        WREG32(address, reg);
  95        (void)RREG32(address);
  96        WREG32(data, v);
  97        (void)RREG32(data);
  98        spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
  99}
 100
 101static u32 soc15_uvd_ctx_rreg(struct amdgpu_device *adev, u32 reg)
 102{
 103        unsigned long flags, address, data;
 104        u32 r;
 105
 106        address = SOC15_REG_OFFSET(UVD, 0, mmUVD_CTX_INDEX);
 107        data = SOC15_REG_OFFSET(UVD, 0, mmUVD_CTX_DATA);
 108
 109        spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
 110        WREG32(address, ((reg) & 0x1ff));
 111        r = RREG32(data);
 112        spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
 113        return r;
 114}
 115
 116static void soc15_uvd_ctx_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
 117{
 118        unsigned long flags, address, data;
 119
 120        address = SOC15_REG_OFFSET(UVD, 0, mmUVD_CTX_INDEX);
 121        data = SOC15_REG_OFFSET(UVD, 0, mmUVD_CTX_DATA);
 122
 123        spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
 124        WREG32(address, ((reg) & 0x1ff));
 125        WREG32(data, (v));
 126        spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
 127}
 128
 129static u32 soc15_didt_rreg(struct amdgpu_device *adev, u32 reg)
 130{
 131        unsigned long flags, address, data;
 132        u32 r;
 133
 134        address = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_INDEX);
 135        data = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_DATA);
 136
 137        spin_lock_irqsave(&adev->didt_idx_lock, flags);
 138        WREG32(address, (reg));
 139        r = RREG32(data);
 140        spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
 141        return r;
 142}
 143
 144static void soc15_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
 145{
 146        unsigned long flags, address, data;
 147
 148        address = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_INDEX);
 149        data = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_DATA);
 150
 151        spin_lock_irqsave(&adev->didt_idx_lock, flags);
 152        WREG32(address, (reg));
 153        WREG32(data, (v));
 154        spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
 155}
 156
 157static u32 soc15_gc_cac_rreg(struct amdgpu_device *adev, u32 reg)
 158{
 159        unsigned long flags;
 160        u32 r;
 161
 162        spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
 163        WREG32_SOC15(GC, 0, mmGC_CAC_IND_INDEX, (reg));
 164        r = RREG32_SOC15(GC, 0, mmGC_CAC_IND_DATA);
 165        spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
 166        return r;
 167}
 168
 169static void soc15_gc_cac_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
 170{
 171        unsigned long flags;
 172
 173        spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
 174        WREG32_SOC15(GC, 0, mmGC_CAC_IND_INDEX, (reg));
 175        WREG32_SOC15(GC, 0, mmGC_CAC_IND_DATA, (v));
 176        spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
 177}
 178
 179static u32 soc15_se_cac_rreg(struct amdgpu_device *adev, u32 reg)
 180{
 181        unsigned long flags;
 182        u32 r;
 183
 184        spin_lock_irqsave(&adev->se_cac_idx_lock, flags);
 185        WREG32_SOC15(GC, 0, mmSE_CAC_IND_INDEX, (reg));
 186        r = RREG32_SOC15(GC, 0, mmSE_CAC_IND_DATA);
 187        spin_unlock_irqrestore(&adev->se_cac_idx_lock, flags);
 188        return r;
 189}
 190
 191static void soc15_se_cac_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
 192{
 193        unsigned long flags;
 194
 195        spin_lock_irqsave(&adev->se_cac_idx_lock, flags);
 196        WREG32_SOC15(GC, 0, mmSE_CAC_IND_INDEX, (reg));
 197        WREG32_SOC15(GC, 0, mmSE_CAC_IND_DATA, (v));
 198        spin_unlock_irqrestore(&adev->se_cac_idx_lock, flags);
 199}
 200
/* Return the configured memory size as reported by the NBIO block. */
static u32 soc15_get_config_memsize(struct amdgpu_device *adev)
{
	return adev->nbio_funcs->get_memsize(adev);
}
 205
/* Return the xclk: the SPLL reference frequency from the clock info. */
static u32 soc15_get_xclk(struct amdgpu_device *adev)
{
	return adev->clock.spll.reference_freq;
}
 210
 211
 212void soc15_grbm_select(struct amdgpu_device *adev,
 213                     u32 me, u32 pipe, u32 queue, u32 vmid)
 214{
 215        u32 grbm_gfx_cntl = 0;
 216        grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, PIPEID, pipe);
 217        grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, MEID, me);
 218        grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, VMID, vmid);
 219        grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, QUEUEID, queue);
 220
 221        WREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_GFX_CNTL), grbm_gfx_cntl);
 222}
 223
/* Enable/disable VGA access — not implemented on SOC15 yet. */
static void soc15_vga_set_state(struct amdgpu_device *adev, bool state)
{
	/* todo */
}
 228
/*
 * Fetch the vbios from a disabled adapter — not implemented on SOC15;
 * always reports failure.
 */
static bool soc15_read_disabled_bios(struct amdgpu_device *adev)
{
	/* todo */
	return false;
}
 234
 235static bool soc15_read_bios_from_rom(struct amdgpu_device *adev,
 236                                     u8 *bios, u32 length_bytes)
 237{
 238        u32 *dw_ptr;
 239        u32 i, length_dw;
 240
 241        if (bios == NULL)
 242                return false;
 243        if (length_bytes == 0)
 244                return false;
 245        /* APU vbios image is part of sbios image */
 246        if (adev->flags & AMD_IS_APU)
 247                return false;
 248
 249        dw_ptr = (u32 *)bios;
 250        length_dw = ALIGN(length_bytes, 4) / 4;
 251
 252        /* set rom index to 0 */
 253        WREG32(SOC15_REG_OFFSET(SMUIO, 0, mmROM_INDEX), 0);
 254        /* read out the rom data */
 255        for (i = 0; i < length_dw; i++)
 256                dw_ptr[i] = RREG32(SOC15_REG_OFFSET(SMUIO, 0, mmROM_DATA));
 257
 258        return true;
 259}
 260
/*
 * One entry of the whitelist consulted by soc15_read_register(): the
 * hwip/inst/seg triple indexes adev->reg_offset to resolve the absolute
 * register offset, and grbm_indexed marks reads that must go through
 * GRBM SE/SH selection.
 */
struct soc15_allowed_register_entry {
	uint32_t hwip;		/* hw IP block id (first index into adev->reg_offset) */
	uint32_t inst;		/* instance of the IP block */
	uint32_t seg;		/* register segment within the instance */
	uint32_t reg_offset;	/* offset relative to the segment base */
	bool grbm_indexed;	/* true: read per-SE/SH via GRBM indexing */
};
 268
 269
/*
 * Registers userspace is allowed to read through the read_register
 * asic callback (GRBM/CP status, SDMA status, and a couple of cached
 * config registers).
 */
static struct soc15_allowed_register_entry soc15_allowed_read_registers[] = {
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS2)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE0)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE1)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE2)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE3)},
	{ SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_STATUS_REG)},
	{ SOC15_REG_ENTRY(SDMA1, 0, mmSDMA1_STATUS_REG)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STAT)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT1)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT2)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT3)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_BUSY_STAT)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STALLED_STAT1)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STATUS)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STALLED_STAT1)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STATUS)},
	{ SOC15_REG_ENTRY(GC, 0, mmGB_ADDR_CONFIG)},
	{ SOC15_REG_ENTRY(GC, 0, mmDB_DEBUG2)},
};
 291
 292static uint32_t soc15_read_indexed_register(struct amdgpu_device *adev, u32 se_num,
 293                                         u32 sh_num, u32 reg_offset)
 294{
 295        uint32_t val;
 296
 297        mutex_lock(&adev->grbm_idx_mutex);
 298        if (se_num != 0xffffffff || sh_num != 0xffffffff)
 299                amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);
 300
 301        val = RREG32(reg_offset);
 302
 303        if (se_num != 0xffffffff || sh_num != 0xffffffff)
 304                amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
 305        mutex_unlock(&adev->grbm_idx_mutex);
 306        return val;
 307}
 308
 309static uint32_t soc15_get_register_value(struct amdgpu_device *adev,
 310                                         bool indexed, u32 se_num,
 311                                         u32 sh_num, u32 reg_offset)
 312{
 313        if (indexed) {
 314                return soc15_read_indexed_register(adev, se_num, sh_num, reg_offset);
 315        } else {
 316                if (reg_offset == SOC15_REG_OFFSET(GC, 0, mmGB_ADDR_CONFIG))
 317                        return adev->gfx.config.gb_addr_config;
 318                else if (reg_offset == SOC15_REG_OFFSET(GC, 0, mmDB_DEBUG2))
 319                        return adev->gfx.config.db_debug2;
 320                return RREG32(reg_offset);
 321        }
 322}
 323
 324static int soc15_read_register(struct amdgpu_device *adev, u32 se_num,
 325                            u32 sh_num, u32 reg_offset, u32 *value)
 326{
 327        uint32_t i;
 328        struct soc15_allowed_register_entry  *en;
 329
 330        *value = 0;
 331        for (i = 0; i < ARRAY_SIZE(soc15_allowed_read_registers); i++) {
 332                en = &soc15_allowed_read_registers[i];
 333                if (reg_offset != (adev->reg_offset[en->hwip][en->inst][en->seg]
 334                                        + en->reg_offset))
 335                        continue;
 336
 337                *value = soc15_get_register_value(adev,
 338                                                  soc15_allowed_read_registers[i].grbm_indexed,
 339                                                  se_num, sh_num, reg_offset);
 340                return 0;
 341        }
 342        return -EINVAL;
 343}
 344
 345
 346/**
 347 * soc15_program_register_sequence - program an array of registers.
 348 *
 349 * @adev: amdgpu_device pointer
 350 * @regs: pointer to the register array
 351 * @array_size: size of the register array
 352 *
 353 * Programs an array or registers with and and or masks.
 354 * This is a helper for setting golden registers.
 355 */
 356
 357void soc15_program_register_sequence(struct amdgpu_device *adev,
 358                                             const struct soc15_reg_golden *regs,
 359                                             const u32 array_size)
 360{
 361        const struct soc15_reg_golden *entry;
 362        u32 tmp, reg;
 363        int i;
 364
 365        for (i = 0; i < array_size; ++i) {
 366                entry = &regs[i];
 367                reg =  adev->reg_offset[entry->hwip][entry->instance][entry->segment] + entry->reg;
 368
 369                if (entry->and_mask == 0xffffffff) {
 370                        tmp = entry->or_mask;
 371                } else {
 372                        tmp = RREG32(reg);
 373                        tmp &= ~(entry->and_mask);
 374                        tmp |= entry->or_mask;
 375                }
 376                WREG32(reg, tmp);
 377        }
 378
 379}
 380
 381
/*
 * Full ASIC reset through the PSP. Sequence: flag the engines as hung in
 * the atombios scratch registers, disable bus mastering, save PCI config
 * space, have the PSP reset the chip, restore config space, then poll the
 * NBIO memsize register until it reads back something other than
 * 0xffffffff — the sign that the asic is out of reset and MMIO works.
 */
static int soc15_asic_reset(struct amdgpu_device *adev)
{
	u32 i;

	amdgpu_atombios_scratch_regs_engine_hung(adev, true);

	dev_info(adev->dev, "GPU reset\n");

	/* disable BM */
	pci_clear_master(adev->pdev);

	pci_save_state(adev->pdev);

	psp_gpu_reset(adev);

	pci_restore_state(adev->pdev);

	/* wait for asic to come out of reset */
	for (i = 0; i < adev->usec_timeout; i++) {
		u32 memsize = adev->nbio_funcs->get_memsize(adev);

		if (memsize != 0xffffffff)
			break;
		udelay(1);
	}

	amdgpu_atombios_scratch_regs_engine_hung(adev, false);

	/* NOTE(review): returns 0 even if the memsize poll timed out —
	 * confirm callers do not rely on a timeout being reported. */
	return 0;
}
 412
 413/*static int soc15_set_uvd_clock(struct amdgpu_device *adev, u32 clock,
 414                        u32 cntl_reg, u32 status_reg)
 415{
 416        return 0;
 417}*/
 418
/* Set UVD vclk/dclk — stub, not implemented on SOC15 yet; reports success. */
static int soc15_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
{
	/*int r;

	r = soc15_set_uvd_clock(adev, vclk, ixCG_VCLK_CNTL, ixCG_VCLK_STATUS);
	if (r)
		return r;

	r = soc15_set_uvd_clock(adev, dclk, ixCG_DCLK_CNTL, ixCG_DCLK_STATUS);
	*/
	return 0;
}
 431
/* Set VCE evclk/ecclk — stub, not implemented on SOC15 yet; reports success. */
static int soc15_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
{
	/* todo */

	return 0;
}
 438
 439static void soc15_pcie_gen3_enable(struct amdgpu_device *adev)
 440{
 441        if (pci_is_root_bus(adev->pdev->bus))
 442                return;
 443
 444        if (amdgpu_pcie_gen2 == 0)
 445                return;
 446
 447        if (adev->flags & AMD_IS_APU)
 448                return;
 449
 450        if (!(adev->pm.pcie_gen_mask & (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
 451                                        CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)))
 452                return;
 453
 454        /* todo */
 455}
 456
/*
 * Program PCIe ASPM; honours the amdgpu_aspm=0 module-parameter
 * override. The actual register programming is still a todo.
 */
static void soc15_program_aspm(struct amdgpu_device *adev)
{

	if (amdgpu_aspm == 0)
		return;

	/* todo */
}
 465
/* Toggle both the doorbell aperture and its self-ring aperture in NBIO. */
static void soc15_enable_doorbell_aperture(struct amdgpu_device *adev,
					   bool enable)
{
	adev->nbio_funcs->enable_doorbell_aperture(adev, enable);
	adev->nbio_funcs->enable_doorbell_selfring_aperture(adev, enable);
}
 472
/* COMMON IP block descriptor shared by all SOC15 parts (v2.0.0). */
static const struct amdgpu_ip_block_version vega10_common_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_COMMON,
	.major = 2,
	.minor = 0,
	.rev = 0,
	.funcs = &soc15_common_ip_funcs,
};
 481
/*
 * Per-ASIC bring-up for SOC15 parts: initialize the register base table
 * (must happen before any SOC15 register macro is used), select the
 * NBIO/DF helper function tables, hook up SR-IOV ops when running as a
 * VF, and register the IP blocks in their init order.
 *
 * Returns -EINVAL for asic types this file does not know about.
 */
int soc15_set_ip_blocks(struct amdgpu_device *adev)
{
	/* Set IP register base before any HW register access */
	switch (adev->asic_type) {
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_RAVEN:
		vega10_reg_base_init(adev);
		break;
	case CHIP_VEGA20:
		vega20_reg_base_init(adev);
		break;
	default:
		return -EINVAL;
	}

	if (adev->flags & AMD_IS_APU)
		adev->nbio_funcs = &nbio_v7_0_funcs;
	else if (adev->asic_type == CHIP_VEGA20)
		/* NOTE(review): same nbio table as the APU path — confirm
		 * Vega20 is not supposed to use a dedicated nbio table. */
		adev->nbio_funcs = &nbio_v7_0_funcs;
	else
		adev->nbio_funcs = &nbio_v6_1_funcs;

	/* Vega20 carries the newer data-fabric block */
	if (adev->asic_type == CHIP_VEGA20)
		adev->df_funcs = &df_v3_6_funcs;
	else
		adev->df_funcs = &df_v1_7_funcs;
	adev->nbio_funcs->detect_hw_virt(adev);

	if (amdgpu_sriov_vf(adev))
		adev->virt.ops = &xgpu_ai_virt_ops;

	switch (adev->asic_type) {
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
		amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
		amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
		/* Vega10/12 only: PSP v3.1, plus powerplay on bare metal */
		if (adev->asic_type != CHIP_VEGA20) {
			amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block);
			if (!amdgpu_sriov_vf(adev))
				amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		}
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#else
#	warning "Enable CONFIG_DRM_AMD_DC for display support on SOC15."
#endif
		amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
		amdgpu_device_ip_block_add(adev, &uvd_v7_0_ip_block);
		amdgpu_device_ip_block_add(adev, &vce_v4_0_ip_block);
		break;
	case CHIP_RAVEN:
		amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
		amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &psp_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#else
#	warning "Enable CONFIG_DRM_AMD_DC for display support on SOC15."
#endif
		amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
		/* Raven uses VCN instead of separate UVD/VCE blocks */
		amdgpu_device_ip_block_add(adev, &vcn_v1_0_ip_block);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
 563
/* Return the chip revision id as exposed by the NBIO block. */
static uint32_t soc15_get_rev_id(struct amdgpu_device *adev)
{
	return adev->nbio_funcs->get_rev_id(adev);
}
 568
/* Flush the HDP write cache, delegated to the NBIO helper. */
static void soc15_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring)
{
	adev->nbio_funcs->hdp_flush(adev, ring);
}
 573
 574static void soc15_invalidate_hdp(struct amdgpu_device *adev,
 575                                 struct amdgpu_ring *ring)
 576{
 577        if (!ring || !ring->funcs->emit_wreg)
 578                WREG32_SOC15_NO_KIQ(NBIO, 0, mmHDP_READ_CACHE_INVALIDATE, 1);
 579        else
 580                amdgpu_ring_emit_wreg(ring, SOC15_REG_OFFSET(
 581                        HDP, 0, mmHDP_READ_CACHE_INVALIDATE), 1);
 582}
 583
/* SOC15 has no soft reset yet, so every reset is a full reset. */
static bool soc15_need_full_reset(struct amdgpu_device *adev)
{
	/* change this when we implement soft reset */
	return true;
}
 589
/* asic-level callbacks shared by all SOC15 parts. */
static const struct amdgpu_asic_funcs soc15_asic_funcs =
{
	.read_disabled_bios = &soc15_read_disabled_bios,
	.read_bios_from_rom = &soc15_read_bios_from_rom,
	.read_register = &soc15_read_register,
	.reset = &soc15_asic_reset,
	.set_vga_state = &soc15_vga_set_state,
	.get_xclk = &soc15_get_xclk,
	.set_uvd_clocks = &soc15_set_uvd_clocks,
	.set_vce_clocks = &soc15_set_vce_clocks,
	.get_config_memsize = &soc15_get_config_memsize,
	.flush_hdp = &soc15_flush_hdp,
	.invalidate_hdp = &soc15_invalidate_hdp,
	.need_full_reset = &soc15_need_full_reset,
};
 605
/*
 * COMMON IP early init: install the SOC15 indirect register accessors and
 * asic callbacks, read the revision id, then set the per-ASIC clockgating
 * (cg_flags) and powergating (pg_flags) feature masks and external
 * revision id. Finishes with SR-IOV VF setup when applicable.
 *
 * @handle: amdgpu_device pointer cast to void*
 * Returns -EINVAL for asic types not handled here.
 */
static int soc15_common_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* SOC15 has no direct SMC register pair; left NULL on purpose */
	adev->smc_rreg = NULL;
	adev->smc_wreg = NULL;
	adev->pcie_rreg = &soc15_pcie_rreg;
	adev->pcie_wreg = &soc15_pcie_wreg;
	adev->uvd_ctx_rreg = &soc15_uvd_ctx_rreg;
	adev->uvd_ctx_wreg = &soc15_uvd_ctx_wreg;
	adev->didt_rreg = &soc15_didt_rreg;
	adev->didt_wreg = &soc15_didt_wreg;
	adev->gc_cac_rreg = &soc15_gc_cac_rreg;
	adev->gc_cac_wreg = &soc15_gc_cac_wreg;
	adev->se_cac_rreg = &soc15_se_cac_rreg;
	adev->se_cac_wreg = &soc15_se_cac_wreg;

	adev->asic_funcs = &soc15_asic_funcs;

	adev->rev_id = soc15_get_rev_id(adev);
	adev->external_rev_id = 0xFF;	/* overwritten per asic below */
	switch (adev->asic_type) {
	case CHIP_VEGA10:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_DRM_MGCG |
			AMD_CG_SUPPORT_DRM_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_DF_MGCG |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS;
		adev->pg_flags = 0;	/* no powergating on Vega10 */
		adev->external_rev_id = 0x1;
		break;
	case CHIP_VEGA12:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_VCE_MGCG |
			AMD_CG_SUPPORT_UVD_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x14;
		break;
	case CHIP_VEGA20:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_VCE_MGCG |
			AMD_CG_SUPPORT_UVD_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x28;
		break;
	case CHIP_RAVEN:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_DRM_MGCG |
			AMD_CG_SUPPORT_DRM_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_VCN_MGCG;

		adev->pg_flags = AMD_PG_SUPPORT_SDMA | AMD_PG_SUPPORT_VCN;

		/* GFX powergating only when the gfxoff feature is enabled */
		if (adev->powerplay.pp_feature & PP_GFXOFF_MASK)
			adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG |
				AMD_PG_SUPPORT_CP |
				AMD_PG_SUPPORT_RLC_SMU_HS;

		adev->external_rev_id = 0x1;
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	if (amdgpu_sriov_vf(adev)) {
		amdgpu_virt_init_setting(adev);
		xgpu_ai_mailbox_set_irq_funcs(adev);
	}

	return 0;
}
 738
/* COMMON IP late init: acquire the SR-IOV mailbox irq when running as a VF. */
static int soc15_common_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		xgpu_ai_mailbox_get_irq(adev);

	return 0;
}
 748
/* COMMON IP sw init: register the SR-IOV mailbox irq id when running as a VF. */
static int soc15_common_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		xgpu_ai_mailbox_add_irq_id(adev);

	return 0;
}
 758
/* COMMON IP sw fini: nothing to tear down. */
static int soc15_common_sw_fini(void *handle)
{
	return 0;
}
 763
/*
 * COMMON IP hw init: PCIe link speed, ASPM, NBIO register setup and the
 * doorbell apertures — in that order.
 */
static int soc15_common_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* enable pcie gen2/3 link */
	soc15_pcie_gen3_enable(adev);
	/* enable aspm */
	soc15_program_aspm(adev);
	/* setup nbio registers */
	adev->nbio_funcs->init_registers(adev);
	/* enable the doorbell aperture */
	soc15_enable_doorbell_aperture(adev, true);

	return 0;
}
 779
/*
 * COMMON IP hw fini: close the doorbell apertures and release the
 * SR-IOV mailbox irq when running as a VF.
 */
static int soc15_common_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* disable the doorbell aperture */
	soc15_enable_doorbell_aperture(adev, false);
	if (amdgpu_sriov_vf(adev))
		xgpu_ai_mailbox_put_irq(adev);

	return 0;
}
 791
/* Suspend is identical to hw fini for the COMMON block. */
static int soc15_common_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return soc15_common_hw_fini(adev);
}
 798
/* Resume is identical to hw init for the COMMON block. */
static int soc15_common_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return soc15_common_hw_init(adev);
}
 805
/* The COMMON block has no busy state of its own; always idle. */
static bool soc15_common_is_idle(void *handle)
{
	return true;
}
 810
/* Nothing to wait for — see soc15_common_is_idle(). */
static int soc15_common_wait_for_idle(void *handle)
{
	return 0;
}
 815
/* Soft reset of the COMMON block: nothing to do. */
static int soc15_common_soft_reset(void *handle)
{
	return 0;
}
 820
 821static void soc15_update_hdp_light_sleep(struct amdgpu_device *adev, bool enable)
 822{
 823        uint32_t def, data;
 824
 825        def = data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS));
 826
 827        if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
 828                data |= HDP_MEM_POWER_LS__LS_ENABLE_MASK;
 829        else
 830                data &= ~HDP_MEM_POWER_LS__LS_ENABLE_MASK;
 831
 832        if (def != data)
 833                WREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS), data);
 834}
 835
 836static void soc15_update_drm_clock_gating(struct amdgpu_device *adev, bool enable)
 837{
 838        uint32_t def, data;
 839
 840        def = data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_CGTT_CTRL0));
 841
 842        if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DRM_MGCG))
 843                data &= ~(0x01000000 |
 844                          0x02000000 |
 845                          0x04000000 |
 846                          0x08000000 |
 847                          0x10000000 |
 848                          0x20000000 |
 849                          0x40000000 |
 850                          0x80000000);
 851        else
 852                data |= (0x01000000 |
 853                         0x02000000 |
 854                         0x04000000 |
 855                         0x08000000 |
 856                         0x10000000 |
 857                         0x20000000 |
 858                         0x40000000 |
 859                         0x80000000);
 860
 861        if (def != data)
 862                WREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_CGTT_CTRL0), data);
 863}
 864
 865static void soc15_update_drm_light_sleep(struct amdgpu_device *adev, bool enable)
 866{
 867        uint32_t def, data;
 868
 869        def = data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_LIGHT_SLEEP_CTRL));
 870
 871        if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DRM_LS))
 872                data |= 1;
 873        else
 874                data &= ~1;
 875
 876        if (def != data)
 877                WREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_LIGHT_SLEEP_CTRL), data);
 878}
 879
 880static void soc15_update_rom_medium_grain_clock_gating(struct amdgpu_device *adev,
 881                                                       bool enable)
 882{
 883        uint32_t def, data;
 884
 885        def = data = RREG32(SOC15_REG_OFFSET(SMUIO, 0, mmCGTT_ROM_CLK_CTRL0));
 886
 887        if (enable && (adev->cg_flags & AMD_CG_SUPPORT_ROM_MGCG))
 888                data &= ~(CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
 889                        CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK);
 890        else
 891                data |= CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
 892                        CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK;
 893
 894        if (def != data)
 895                WREG32(SOC15_REG_OFFSET(SMUIO, 0, mmCGTT_ROM_CLK_CTRL0), data);
 896}
 897
 898static int soc15_common_set_clockgating_state(void *handle,
 899                                            enum amd_clockgating_state state)
 900{
 901        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 902
 903        if (amdgpu_sriov_vf(adev))
 904                return 0;
 905
 906        switch (adev->asic_type) {
 907        case CHIP_VEGA10:
 908        case CHIP_VEGA12:
 909        case CHIP_VEGA20:
 910                adev->nbio_funcs->update_medium_grain_clock_gating(adev,
 911                                state == AMD_CG_STATE_GATE ? true : false);
 912                adev->nbio_funcs->update_medium_grain_light_sleep(adev,
 913                                state == AMD_CG_STATE_GATE ? true : false);
 914                soc15_update_hdp_light_sleep(adev,
 915                                state == AMD_CG_STATE_GATE ? true : false);
 916                soc15_update_drm_clock_gating(adev,
 917                                state == AMD_CG_STATE_GATE ? true : false);
 918                soc15_update_drm_light_sleep(adev,
 919                                state == AMD_CG_STATE_GATE ? true : false);
 920                soc15_update_rom_medium_grain_clock_gating(adev,
 921                                state == AMD_CG_STATE_GATE ? true : false);
 922                adev->df_funcs->update_medium_grain_clock_gating(adev,
 923                                state == AMD_CG_STATE_GATE ? true : false);
 924                break;
 925        case CHIP_RAVEN:
 926                adev->nbio_funcs->update_medium_grain_clock_gating(adev,
 927                                state == AMD_CG_STATE_GATE ? true : false);
 928                adev->nbio_funcs->update_medium_grain_light_sleep(adev,
 929                                state == AMD_CG_STATE_GATE ? true : false);
 930                soc15_update_hdp_light_sleep(adev,
 931                                state == AMD_CG_STATE_GATE ? true : false);
 932                soc15_update_drm_clock_gating(adev,
 933                                state == AMD_CG_STATE_GATE ? true : false);
 934                soc15_update_drm_light_sleep(adev,
 935                                state == AMD_CG_STATE_GATE ? true : false);
 936                soc15_update_rom_medium_grain_clock_gating(adev,
 937                                state == AMD_CG_STATE_GATE ? true : false);
 938                break;
 939        default:
 940                break;
 941        }
 942        return 0;
 943}
 944
 945static void soc15_common_get_clockgating_state(void *handle, u32 *flags)
 946{
 947        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 948        int data;
 949
 950        if (amdgpu_sriov_vf(adev))
 951                *flags = 0;
 952
 953        adev->nbio_funcs->get_clockgating_state(adev, flags);
 954
 955        /* AMD_CG_SUPPORT_HDP_LS */
 956        data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS));
 957        if (data & HDP_MEM_POWER_LS__LS_ENABLE_MASK)
 958                *flags |= AMD_CG_SUPPORT_HDP_LS;
 959
 960        /* AMD_CG_SUPPORT_DRM_MGCG */
 961        data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_CGTT_CTRL0));
 962        if (!(data & 0x01000000))
 963                *flags |= AMD_CG_SUPPORT_DRM_MGCG;
 964
 965        /* AMD_CG_SUPPORT_DRM_LS */
 966        data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_LIGHT_SLEEP_CTRL));
 967        if (data & 0x1)
 968                *flags |= AMD_CG_SUPPORT_DRM_LS;
 969
 970        /* AMD_CG_SUPPORT_ROM_MGCG */
 971        data = RREG32(SOC15_REG_OFFSET(SMUIO, 0, mmCGTT_ROM_CLK_CTRL0));
 972        if (!(data & CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK))
 973                *flags |= AMD_CG_SUPPORT_ROM_MGCG;
 974
 975        adev->df_funcs->get_clockgating_state(adev, flags);
 976}
 977
/* Power gating is not implemented for the common block yet; accept
 * any requested state and report success.
 */
static int soc15_common_set_powergating_state(void *handle,
                                            enum amd_powergating_state state)
{
        /* todo */
        return 0;
}
 984
 985const struct amd_ip_funcs soc15_common_ip_funcs = {
 986        .name = "soc15_common",
 987        .early_init = soc15_common_early_init,
 988        .late_init = soc15_common_late_init,
 989        .sw_init = soc15_common_sw_init,
 990        .sw_fini = soc15_common_sw_fini,
 991        .hw_init = soc15_common_hw_init,
 992        .hw_fini = soc15_common_hw_fini,
 993        .suspend = soc15_common_suspend,
 994        .resume = soc15_common_resume,
 995        .is_idle = soc15_common_is_idle,
 996        .wait_for_idle = soc15_common_wait_for_idle,
 997        .soft_reset = soc15_common_soft_reset,
 998        .set_clockgating_state = soc15_common_set_clockgating_state,
 999        .set_powergating_state = soc15_common_set_powergating_state,
1000        .get_clockgating_state= soc15_common_get_clockgating_state,
1001};
1002