linux/drivers/gpu/drm/amd/amdgpu/nv.c
/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/pci.h>

#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_ih.h"
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"
#include "amdgpu_ucode.h"
#include "amdgpu_psp.h"
#include "amdgpu_smu.h"
#include "atom.h"
#include "amd_pcie.h"

#include "gc/gc_10_1_0_offset.h"
#include "gc/gc_10_1_0_sh_mask.h"
#include "hdp/hdp_5_0_0_offset.h"
#include "hdp/hdp_5_0_0_sh_mask.h"
#include "smuio/smuio_11_0_0_offset.h"

#include "soc15.h"
#include "soc15_common.h"
#include "gmc_v10_0.h"
#include "gfxhub_v2_0.h"
#include "mmhub_v2_0.h"
#include "nbio_v2_3.h"
#include "nv.h"
#include "navi10_ih.h"
#include "gfx_v10_0.h"
#include "sdma_v5_0.h"
#include "vcn_v2_0.h"
#include "jpeg_v2_0.h"
#include "dce_virtual.h"
#include "mes_v10_1.h"
#include "mxgpu_nv.h"

static const struct amd_ip_funcs nv_common_ip_funcs;

/*
 * Indirect register accessors.  Registers behind the PCIE index/data
 * pair are reached by writing the register offset to the index port and
 * then reading or writing the data port, all under pcie_idx_lock so the
 * index/data pairs of concurrent accessors cannot interleave.  The dummy
 * readbacks flush the posted writes before the paired access.
 */
static u32 nv_pcie_rreg(struct amdgpu_device *adev, u32 reg)
{
        unsigned long flags, address, data;
        u32 r;

        address = adev->nbio.funcs->get_pcie_index_offset(adev);
        data = adev->nbio.funcs->get_pcie_data_offset(adev);

        spin_lock_irqsave(&adev->pcie_idx_lock, flags);
        WREG32(address, reg);
        (void)RREG32(address);
        r = RREG32(data);
        spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
        return r;
}

static void nv_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
        unsigned long flags, address, data;

        address = adev->nbio.funcs->get_pcie_index_offset(adev);
        data = adev->nbio.funcs->get_pcie_data_offset(adev);

        spin_lock_irqsave(&adev->pcie_idx_lock, flags);
        WREG32(address, reg);
        (void)RREG32(address);
        WREG32(data, v);
        (void)RREG32(data);
        spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}

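/*
 * DIDT (di/dt throttling) indirect accessors.  The register offset goes
 * through mmDIDT_IND_INDEX and the value through mmDIDT_IND_DATA, with
 * didt_idx_lock serializing the index/data pairs against other users.
 */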
static u32 nv_didt_rreg(struct amdgpu_device *adev, u32 reg)
{
        unsigned long flags, address, data;
        u32 r;

        address = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_INDEX);
        data = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_DATA);

        spin_lock_irqsave(&adev->didt_idx_lock, flags);
        WREG32(address, reg);
        r = RREG32(data);
        spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
        return r;
}

static void nv_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
        unsigned long flags, address, data;

        address = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_INDEX);
        data = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_DATA);

        spin_lock_irqsave(&adev->didt_idx_lock, flags);
        WREG32(address, reg);
        WREG32(data, v);
        spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
}

static u32 nv_get_config_memsize(struct amdgpu_device *adev)
{
        return adev->nbio.funcs->get_memsize(adev);
}

static u32 nv_get_xclk(struct amdgpu_device *adev)
{
        return adev->clock.spll.reference_freq;
}

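/*
 * Program GRBM_GFX_CNTL so that subsequent GRBM register accesses are
 * steered to the given me/pipe/queue/vmid.  Callers serialize the
 * select/access/restore sequence themselves (the gfx code does this
 * under adev->srbm_mutex).
 */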
void nv_grbm_select(struct amdgpu_device *adev,
                    u32 me, u32 pipe, u32 queue, u32 vmid)
{
        u32 grbm_gfx_cntl = 0;

        grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, PIPEID, pipe);
        grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, MEID, me);
        grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, VMID, vmid);
        grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, QUEUEID, queue);

        WREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_GFX_CNTL), grbm_gfx_cntl);
}

static void nv_vga_set_state(struct amdgpu_device *adev, bool state)
{
        /* todo */
}

static bool nv_read_disabled_bios(struct amdgpu_device *adev)
{
        /* todo */
        return false;
}

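/*
 * Read the VBIOS image of a discrete GPU through the SMUIO ROM mirror:
 * write the starting dword offset to mmROM_INDEX, then read mmROM_DATA
 * repeatedly, the data port stepping through the image on each read.
 */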
static bool nv_read_bios_from_rom(struct amdgpu_device *adev,
                                  u8 *bios, u32 length_bytes)
{
        u32 *dw_ptr;
        u32 i, length_dw;

        if (bios == NULL)
                return false;
        if (length_bytes == 0)
                return false;
        /* An APU VBIOS image is part of the system BIOS image */
        if (adev->flags & AMD_IS_APU)
                return false;

        dw_ptr = (u32 *)bios;
        length_dw = ALIGN(length_bytes, 4) / 4;

        /* set rom index to 0 */
        WREG32(SOC15_REG_OFFSET(SMUIO, 0, mmROM_INDEX), 0);
        /* read out the rom data */
        for (i = 0; i < length_dw; i++)
                dw_ptr[i] = RREG32(SOC15_REG_OFFSET(SMUIO, 0, mmROM_DATA));

        return true;
}

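/*
 * Whitelist of registers that userspace may read through the
 * read_register ASIC callback (used by the register-read path of the
 * AMDGPU_INFO ioctl); everything else is rejected with -EINVAL.
 */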
static struct soc15_allowed_register_entry nv_allowed_read_registers[] = {
        { SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS)},
        { SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS2)},
        { SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE0)},
        { SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE1)},
        { SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE2)},
        { SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE3)},
#if 0   /* TODO: will set it when SDMA header is available */
        { SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_STATUS_REG)},
        { SOC15_REG_ENTRY(SDMA1, 0, mmSDMA1_STATUS_REG)},
#endif
        { SOC15_REG_ENTRY(GC, 0, mmCP_STAT)},
        { SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT1)},
        { SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT2)},
        { SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT3)},
        { SOC15_REG_ENTRY(GC, 0, mmCP_CPF_BUSY_STAT)},
        { SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STALLED_STAT1)},
        { SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STATUS)},
        { SOC15_REG_ENTRY(GC, 0, mmCP_CPC_BUSY_STAT)},
        { SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STALLED_STAT1)},
        { SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STATUS)},
        { SOC15_REG_ENTRY(GC, 0, mmGB_ADDR_CONFIG)},
};

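/*
 * Read a register under an explicit shader engine / shader array
 * selection.  se_num/sh_num of 0xffffffff mean broadcast; otherwise the
 * SE/SH is selected under grbm_idx_mutex and restored to broadcast
 * before returning.
 */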
static uint32_t nv_read_indexed_register(struct amdgpu_device *adev, u32 se_num,
                                         u32 sh_num, u32 reg_offset)
{
        uint32_t val;

        mutex_lock(&adev->grbm_idx_mutex);
        if (se_num != 0xffffffff || sh_num != 0xffffffff)
                amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);

        val = RREG32(reg_offset);

        if (se_num != 0xffffffff || sh_num != 0xffffffff)
                amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
        mutex_unlock(&adev->grbm_idx_mutex);
        return val;
}

static uint32_t nv_get_register_value(struct amdgpu_device *adev,
                                      bool indexed, u32 se_num,
                                      u32 sh_num, u32 reg_offset)
{
        if (indexed) {
                return nv_read_indexed_register(adev, se_num, sh_num, reg_offset);
        } else {
                if (reg_offset == SOC15_REG_OFFSET(GC, 0, mmGB_ADDR_CONFIG))
                        return adev->gfx.config.gb_addr_config;
                return RREG32(reg_offset);
        }
}

static int nv_read_register(struct amdgpu_device *adev, u32 se_num,
                            u32 sh_num, u32 reg_offset, u32 *value)
{
        uint32_t i;
        struct soc15_allowed_register_entry *en;

        *value = 0;
        for (i = 0; i < ARRAY_SIZE(nv_allowed_read_registers); i++) {
                en = &nv_allowed_read_registers[i];
                if (reg_offset !=
                    (adev->reg_offset[en->hwip][en->inst][en->seg] + en->reg_offset))
                        continue;

                *value = nv_get_register_value(adev,
                                               nv_allowed_read_registers[i].grbm_indexed,
                                               se_num, sh_num, reg_offset);
                return 0;
        }
        return -EINVAL;
}

#if 0
static void nv_gpu_pci_config_reset(struct amdgpu_device *adev)
{
        u32 i;

        dev_info(adev->dev, "GPU pci config reset\n");

        /* disable BM */
        pci_clear_master(adev->pdev);
        /* reset */
        amdgpu_pci_config_reset(adev);

        udelay(100);

        /* wait for asic to come out of reset */
        for (i = 0; i < adev->usec_timeout; i++) {
                u32 memsize = nbio_v2_3_get_memsize(adev);

                if (memsize != 0xffffffff)
                        break;
                udelay(1);
        }
}
#endif

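/*
 * Mode1 reset: a full-ASIC reset requested through the PSP.  Bus
 * mastering is disabled and PCI config space saved/restored around the
 * reset, then the NBIO memsize register is polled until it no longer
 * reads 0xffffffff, i.e. the ASIC responds again.
 */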
static int nv_asic_mode1_reset(struct amdgpu_device *adev)
{
        u32 i;
        int ret = 0;

        amdgpu_atombios_scratch_regs_engine_hung(adev, true);

        dev_info(adev->dev, "GPU mode1 reset\n");

        /* disable BM */
        pci_clear_master(adev->pdev);

        pci_save_state(adev->pdev);

        ret = psp_gpu_reset(adev);
        if (ret)
                dev_err(adev->dev, "GPU mode1 reset failed\n");

        pci_restore_state(adev->pdev);

        /* wait for asic to come out of reset */
        for (i = 0; i < adev->usec_timeout; i++) {
                u32 memsize = adev->nbio.funcs->get_memsize(adev);

                if (memsize != 0xffffffff)
                        break;
                udelay(1);
        }

        amdgpu_atombios_scratch_regs_engine_hung(adev, false);

        return ret;
}

static bool nv_asic_supports_baco(struct amdgpu_device *adev)
{
        struct smu_context *smu = &adev->smu;

        return smu_baco_is_support(smu);
}

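/*
 * Prefer BACO (Bus Active, Chip Off) whenever the SMU reports support
 * for it and we are not an SR-IOV guest; otherwise fall back to mode1.
 */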
static enum amd_reset_method
nv_asic_reset_method(struct amdgpu_device *adev)
{
        struct smu_context *smu = &adev->smu;

        if (!amdgpu_sriov_vf(adev) && smu_baco_is_support(smu))
                return AMD_RESET_METHOD_BACO;
        else
                return AMD_RESET_METHOD_MODE1;
}

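/*
 * A BACO reset simply enters and leaves BACO.  In both reset paths VRAM
 * contents are treated as lost, so the vram_lost counter is bumped
 * unless we got here from the suspend path.
 */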
static int nv_asic_reset(struct amdgpu_device *adev)
{
        int ret = 0;
        struct smu_context *smu = &adev->smu;

        /* FIXME: it doesn't work since vega10 */
#if 0
        amdgpu_atombios_scratch_regs_engine_hung(adev, true);

        nv_gpu_pci_config_reset(adev);

        amdgpu_atombios_scratch_regs_engine_hung(adev, false);
#endif

        if (nv_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
                if (!adev->in_suspend)
                        amdgpu_inc_vram_lost(adev);
                ret = smu_baco_enter(smu);
                if (ret)
                        return ret;
                ret = smu_baco_exit(smu);
                if (ret)
                        return ret;
        } else {
                if (!adev->in_suspend)
                        amdgpu_inc_vram_lost(adev);
                ret = nv_asic_mode1_reset(adev);
        }

        return ret;
}

static int nv_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
{
        /* todo */
        return 0;
}

static int nv_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
{
        /* todo */
        return 0;
}

static void nv_pcie_gen3_enable(struct amdgpu_device *adev)
{
        if (pci_is_root_bus(adev->pdev->bus))
                return;

        if (amdgpu_pcie_gen2 == 0)
                return;

        if (!(adev->pm.pcie_gen_mask & (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
                                        CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)))
                return;

        /* todo */
}

static void nv_program_aspm(struct amdgpu_device *adev)
{
        if (amdgpu_aspm == 0)
                return;

        /* todo */
}

static void nv_enable_doorbell_aperture(struct amdgpu_device *adev,
                                        bool enable)
{
        adev->nbio.funcs->enable_doorbell_aperture(adev, enable);
        adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, enable);
}

static const struct amdgpu_ip_block_version nv_common_ip_block =
{
        .type = AMD_IP_BLOCK_TYPE_COMMON,
        .major = 1,
        .minor = 0,
        .rev = 0,
        .funcs = &nv_common_ip_funcs,
};

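/*
 * Initialize the per-IP register base offsets, preferring the IP
 * discovery table; when discovery is disabled or fails, fall back to
 * the hardcoded per-ASIC offset tables.
 */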
static int nv_reg_base_init(struct amdgpu_device *adev)
{
        int r;

        if (amdgpu_discovery) {
                r = amdgpu_discovery_reg_base_init(adev);
                if (r) {
                        DRM_WARN("failed to init reg base from ip discovery table, "
                                 "fallback to legacy init method\n");
                        goto legacy_init;
                }

                return 0;
        }

legacy_init:
        switch (adev->asic_type) {
        case CHIP_NAVI10:
                navi10_reg_base_init(adev);
                break;
        case CHIP_NAVI14:
                navi14_reg_base_init(adev);
                break;
        case CHIP_NAVI12:
                navi12_reg_base_init(adev);
                break;
        default:
                return -EINVAL;
        }

        return 0;
}

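/*
 * Register the IP blocks for this ASIC.  The order determines the
 * hw_init sequence: common/GMC/IH/PSP come first, and the SMU block is
 * added before or after the display/GFX blocks depending on whether
 * firmware is loaded through the PSP or directly.
 */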
int nv_set_ip_blocks(struct amdgpu_device *adev)
{
        int r;

        /* Set IP register base before any HW register access */
        r = nv_reg_base_init(adev);
        if (r)
                return r;

        adev->nbio.funcs = &nbio_v2_3_funcs;
        adev->nbio.hdp_flush_reg = &nbio_v2_3_hdp_flush_reg;

        adev->nbio.funcs->detect_hw_virt(adev);

        if (amdgpu_sriov_vf(adev))
                adev->virt.ops = &xgpu_nv_virt_ops;

        switch (adev->asic_type) {
        case CHIP_NAVI10:
        case CHIP_NAVI14:
                amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
                amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
                amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
                amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
                if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP &&
                    !amdgpu_sriov_vf(adev))
                        amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
                if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
                        amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
                else if (amdgpu_device_has_dc_support(adev))
                        amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
                amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
                amdgpu_device_ip_block_add(adev, &sdma_v5_0_ip_block);
                if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
                    !amdgpu_sriov_vf(adev))
                        amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
                amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
                amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block);
                if (adev->enable_mes)
                        amdgpu_device_ip_block_add(adev, &mes_v10_1_ip_block);
                break;
        case CHIP_NAVI12:
                amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
                amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
                amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
                amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
                if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP &&
                    !amdgpu_sriov_vf(adev))
                        amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
                if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
                        amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
                else if (amdgpu_device_has_dc_support(adev))
                        amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
                amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
                amdgpu_device_ip_block_add(adev, &sdma_v5_0_ip_block);
                if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
                    !amdgpu_sriov_vf(adev))
                        amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
                amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
                amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block);
                break;
        default:
                return -EINVAL;
        }

        return 0;
}

static uint32_t nv_get_rev_id(struct amdgpu_device *adev)
{
        return adev->nbio.funcs->get_rev_id(adev);
}

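/*
 * HDP flush/invalidate helpers.  When a ring that can emit register
 * writes is supplied, the access is placed on that ring so it is
 * ordered with the ring's other commands; otherwise the register is
 * written directly, using the NO_KIQ accessor so no KIQ round trip is
 * involved.
 */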
static void nv_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring)
{
        adev->nbio.funcs->hdp_flush(adev, ring);
}

static void nv_invalidate_hdp(struct amdgpu_device *adev,
                              struct amdgpu_ring *ring)
{
        if (!ring || !ring->funcs->emit_wreg) {
                WREG32_SOC15_NO_KIQ(NBIO, 0, mmHDP_READ_CACHE_INVALIDATE, 1);
        } else {
                amdgpu_ring_emit_wreg(ring, SOC15_REG_OFFSET(
                                        HDP, 0, mmHDP_READ_CACHE_INVALIDATE), 1);
        }
}

static bool nv_need_full_reset(struct amdgpu_device *adev)
{
        return true;
}

static void nv_get_pcie_usage(struct amdgpu_device *adev,
                              uint64_t *count0,
                              uint64_t *count1)
{
        /* TODO */
}

static bool nv_need_reset_on_init(struct amdgpu_device *adev)
{
#if 0
        u32 sol_reg;

        if (adev->flags & AMD_IS_APU)
                return false;

        /* Check the sOS sign-of-life register to confirm that the sys
         * driver and sOS have already been loaded.
         */
        sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81);
        if (sol_reg)
                return true;
#endif
        /* TODO: re-enable it when mode1 reset is functional */
        return false;
}

static uint64_t nv_get_pcie_replay_count(struct amdgpu_device *adev)
{
        /* TODO: dummy implementation for the pcie_replay_count sysfs
         * interface
         */
        return 0;
}

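/*
 * Assign the navi1x doorbell layout.  max_assignment is shifted left by
 * one, apparently to convert the 64-bit doorbell index into 32-bit
 * doorbell units.
 */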
static void nv_init_doorbell_index(struct amdgpu_device *adev)
{
        adev->doorbell_index.kiq = AMDGPU_NAVI10_DOORBELL_KIQ;
        adev->doorbell_index.mec_ring0 = AMDGPU_NAVI10_DOORBELL_MEC_RING0;
        adev->doorbell_index.mec_ring1 = AMDGPU_NAVI10_DOORBELL_MEC_RING1;
        adev->doorbell_index.mec_ring2 = AMDGPU_NAVI10_DOORBELL_MEC_RING2;
        adev->doorbell_index.mec_ring3 = AMDGPU_NAVI10_DOORBELL_MEC_RING3;
        adev->doorbell_index.mec_ring4 = AMDGPU_NAVI10_DOORBELL_MEC_RING4;
        adev->doorbell_index.mec_ring5 = AMDGPU_NAVI10_DOORBELL_MEC_RING5;
        adev->doorbell_index.mec_ring6 = AMDGPU_NAVI10_DOORBELL_MEC_RING6;
        adev->doorbell_index.mec_ring7 = AMDGPU_NAVI10_DOORBELL_MEC_RING7;
        adev->doorbell_index.userqueue_start = AMDGPU_NAVI10_DOORBELL_USERQUEUE_START;
        adev->doorbell_index.userqueue_end = AMDGPU_NAVI10_DOORBELL_USERQUEUE_END;
        adev->doorbell_index.gfx_ring0 = AMDGPU_NAVI10_DOORBELL_GFX_RING0;
        adev->doorbell_index.gfx_ring1 = AMDGPU_NAVI10_DOORBELL_GFX_RING1;
        adev->doorbell_index.sdma_engine[0] = AMDGPU_NAVI10_DOORBELL_sDMA_ENGINE0;
        adev->doorbell_index.sdma_engine[1] = AMDGPU_NAVI10_DOORBELL_sDMA_ENGINE1;
        adev->doorbell_index.ih = AMDGPU_NAVI10_DOORBELL_IH;
        adev->doorbell_index.vcn.vcn_ring0_1 = AMDGPU_NAVI10_DOORBELL64_VCN0_1;
        adev->doorbell_index.vcn.vcn_ring2_3 = AMDGPU_NAVI10_DOORBELL64_VCN2_3;
        adev->doorbell_index.vcn.vcn_ring4_5 = AMDGPU_NAVI10_DOORBELL64_VCN4_5;
        adev->doorbell_index.vcn.vcn_ring6_7 = AMDGPU_NAVI10_DOORBELL64_VCN6_7;
        adev->doorbell_index.first_non_cp = AMDGPU_NAVI10_DOORBELL64_FIRST_NON_CP;
        adev->doorbell_index.last_non_cp = AMDGPU_NAVI10_DOORBELL64_LAST_NON_CP;

        adev->doorbell_index.max_assignment = AMDGPU_NAVI10_DOORBELL_MAX_ASSIGNMENT << 1;
        adev->doorbell_index.sdma_doorbell_range = 20;
}

static const struct amdgpu_asic_funcs nv_asic_funcs =
{
        .read_disabled_bios = &nv_read_disabled_bios,
        .read_bios_from_rom = &nv_read_bios_from_rom,
        .read_register = &nv_read_register,
        .reset = &nv_asic_reset,
        .reset_method = &nv_asic_reset_method,
        .set_vga_state = &nv_vga_set_state,
        .get_xclk = &nv_get_xclk,
        .set_uvd_clocks = &nv_set_uvd_clocks,
        .set_vce_clocks = &nv_set_vce_clocks,
        .get_config_memsize = &nv_get_config_memsize,
        .flush_hdp = &nv_flush_hdp,
        .invalidate_hdp = &nv_invalidate_hdp,
        .init_doorbell_index = &nv_init_doorbell_index,
        .need_full_reset = &nv_need_full_reset,
        .get_pcie_usage = &nv_get_pcie_usage,
        .need_reset_on_init = &nv_need_reset_on_init,
        .get_pcie_replay_count = &nv_get_pcie_replay_count,
        .supports_baco = &nv_asic_supports_baco,
};

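/*
 * MMIO_REG_HOLE_OFFSET below is a page-sized window just under the
 * 512KB mark of register space; remap_hdp_registers() later aliases the
 * HDP flush/invalidate registers there so they can be handed out to
 * user processes as a mappable page.
 */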
static int nv_common_early_init(void *handle)
{
#define MMIO_REG_HOLE_OFFSET (0x80000 - PAGE_SIZE)
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        adev->rmmio_remap.reg_offset = MMIO_REG_HOLE_OFFSET;
        adev->rmmio_remap.bus_addr = adev->rmmio_base + MMIO_REG_HOLE_OFFSET;
        adev->smc_rreg = NULL;
        adev->smc_wreg = NULL;
        adev->pcie_rreg = &nv_pcie_rreg;
        adev->pcie_wreg = &nv_pcie_wreg;

        /* TODO: will add them during VCN v2 implementation */
        adev->uvd_ctx_rreg = NULL;
        adev->uvd_ctx_wreg = NULL;

        adev->didt_rreg = &nv_didt_rreg;
        adev->didt_wreg = &nv_didt_wreg;

        adev->asic_funcs = &nv_asic_funcs;

        adev->rev_id = nv_get_rev_id(adev);
        adev->external_rev_id = 0xff;
        switch (adev->asic_type) {
        case CHIP_NAVI10:
                adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
                        AMD_CG_SUPPORT_GFX_CGCG |
                        AMD_CG_SUPPORT_IH_CG |
                        AMD_CG_SUPPORT_HDP_MGCG |
                        AMD_CG_SUPPORT_HDP_LS |
                        AMD_CG_SUPPORT_SDMA_MGCG |
                        AMD_CG_SUPPORT_SDMA_LS |
                        AMD_CG_SUPPORT_MC_MGCG |
                        AMD_CG_SUPPORT_MC_LS |
                        AMD_CG_SUPPORT_ATHUB_MGCG |
                        AMD_CG_SUPPORT_ATHUB_LS |
                        AMD_CG_SUPPORT_VCN_MGCG |
                        AMD_CG_SUPPORT_JPEG_MGCG |
                        AMD_CG_SUPPORT_BIF_MGCG |
                        AMD_CG_SUPPORT_BIF_LS;
                adev->pg_flags = AMD_PG_SUPPORT_VCN |
                        AMD_PG_SUPPORT_VCN_DPG |
                        AMD_PG_SUPPORT_JPEG |
                        AMD_PG_SUPPORT_ATHUB;
                adev->external_rev_id = adev->rev_id + 0x1;
                break;
        case CHIP_NAVI14:
                adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
                        AMD_CG_SUPPORT_GFX_CGCG |
                        AMD_CG_SUPPORT_IH_CG |
                        AMD_CG_SUPPORT_HDP_MGCG |
                        AMD_CG_SUPPORT_HDP_LS |
                        AMD_CG_SUPPORT_SDMA_MGCG |
                        AMD_CG_SUPPORT_SDMA_LS |
                        AMD_CG_SUPPORT_MC_MGCG |
                        AMD_CG_SUPPORT_MC_LS |
                        AMD_CG_SUPPORT_ATHUB_MGCG |
                        AMD_CG_SUPPORT_ATHUB_LS |
                        AMD_CG_SUPPORT_VCN_MGCG |
                        AMD_CG_SUPPORT_JPEG_MGCG |
                        AMD_CG_SUPPORT_BIF_MGCG |
                        AMD_CG_SUPPORT_BIF_LS;
                adev->pg_flags = AMD_PG_SUPPORT_VCN |
                        AMD_PG_SUPPORT_JPEG |
                        AMD_PG_SUPPORT_VCN_DPG;
                adev->external_rev_id = adev->rev_id + 20;
                break;
        case CHIP_NAVI12:
                adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
                        AMD_CG_SUPPORT_GFX_MGLS |
                        AMD_CG_SUPPORT_GFX_CGCG |
                        AMD_CG_SUPPORT_GFX_CP_LS |
                        AMD_CG_SUPPORT_GFX_RLC_LS |
                        AMD_CG_SUPPORT_IH_CG |
                        AMD_CG_SUPPORT_HDP_MGCG |
                        AMD_CG_SUPPORT_HDP_LS |
                        AMD_CG_SUPPORT_SDMA_MGCG |
                        AMD_CG_SUPPORT_SDMA_LS |
                        AMD_CG_SUPPORT_MC_MGCG |
                        AMD_CG_SUPPORT_MC_LS |
                        AMD_CG_SUPPORT_ATHUB_MGCG |
                        AMD_CG_SUPPORT_ATHUB_LS |
                        AMD_CG_SUPPORT_VCN_MGCG |
                        AMD_CG_SUPPORT_JPEG_MGCG;
                adev->pg_flags = AMD_PG_SUPPORT_VCN |
                        AMD_PG_SUPPORT_VCN_DPG |
                        AMD_PG_SUPPORT_JPEG |
                        AMD_PG_SUPPORT_ATHUB;
                /* A guest VM reads 0xffffffff from RCC_DEV0_EPF0_STRAP0,
                 * so rev_id and external_rev_id would come out wrong.
                 * Work around it by hardcoding rev_id to 0 (the default).
                 */
                if (amdgpu_sriov_vf(adev))
                        adev->rev_id = 0;
                adev->external_rev_id = adev->rev_id + 0xa;
                break;
        default:
                /* FIXME: not supported yet */
                return -EINVAL;
        }

        if (amdgpu_sriov_vf(adev)) {
                amdgpu_virt_init_setting(adev);
                xgpu_nv_mailbox_set_irq_funcs(adev);
        }

        return 0;
}

static int nv_common_late_init(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        if (amdgpu_sriov_vf(adev))
                xgpu_nv_mailbox_get_irq(adev);

        return 0;
}

static int nv_common_sw_init(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        if (amdgpu_sriov_vf(adev))
                xgpu_nv_mailbox_add_irq_id(adev);

        return 0;
}

static int nv_common_sw_fini(void *handle)
{
        return 0;
}

static int nv_common_hw_init(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        /* enable pcie gen2/3 link */
        nv_pcie_gen3_enable(adev);
        /* enable aspm */
        nv_program_aspm(adev);
        /* setup nbio registers */
        adev->nbio.funcs->init_registers(adev);
        /* remap HDP registers to a hole in MMIO space, so that they can
         * be exposed to process address space
         */
        if (adev->nbio.funcs->remap_hdp_registers)
                adev->nbio.funcs->remap_hdp_registers(adev);
        /* enable the doorbell aperture */
        nv_enable_doorbell_aperture(adev, true);

        return 0;
}

static int nv_common_hw_fini(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        /* disable the doorbell aperture */
        nv_enable_doorbell_aperture(adev, false);

        return 0;
}

static int nv_common_suspend(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        return nv_common_hw_fini(adev);
}

static int nv_common_resume(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        return nv_common_hw_init(adev);
}

static bool nv_common_is_idle(void *handle)
{
        return true;
}

static int nv_common_wait_for_idle(void *handle)
{
        return 0;
}

static int nv_common_soft_reset(void *handle)
{
        return 0;
}

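/*
 * HDP memory power gating: enable exactly one of light sleep (LS), deep
 * sleep (DS) or shutdown (SD) for the IPH and RC memories, as selected
 * by cg_flags.  HDP 5.0 cannot switch power modes on the fly, so the
 * clocks are forced on and every mode disabled before the requested one
 * is programmed.
 */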
static void nv_update_hdp_mem_power_gating(struct amdgpu_device *adev,
                                           bool enable)
{
        uint32_t hdp_clk_cntl, hdp_clk_cntl1;
        uint32_t hdp_mem_pwr_cntl;

        if (!(adev->cg_flags & (AMD_CG_SUPPORT_HDP_LS |
                                AMD_CG_SUPPORT_HDP_DS |
                                AMD_CG_SUPPORT_HDP_SD)))
                return;

        hdp_clk_cntl = hdp_clk_cntl1 = RREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL);
        hdp_mem_pwr_cntl = RREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL);

        /* Before switching the clock/power mode, force the IPH and RC
         * clocks on.
         */
        hdp_clk_cntl = REG_SET_FIELD(hdp_clk_cntl, HDP_CLK_CNTL,
                                     IPH_MEM_CLK_SOFT_OVERRIDE, 1);
        hdp_clk_cntl = REG_SET_FIELD(hdp_clk_cntl, HDP_CLK_CNTL,
                                     RC_MEM_CLK_SOFT_OVERRIDE, 1);
        WREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL, hdp_clk_cntl);

        /* HDP 5.0 doesn't support dynamic power mode switching; disable
         * clock and power gating before making any change.
         */
        hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
                                         IPH_MEM_POWER_CTRL_EN, 0);
        hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
                                         IPH_MEM_POWER_LS_EN, 0);
        hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
                                         IPH_MEM_POWER_DS_EN, 0);
        hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
                                         IPH_MEM_POWER_SD_EN, 0);
        hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
                                         RC_MEM_POWER_CTRL_EN, 0);
        hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
                                         RC_MEM_POWER_LS_EN, 0);
        hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
                                         RC_MEM_POWER_DS_EN, 0);
        hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
                                         RC_MEM_POWER_SD_EN, 0);
        WREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL, hdp_mem_pwr_cntl);

        /* Only one clock gating mode (LS/DS/SD) can be enabled */
        if (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS) {
                hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
                                                 HDP_MEM_POWER_CTRL,
                                                 IPH_MEM_POWER_LS_EN, enable);
                hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
                                                 HDP_MEM_POWER_CTRL,
                                                 RC_MEM_POWER_LS_EN, enable);
        } else if (adev->cg_flags & AMD_CG_SUPPORT_HDP_DS) {
                hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
                                                 HDP_MEM_POWER_CTRL,
                                                 IPH_MEM_POWER_DS_EN, enable);
                hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
                                                 HDP_MEM_POWER_CTRL,
                                                 RC_MEM_POWER_DS_EN, enable);
        } else if (adev->cg_flags & AMD_CG_SUPPORT_HDP_SD) {
                hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
                                                 HDP_MEM_POWER_CTRL,
                                                 IPH_MEM_POWER_SD_EN, enable);
                /* RC should not use shutdown mode; fall back to DS */
                hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
                                                 HDP_MEM_POWER_CTRL,
                                                 RC_MEM_POWER_DS_EN, enable);
        }

        WREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL, hdp_mem_pwr_cntl);

        /* Restore the IPH and RC clock overrides after the clock/power
         * mode change.
         */
        WREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL, hdp_clk_cntl1);
}

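/*
 * HDP medium grain clock gating is driven by the SOFT_OVERRIDE bits in
 * HDP_CLK_CNTL: clearing them lets the clocks gate dynamically, setting
 * them forces the clocks on.
 */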
static void nv_update_hdp_clock_gating(struct amdgpu_device *adev,
                                       bool enable)
{
        uint32_t hdp_clk_cntl;

        if (!(adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG))
                return;

        hdp_clk_cntl = RREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL);

        if (enable) {
                hdp_clk_cntl &=
                        ~(uint32_t)
                          (HDP_CLK_CNTL__IPH_MEM_CLK_SOFT_OVERRIDE_MASK |
                           HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK |
                           HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK |
                           HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK |
                           HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK |
                           HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK);
        } else {
                hdp_clk_cntl |= HDP_CLK_CNTL__IPH_MEM_CLK_SOFT_OVERRIDE_MASK |
                        HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK |
                        HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK |
                        HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK |
                        HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK |
                        HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK;
        }

        WREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL, hdp_clk_cntl);
}

static int nv_common_set_clockgating_state(void *handle,
                                           enum amd_clockgating_state state)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        if (amdgpu_sriov_vf(adev))
                return 0;

        switch (adev->asic_type) {
        case CHIP_NAVI10:
        case CHIP_NAVI14:
        case CHIP_NAVI12:
                adev->nbio.funcs->update_medium_grain_clock_gating(adev,
                                state == AMD_CG_STATE_GATE);
                adev->nbio.funcs->update_medium_grain_light_sleep(adev,
                                state == AMD_CG_STATE_GATE);
                nv_update_hdp_mem_power_gating(adev,
                                state == AMD_CG_STATE_GATE);
                nv_update_hdp_clock_gating(adev,
                                state == AMD_CG_STATE_GATE);
                break;
        default:
                break;
        }
        return 0;
}

static int nv_common_set_powergating_state(void *handle,
                                           enum amd_powergating_state state)
{
        /* TODO */
        return 0;
}

static void nv_common_get_clockgating_state(void *handle, u32 *flags)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        uint32_t tmp;

        if (amdgpu_sriov_vf(adev))
                *flags = 0;

        adev->nbio.funcs->get_clockgating_state(adev, flags);

        /* AMD_CG_SUPPORT_HDP_MGCG */
        tmp = RREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL);
        if (!(tmp & (HDP_CLK_CNTL__IPH_MEM_CLK_SOFT_OVERRIDE_MASK |
                     HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK |
                     HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK |
                     HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK |
                     HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK |
                     HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK)))
                *flags |= AMD_CG_SUPPORT_HDP_MGCG;

        /* AMD_CG_SUPPORT_HDP_LS/DS/SD */
        tmp = RREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL);
        if (tmp & HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK)
                *flags |= AMD_CG_SUPPORT_HDP_LS;
        else if (tmp & HDP_MEM_POWER_CTRL__IPH_MEM_POWER_DS_EN_MASK)
                *flags |= AMD_CG_SUPPORT_HDP_DS;
        else if (tmp & HDP_MEM_POWER_CTRL__IPH_MEM_POWER_SD_EN_MASK)
                *flags |= AMD_CG_SUPPORT_HDP_SD;
}

static const struct amd_ip_funcs nv_common_ip_funcs = {
        .name = "nv_common",
        .early_init = nv_common_early_init,
        .late_init = nv_common_late_init,
        .sw_init = nv_common_sw_init,
        .sw_fini = nv_common_sw_fini,
        .hw_init = nv_common_hw_init,
        .hw_fini = nv_common_hw_fini,
        .suspend = nv_common_suspend,
        .resume = nv_common_resume,
        .is_idle = nv_common_is_idle,
        .wait_for_idle = nv_common_wait_for_idle,
        .soft_reset = nv_common_soft_reset,
        .set_clockgating_state = nv_common_set_clockgating_state,
        .set_powergating_state = nv_common_set_powergating_state,
        .get_clockgating_state = nv_common_get_clockgating_state,
};