linux/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>

#include "amdgpu.h"
#include "amdgpu_vcn.h"
#include "amdgpu_pm.h"
#include "soc15.h"
#include "soc15d.h"
#include "vcn_v2_0.h"

#include "vcn/vcn_2_5_offset.h"
#include "vcn/vcn_2_5_sh_mask.h"
#include "ivsrcid/vcn/irqsrcs_vcn_2_0.h"

#define mmUVD_CONTEXT_ID_INTERNAL_OFFSET			0x27
#define mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET			0x0f
#define mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET			0x10
#define mmUVD_GPCOM_VCPU_DATA1_INTERNAL_OFFSET			0x11
#define mmUVD_NO_OP_INTERNAL_OFFSET				0x29
#define mmUVD_GP_SCRATCH8_INTERNAL_OFFSET			0x66
#define mmUVD_SCRATCH9_INTERNAL_OFFSET				0xc01d

#define mmUVD_LMI_RBC_IB_VMID_INTERNAL_OFFSET			0x431
#define mmUVD_LMI_RBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET		0x3b4
#define mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET		0x3b5
#define mmUVD_RBC_IB_SIZE_INTERNAL_OFFSET			0x25c

#define mmUVD_JPEG_PITCH_INTERNAL_OFFSET			0x401f

#define VCN25_MAX_HW_INSTANCES_ARCTURUS			2
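
/*
 * The *_INTERNAL_OFFSET values above are register indices in the VCPU's
 * internal address space and are programmed indirectly through decode
 * ring packets; the matching external.* fields set up in sw_init hold
 * the SOC15 MMIO offsets used for direct register access.
 */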

static void vcn_v2_5_set_dec_ring_funcs(struct amdgpu_device *adev);
static void vcn_v2_5_set_enc_ring_funcs(struct amdgpu_device *adev);
static void vcn_v2_5_set_jpeg_ring_funcs(struct amdgpu_device *adev);
static void vcn_v2_5_set_irq_funcs(struct amdgpu_device *adev);
static int vcn_v2_5_set_powergating_state(void *handle,
				enum amd_powergating_state state);

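/* IH client id for each VCN instance, indexed by instance number */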
static int amdgpu_ih_clientid_vcns[] = {
	SOC15_IH_CLIENTID_VCN,
	SOC15_IH_CLIENTID_VCN1
};

/**
 * vcn_v2_5_early_init - set function pointers
 *
 * @handle: amdgpu_device pointer
 *
 * Set ring and irq function pointers
 */
static int vcn_v2_5_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->asic_type == CHIP_ARCTURUS) {
		u32 harvest;
		int i;

		adev->vcn.num_vcn_inst = VCN25_MAX_HW_INSTANCES_ARCTURUS;
		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
			harvest = RREG32_SOC15(UVD, i, mmCC_UVD_HARVESTING);
			if (harvest & CC_UVD_HARVESTING__UVD_DISABLE_MASK)
				adev->vcn.harvest_config |= 1 << i;
		}

		if (adev->vcn.harvest_config == (AMDGPU_VCN_HARVEST_VCN0 |
						 AMDGPU_VCN_HARVEST_VCN1))
			/* both instances are harvested, disable the block */
			return -ENOENT;
	} else {
		adev->vcn.num_vcn_inst = 1;
	}

	adev->vcn.num_enc_rings = 2;

	vcn_v2_5_set_dec_ring_funcs(adev);
	vcn_v2_5_set_enc_ring_funcs(adev);
	vcn_v2_5_set_jpeg_ring_funcs(adev);
	vcn_v2_5_set_irq_funcs(adev);

	return 0;
}

/**
 * vcn_v2_5_sw_init - sw init for VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Load firmware and sw initialization
 */
static int vcn_v2_5_sw_init(void *handle)
{
	struct amdgpu_ring *ring;
	int i, j, r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (j = 0; j < adev->vcn.num_vcn_inst; j++) {
		if (adev->vcn.harvest_config & (1 << j))
			continue;
		/* VCN DEC TRAP */
		r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[j],
				VCN_2_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT, &adev->vcn.inst[j].irq);
		if (r)
			return r;

		/* VCN ENC TRAP */
		for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
			r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[j],
				i + VCN_2_0__SRCID__UVD_ENC_GENERAL_PURPOSE, &adev->vcn.inst[j].irq);
			if (r)
				return r;
		}

		/* VCN JPEG TRAP */
		r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[j],
				VCN_2_0__SRCID__JPEG_DECODE, &adev->vcn.inst[j].irq);
		if (r)
			return r;
	}

	r = amdgpu_vcn_sw_init(adev);
	if (r)
		return r;

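	/*
	 * With PSP firmware loading, both instances run the same image:
	 * the VCN and VCN1 ucode entries below point at one firmware blob,
	 * and fw_size reserves TMR space for each instance.
	 */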
	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		const struct common_firmware_header *hdr;

		hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
		adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].ucode_id = AMDGPU_UCODE_ID_VCN;
		adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].fw = adev->vcn.fw;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);

		if (adev->vcn.num_vcn_inst == VCN25_MAX_HW_INSTANCES_ARCTURUS) {
			adev->firmware.ucode[AMDGPU_UCODE_ID_VCN1].ucode_id = AMDGPU_UCODE_ID_VCN1;
			adev->firmware.ucode[AMDGPU_UCODE_ID_VCN1].fw = adev->vcn.fw;
			adev->firmware.fw_size +=
				ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
		}
		DRM_INFO("PSP loading VCN firmware\n");
	}

	r = amdgpu_vcn_resume(adev);
	if (r)
		return r;

	for (j = 0; j < adev->vcn.num_vcn_inst; j++) {
		if (adev->vcn.harvest_config & (1 << j))
			continue;
		adev->vcn.internal.context_id = mmUVD_CONTEXT_ID_INTERNAL_OFFSET;
		adev->vcn.internal.ib_vmid = mmUVD_LMI_RBC_IB_VMID_INTERNAL_OFFSET;
		adev->vcn.internal.ib_bar_low = mmUVD_LMI_RBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET;
		adev->vcn.internal.ib_bar_high = mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET;
		adev->vcn.internal.ib_size = mmUVD_RBC_IB_SIZE_INTERNAL_OFFSET;
		adev->vcn.internal.gp_scratch8 = mmUVD_GP_SCRATCH8_INTERNAL_OFFSET;

		adev->vcn.internal.scratch9 = mmUVD_SCRATCH9_INTERNAL_OFFSET;
		adev->vcn.inst[j].external.scratch9 = SOC15_REG_OFFSET(UVD, j, mmUVD_SCRATCH9);
		adev->vcn.internal.data0 = mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET;
		adev->vcn.inst[j].external.data0 = SOC15_REG_OFFSET(UVD, j, mmUVD_GPCOM_VCPU_DATA0);
		adev->vcn.internal.data1 = mmUVD_GPCOM_VCPU_DATA1_INTERNAL_OFFSET;
		adev->vcn.inst[j].external.data1 = SOC15_REG_OFFSET(UVD, j, mmUVD_GPCOM_VCPU_DATA1);
		adev->vcn.internal.cmd = mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET;
		adev->vcn.inst[j].external.cmd = SOC15_REG_OFFSET(UVD, j, mmUVD_GPCOM_VCPU_CMD);
		adev->vcn.internal.nop = mmUVD_NO_OP_INTERNAL_OFFSET;
		adev->vcn.inst[j].external.nop = SOC15_REG_OFFSET(UVD, j, mmUVD_NO_OP);

		adev->vcn.internal.jpeg_pitch = mmUVD_JPEG_PITCH_INTERNAL_OFFSET;
		adev->vcn.inst[j].external.jpeg_pitch = SOC15_REG_OFFSET(UVD, j, mmUVD_JPEG_PITCH);

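		/*
		 * Doorbell layout: each instance gets a block of eight
		 * doorbells at (vcn_ring0_1 << 1) + 8*j -- decode at
		 * offset 0, JPEG at offset 1, the encode rings at 2 and 3.
		 */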
		ring = &adev->vcn.inst[j].ring_dec;
		ring->use_doorbell = true;
		ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 8*j;
		sprintf(ring->name, "vcn_dec_%d", j);
		r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[j].irq, 0);
		if (r)
			return r;

		for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
			ring = &adev->vcn.inst[j].ring_enc[i];
			ring->use_doorbell = true;
			ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 2 + i + 8*j;
			sprintf(ring->name, "vcn_enc_%d.%d", j, i);
			r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[j].irq, 0);
			if (r)
				return r;
		}

		ring = &adev->vcn.inst[j].ring_jpeg;
		ring->use_doorbell = true;
		ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1 + 8*j;
		sprintf(ring->name, "vcn_jpeg_%d", j);
		r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[j].irq, 0);
		if (r)
			return r;
	}

	return 0;
}

/**
 * vcn_v2_5_sw_fini - sw fini for VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * VCN suspend and free up sw allocation
 */
static int vcn_v2_5_sw_fini(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_vcn_suspend(adev);
	if (r)
		return r;

	r = amdgpu_vcn_sw_fini(adev);

	return r;
}

/**
 * vcn_v2_5_hw_init - start and test VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */
static int vcn_v2_5_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring;
	int i, j, r;

	for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
		if (adev->vcn.harvest_config & (1 << j))
			continue;
		ring = &adev->vcn.inst[j].ring_dec;

		adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
						     ring->doorbell_index, j);

		r = amdgpu_ring_test_helper(ring);
		if (r)
			goto done;

		for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
			ring = &adev->vcn.inst[j].ring_enc[i];
			r = amdgpu_ring_test_helper(ring);
			if (r)
				goto done;
		}

		ring = &adev->vcn.inst[j].ring_jpeg;
		r = amdgpu_ring_test_helper(ring);
		if (r)
			goto done;
	}
done:
	if (!r)
		DRM_INFO("VCN decode and encode initialized successfully.\n");

	return r;
}

/**
 * vcn_v2_5_hw_fini - stop the hardware block
 *
 * @handle: amdgpu_device pointer
 *
 * Stop the VCN block, mark ring as not ready any more
 */
static int vcn_v2_5_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring;
	int i, j;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		ring = &adev->vcn.inst[i].ring_dec;

		if (RREG32_SOC15(VCN, i, mmUVD_STATUS))
			vcn_v2_5_set_powergating_state(adev, AMD_PG_STATE_GATE);

		ring->sched.ready = false;

		for (j = 0; j < adev->vcn.num_enc_rings; ++j) {
			ring = &adev->vcn.inst[i].ring_enc[j];
			ring->sched.ready = false;
		}

		ring = &adev->vcn.inst[i].ring_jpeg;
		ring->sched.ready = false;
	}

	return 0;
}

/**
 * vcn_v2_5_suspend - suspend VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * HW fini and suspend VCN block
 */
static int vcn_v2_5_suspend(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = vcn_v2_5_hw_fini(adev);
	if (r)
		return r;

	r = amdgpu_vcn_suspend(adev);

	return r;
}

/**
 * vcn_v2_5_resume - resume VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Resume firmware and hw init VCN block
 */
static int vcn_v2_5_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_vcn_resume(adev);
	if (r)
		return r;

	r = vcn_v2_5_hw_init(adev);

	return r;
}

/**
 * vcn_v2_5_mc_resume - memory controller programming
 *
 * @adev: amdgpu_device pointer
 *
 * Let the VCN memory controller know its offsets
 */
static void vcn_v2_5_mc_resume(struct amdgpu_device *adev)
{
	uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
	uint32_t offset;
	int i;

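	/*
	 * Layout of the per-instance VCN BO assumed by the cache windows
	 * below: the firmware image (only when the driver itself loads the
	 * firmware; with PSP loading the image lives in the TMR and offset
	 * stays 0), followed by the stack window of AMDGPU_VCN_STACK_SIZE
	 * and the context window of AMDGPU_VCN_CONTEXT_SIZE.
	 */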
	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		/* cache window 0: fw */
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
			WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
				(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_lo));
			WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
				(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_hi));
			WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET0, 0);
			offset = 0;
		} else {
			WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
				lower_32_bits(adev->vcn.inst[i].gpu_addr));
			WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
				upper_32_bits(adev->vcn.inst[i].gpu_addr));
			offset = size;
			WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET0,
				AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
		}
		WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_SIZE0, size);

		/* cache window 1: stack */
		WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
			lower_32_bits(adev->vcn.inst[i].gpu_addr + offset));
		WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
			upper_32_bits(adev->vcn.inst[i].gpu_addr + offset));
		WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET1, 0);
		WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_STACK_SIZE);

		/* cache window 2: context */
		WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
			lower_32_bits(adev->vcn.inst[i].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
		WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
			upper_32_bits(adev->vcn.inst[i].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
		WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET2, 0);
		WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE);
	}
}

/**
 * vcn_v2_5_disable_clock_gating - disable VCN clock gating
 *
 * @adev: amdgpu_device pointer
 *
 * Disable clock gating for VCN block
 */
static void vcn_v2_5_disable_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data;
	int ret = 0;
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		/* UVD disable CGC */
		data = RREG32_SOC15(VCN, i, mmUVD_CGC_CTRL);
		if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
			data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
		else
			data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
		data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
		data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
		WREG32_SOC15(VCN, i, mmUVD_CGC_CTRL, data);

		data = RREG32_SOC15(VCN, i, mmUVD_CGC_GATE);
		data &= ~(UVD_CGC_GATE__SYS_MASK
			| UVD_CGC_GATE__UDEC_MASK
			| UVD_CGC_GATE__MPEG2_MASK
			| UVD_CGC_GATE__REGS_MASK
			| UVD_CGC_GATE__RBC_MASK
			| UVD_CGC_GATE__LMI_MC_MASK
			| UVD_CGC_GATE__LMI_UMC_MASK
			| UVD_CGC_GATE__IDCT_MASK
			| UVD_CGC_GATE__MPRD_MASK
			| UVD_CGC_GATE__MPC_MASK
			| UVD_CGC_GATE__LBSI_MASK
			| UVD_CGC_GATE__LRBBM_MASK
			| UVD_CGC_GATE__UDEC_RE_MASK
			| UVD_CGC_GATE__UDEC_CM_MASK
			| UVD_CGC_GATE__UDEC_IT_MASK
			| UVD_CGC_GATE__UDEC_DB_MASK
			| UVD_CGC_GATE__UDEC_MP_MASK
			| UVD_CGC_GATE__WCB_MASK
			| UVD_CGC_GATE__VCPU_MASK
			| UVD_CGC_GATE__MMSCH_MASK);

		WREG32_SOC15(VCN, i, mmUVD_CGC_GATE, data);

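		/* wait for the gate bits just cleared to read back as zero */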
		SOC15_WAIT_ON_RREG(VCN, i, mmUVD_CGC_GATE, 0, 0xFFFFFFFF, ret);

		data = RREG32_SOC15(VCN, i, mmUVD_CGC_CTRL);
		data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK
			| UVD_CGC_CTRL__UDEC_CM_MODE_MASK
			| UVD_CGC_CTRL__UDEC_IT_MODE_MASK
			| UVD_CGC_CTRL__UDEC_DB_MODE_MASK
			| UVD_CGC_CTRL__UDEC_MP_MODE_MASK
			| UVD_CGC_CTRL__SYS_MODE_MASK
			| UVD_CGC_CTRL__UDEC_MODE_MASK
			| UVD_CGC_CTRL__MPEG2_MODE_MASK
			| UVD_CGC_CTRL__REGS_MODE_MASK
			| UVD_CGC_CTRL__RBC_MODE_MASK
			| UVD_CGC_CTRL__LMI_MC_MODE_MASK
			| UVD_CGC_CTRL__LMI_UMC_MODE_MASK
			| UVD_CGC_CTRL__IDCT_MODE_MASK
			| UVD_CGC_CTRL__MPRD_MODE_MASK
			| UVD_CGC_CTRL__MPC_MODE_MASK
			| UVD_CGC_CTRL__LBSI_MODE_MASK
			| UVD_CGC_CTRL__LRBBM_MODE_MASK
			| UVD_CGC_CTRL__WCB_MODE_MASK
			| UVD_CGC_CTRL__VCPU_MODE_MASK
			| UVD_CGC_CTRL__MMSCH_MODE_MASK);
		WREG32_SOC15(VCN, i, mmUVD_CGC_CTRL, data);

		/* turn on the SUVD clocks */
		data = RREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_GATE);
		data |= (UVD_SUVD_CGC_GATE__SRE_MASK
			| UVD_SUVD_CGC_GATE__SIT_MASK
			| UVD_SUVD_CGC_GATE__SMP_MASK
			| UVD_SUVD_CGC_GATE__SCM_MASK
			| UVD_SUVD_CGC_GATE__SDB_MASK
			| UVD_SUVD_CGC_GATE__SRE_H264_MASK
			| UVD_SUVD_CGC_GATE__SRE_HEVC_MASK
			| UVD_SUVD_CGC_GATE__SIT_H264_MASK
			| UVD_SUVD_CGC_GATE__SIT_HEVC_MASK
			| UVD_SUVD_CGC_GATE__SCM_H264_MASK
			| UVD_SUVD_CGC_GATE__SCM_HEVC_MASK
			| UVD_SUVD_CGC_GATE__SDB_H264_MASK
			| UVD_SUVD_CGC_GATE__SDB_HEVC_MASK
			| UVD_SUVD_CGC_GATE__SCLR_MASK
			| UVD_SUVD_CGC_GATE__UVD_SC_MASK
			| UVD_SUVD_CGC_GATE__ENT_MASK
			| UVD_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK
			| UVD_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK
			| UVD_SUVD_CGC_GATE__SITE_MASK
			| UVD_SUVD_CGC_GATE__SRE_VP9_MASK
			| UVD_SUVD_CGC_GATE__SCM_VP9_MASK
			| UVD_SUVD_CGC_GATE__SIT_VP9_DEC_MASK
			| UVD_SUVD_CGC_GATE__SDB_VP9_MASK
			| UVD_SUVD_CGC_GATE__IME_HEVC_MASK);
		WREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_GATE, data);

		data = RREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_CTRL);
		data &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK
			| UVD_SUVD_CGC_CTRL__UVD_SC_MODE_MASK
			| UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
			| UVD_SUVD_CGC_CTRL__IME_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SITE_MODE_MASK);
		WREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_CTRL, data);
	}
}

/**
 * vcn_v2_5_enable_clock_gating - enable VCN clock gating
 *
 * @adev: amdgpu_device pointer
 *
 * Enable clock gating for VCN block
 */
static void vcn_v2_5_enable_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data = 0;
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		/* enable UVD CGC */
		data = RREG32_SOC15(VCN, i, mmUVD_CGC_CTRL);
		if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
			data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
		else
			data |= 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
		data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
		data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
		WREG32_SOC15(VCN, i, mmUVD_CGC_CTRL, data);

		data = RREG32_SOC15(VCN, i, mmUVD_CGC_CTRL);
		data |= (UVD_CGC_CTRL__UDEC_RE_MODE_MASK
			| UVD_CGC_CTRL__UDEC_CM_MODE_MASK
			| UVD_CGC_CTRL__UDEC_IT_MODE_MASK
			| UVD_CGC_CTRL__UDEC_DB_MODE_MASK
			| UVD_CGC_CTRL__UDEC_MP_MODE_MASK
			| UVD_CGC_CTRL__SYS_MODE_MASK
			| UVD_CGC_CTRL__UDEC_MODE_MASK
			| UVD_CGC_CTRL__MPEG2_MODE_MASK
			| UVD_CGC_CTRL__REGS_MODE_MASK
			| UVD_CGC_CTRL__RBC_MODE_MASK
			| UVD_CGC_CTRL__LMI_MC_MODE_MASK
			| UVD_CGC_CTRL__LMI_UMC_MODE_MASK
			| UVD_CGC_CTRL__IDCT_MODE_MASK
			| UVD_CGC_CTRL__MPRD_MODE_MASK
			| UVD_CGC_CTRL__MPC_MODE_MASK
			| UVD_CGC_CTRL__LBSI_MODE_MASK
			| UVD_CGC_CTRL__LRBBM_MODE_MASK
			| UVD_CGC_CTRL__WCB_MODE_MASK
			| UVD_CGC_CTRL__VCPU_MODE_MASK);
		WREG32_SOC15(VCN, i, mmUVD_CGC_CTRL, data);

		data = RREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_CTRL);
		data |= (UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK
			| UVD_SUVD_CGC_CTRL__UVD_SC_MODE_MASK
			| UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
			| UVD_SUVD_CGC_CTRL__IME_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SITE_MODE_MASK);
		WREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_CTRL, data);
	}
}

/**
 * jpeg_v2_5_start - start JPEG block
 *
 * @adev: amdgpu_device pointer
 *
 * Setup and start the JPEG block
 */
static int jpeg_v2_5_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	uint32_t tmp;
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		ring = &adev->vcn.inst[i].ring_jpeg;
		/* disable anti hang mechanism */
		WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_JPEG_POWER_STATUS), 0,
			~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK);

		/* JPEG disable CGC */
		tmp = RREG32_SOC15(VCN, i, mmJPEG_CGC_CTRL);
		tmp |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
		tmp |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
		tmp |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
		WREG32_SOC15(VCN, i, mmJPEG_CGC_CTRL, tmp);

		tmp = RREG32_SOC15(VCN, i, mmJPEG_CGC_GATE);
		tmp &= ~(JPEG_CGC_GATE__JPEG_DEC_MASK
			| JPEG_CGC_GATE__JPEG2_DEC_MASK
			| JPEG_CGC_GATE__JMCIF_MASK
			| JPEG_CGC_GATE__JRBBM_MASK);
		WREG32_SOC15(VCN, i, mmJPEG_CGC_GATE, tmp);

		tmp = RREG32_SOC15(VCN, i, mmJPEG_CGC_CTRL);
		tmp &= ~(JPEG_CGC_CTRL__JPEG_DEC_MODE_MASK
			| JPEG_CGC_CTRL__JPEG2_DEC_MODE_MASK
			| JPEG_CGC_CTRL__JMCIF_MODE_MASK
			| JPEG_CGC_CTRL__JRBBM_MODE_MASK);
		WREG32_SOC15(VCN, i, mmJPEG_CGC_CTRL, tmp);

		/* MJPEG global tiling registers */
		WREG32_SOC15(UVD, i, mmJPEG_DEC_GFX8_ADDR_CONFIG,
			adev->gfx.config.gb_addr_config);
		WREG32_SOC15(UVD, i, mmJPEG_DEC_GFX10_ADDR_CONFIG,
			adev->gfx.config.gb_addr_config);

		/* enable JMI channel */
		WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_JMI_CNTL), 0,
			~UVD_JMI_CNTL__SOFT_RESET_MASK);

		/* enable System Interrupt for JRBC */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmJPEG_SYS_INT_EN),
			JPEG_SYS_INT_EN__DJRBC_MASK,
			~JPEG_SYS_INT_EN__DJRBC_MASK);

		WREG32_SOC15(UVD, i, mmUVD_LMI_JRBC_RB_VMID, 0);
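		/* presumably RB_NO_FETCH (bit 0) | RB_RPTR_WR_EN (bit 1):
		 * hold off ring fetches while the JRBC registers are
		 * programmed, then re-enable fetching below */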
		WREG32_SOC15(UVD, i, mmUVD_JRBC_RB_CNTL, (0x00000001L | 0x00000002L));
		WREG32_SOC15(UVD, i, mmUVD_LMI_JRBC_RB_64BIT_BAR_LOW,
			lower_32_bits(ring->gpu_addr));
		WREG32_SOC15(UVD, i, mmUVD_LMI_JRBC_RB_64BIT_BAR_HIGH,
			upper_32_bits(ring->gpu_addr));
		WREG32_SOC15(UVD, i, mmUVD_JRBC_RB_RPTR, 0);
		WREG32_SOC15(UVD, i, mmUVD_JRBC_RB_WPTR, 0);
		WREG32_SOC15(UVD, i, mmUVD_JRBC_RB_CNTL, 0x00000002L);
		WREG32_SOC15(UVD, i, mmUVD_JRBC_RB_SIZE, ring->ring_size / 4);
		ring->wptr = RREG32_SOC15(UVD, i, mmUVD_JRBC_RB_WPTR);
	}

	return 0;
}

/**
 * jpeg_v2_5_stop - stop JPEG block
 *
 * @adev: amdgpu_device pointer
 *
 * stop the JPEG block
 */
static int jpeg_v2_5_stop(struct amdgpu_device *adev)
{
	uint32_t tmp;
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		/* reset JMI */
		WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_JMI_CNTL),
			UVD_JMI_CNTL__SOFT_RESET_MASK,
			~UVD_JMI_CNTL__SOFT_RESET_MASK);

		tmp = RREG32_SOC15(VCN, i, mmJPEG_CGC_GATE);
		tmp |= (JPEG_CGC_GATE__JPEG_DEC_MASK
			| JPEG_CGC_GATE__JPEG2_DEC_MASK
			| JPEG_CGC_GATE__JMCIF_MASK
			| JPEG_CGC_GATE__JRBBM_MASK);
		WREG32_SOC15(VCN, i, mmJPEG_CGC_GATE, tmp);

		/* enable anti hang mechanism */
		WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_JPEG_POWER_STATUS),
			UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK,
			~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK);
	}

	return 0;
}

static int vcn_v2_5_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	uint32_t rb_bufsz, tmp;
	int i, j, k, r;

	if (adev->pm.dpm_enabled)
		amdgpu_dpm_enable_uvd(adev, true);

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		/* disable register anti-hang mechanism */
		WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_POWER_STATUS), 0,
			~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

		/* set uvd status busy */
		tmp = RREG32_SOC15(UVD, i, mmUVD_STATUS) | UVD_STATUS__UVD_BUSY;
		WREG32_SOC15(UVD, i, mmUVD_STATUS, tmp);
	}

	/* SW clock gating */
	vcn_v2_5_disable_clock_gating(adev);

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		/* enable VCPU clock */
		WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CNTL),
			UVD_VCPU_CNTL__CLK_EN_MASK, ~UVD_VCPU_CNTL__CLK_EN_MASK);

		/* disable master interrupt */
		WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_MASTINT_EN), 0,
			~UVD_MASTINT_EN__VCPU_EN_MASK);

		/* setup mmUVD_LMI_CTRL */
		tmp = RREG32_SOC15(UVD, i, mmUVD_LMI_CTRL);
		tmp &= ~0xff;
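		/* the cleared low byte is the write-clean timer field; it is
		 * set to 8 in the write below */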
		WREG32_SOC15(UVD, i, mmUVD_LMI_CTRL, tmp | 0x8 |
			UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
			UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
			UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
			UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK);

		/* setup mmUVD_MPC_CNTL */
		tmp = RREG32_SOC15(UVD, i, mmUVD_MPC_CNTL);
		tmp &= ~UVD_MPC_CNTL__REPLACEMENT_MODE_MASK;
		tmp |= 0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT;
		WREG32_SOC15(VCN, i, mmUVD_MPC_CNTL, tmp);

		/* setup UVD_MPC_SET_MUXA0 */
		WREG32_SOC15(UVD, i, mmUVD_MPC_SET_MUXA0,
			((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
			(0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
			(0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
			(0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)));

		/* setup UVD_MPC_SET_MUXB0 */
		WREG32_SOC15(UVD, i, mmUVD_MPC_SET_MUXB0,
			((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
			(0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
			(0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
			(0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)));

		/* setup mmUVD_MPC_SET_MUX */
		WREG32_SOC15(UVD, i, mmUVD_MPC_SET_MUX,
			((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
			(0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
			(0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)));
	}

	vcn_v2_5_mc_resume(adev);

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		/* VCN global tiling registers */
		WREG32_SOC15(UVD, i, mmUVD_GFX8_ADDR_CONFIG,
			adev->gfx.config.gb_addr_config);

		/* enable LMI MC and UMC channels */
		WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_CTRL2), 0,
			~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);

		/* unblock VCPU register access */
		WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_RB_ARB_CTRL), 0,
			~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);

		WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CNTL), 0,
			~UVD_VCPU_CNTL__BLK_RST_MASK);

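		/*
		 * Wait for the VCPU to come out of reset and report boot
		 * completion in UVD_STATUS (bit 1); retry with a block
		 * reset up to 10 times before giving up.
		 */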
		for (k = 0; k < 10; ++k) {
			uint32_t status;

			for (j = 0; j < 100; ++j) {
				status = RREG32_SOC15(UVD, i, mmUVD_STATUS);
				if (status & 2)
					break;
				if (amdgpu_emu_mode == 1)
					msleep(500);
				else
					mdelay(10);
			}
			r = 0;
			if (status & 2)
				break;

			DRM_ERROR("VCN decode not responding, trying to reset the VCPU!!!\n");
			WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CNTL),
				UVD_VCPU_CNTL__BLK_RST_MASK,
				~UVD_VCPU_CNTL__BLK_RST_MASK);
			mdelay(10);
			WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CNTL), 0,
				~UVD_VCPU_CNTL__BLK_RST_MASK);

			mdelay(10);
			r = -1;
		}

		if (r) {
			DRM_ERROR("VCN decode not responding, giving up!!!\n");
			return r;
		}

		/* enable master interrupt */
		WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_MASTINT_EN),
			UVD_MASTINT_EN__VCPU_EN_MASK,
			~UVD_MASTINT_EN__VCPU_EN_MASK);

		/* clear the busy bit of VCN_STATUS */
		WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_STATUS), 0,
			~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));

		WREG32_SOC15(UVD, i, mmUVD_LMI_RBC_RB_VMID, 0);

		ring = &adev->vcn.inst[i].ring_dec;
		/* force RBC into idle state */
		rb_bufsz = order_base_2(ring->ring_size);
		tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
		WREG32_SOC15(UVD, i, mmUVD_RBC_RB_CNTL, tmp);

		/* program the RB_BASE for ring buffer */
		WREG32_SOC15(UVD, i, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
			lower_32_bits(ring->gpu_addr));
		WREG32_SOC15(UVD, i, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
			upper_32_bits(ring->gpu_addr));

		/* Initialize the ring buffer's read and write pointers */
		WREG32_SOC15(UVD, i, mmUVD_RBC_RB_RPTR, 0);

		ring->wptr = RREG32_SOC15(UVD, i, mmUVD_RBC_RB_RPTR);
		WREG32_SOC15(UVD, i, mmUVD_RBC_RB_WPTR,
				lower_32_bits(ring->wptr));

		ring = &adev->vcn.inst[i].ring_enc[0];
		WREG32_SOC15(UVD, i, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
		WREG32_SOC15(UVD, i, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
		WREG32_SOC15(UVD, i, mmUVD_RB_BASE_LO, ring->gpu_addr);
		WREG32_SOC15(UVD, i, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
		WREG32_SOC15(UVD, i, mmUVD_RB_SIZE, ring->ring_size / 4);

		ring = &adev->vcn.inst[i].ring_enc[1];
		WREG32_SOC15(UVD, i, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
		WREG32_SOC15(UVD, i, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
		WREG32_SOC15(UVD, i, mmUVD_RB_BASE_LO2, ring->gpu_addr);
		WREG32_SOC15(UVD, i, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
		WREG32_SOC15(UVD, i, mmUVD_RB_SIZE2, ring->ring_size / 4);
	}

	r = jpeg_v2_5_start(adev);

	return r;
}

static int vcn_v2_5_stop(struct amdgpu_device *adev)
{
	uint32_t tmp;
	int i, r;

	r = jpeg_v2_5_stop(adev);
	if (r)
		return r;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		/* wait for vcn idle */
		SOC15_WAIT_ON_RREG(VCN, i, mmUVD_STATUS, UVD_STATUS__IDLE, 0x7, r);
		if (r)
			return r;

		tmp = UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK |
			UVD_LMI_STATUS__READ_CLEAN_MASK |
			UVD_LMI_STATUS__WRITE_CLEAN_MASK |
			UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK;
		SOC15_WAIT_ON_RREG(VCN, i, mmUVD_LMI_STATUS, tmp, tmp, r);
		if (r)
			return r;

		/* block LMI UMC channel */
		tmp = RREG32_SOC15(VCN, i, mmUVD_LMI_CTRL2);
		tmp |= UVD_LMI_CTRL2__STALL_ARB_UMC_MASK;
		WREG32_SOC15(VCN, i, mmUVD_LMI_CTRL2, tmp);

		tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK |
			UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK;
		SOC15_WAIT_ON_RREG(VCN, i, mmUVD_LMI_STATUS, tmp, tmp, r);
		if (r)
			return r;

		/* block VCPU register access */
		WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_RB_ARB_CTRL),
			UVD_RB_ARB_CTRL__VCPU_DIS_MASK,
			~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);

		/* reset VCPU */
		WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CNTL),
			UVD_VCPU_CNTL__BLK_RST_MASK,
			~UVD_VCPU_CNTL__BLK_RST_MASK);

		/* disable VCPU clock */
		WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CNTL), 0,
			~(UVD_VCPU_CNTL__CLK_EN_MASK));

		/* clear status */
		WREG32_SOC15(VCN, i, mmUVD_STATUS, 0);

		vcn_v2_5_enable_clock_gating(adev);

		/* enable register anti-hang mechanism */
		WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_POWER_STATUS),
			UVD_POWER_STATUS__UVD_POWER_STATUS_MASK,
			~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
	}

	if (adev->pm.dpm_enabled)
		amdgpu_dpm_enable_uvd(adev, false);

	return 0;
}

/**
 * vcn_v2_5_dec_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint64_t vcn_v2_5_dec_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32_SOC15(UVD, ring->me, mmUVD_RBC_RB_RPTR);
}

/**
 * vcn_v2_5_dec_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint64_t vcn_v2_5_dec_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring->use_doorbell)
		return adev->wb.wb[ring->wptr_offs];
	else
		return RREG32_SOC15(UVD, ring->me, mmUVD_RBC_RB_WPTR);
}

/**
 * vcn_v2_5_dec_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void vcn_v2_5_dec_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring->use_doorbell) {
		adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
		WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
	} else {
		WREG32_SOC15(UVD, ring->me, mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
	}
}

static const struct amdgpu_ring_funcs vcn_v2_5_dec_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_VCN_DEC,
	.align_mask = 0xf,
	.vmhub = AMDGPU_MMHUB_1,
	.get_rptr = vcn_v2_5_dec_ring_get_rptr,
	.get_wptr = vcn_v2_5_dec_ring_get_wptr,
	.set_wptr = vcn_v2_5_dec_ring_set_wptr,
	.emit_frame_size =
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
		8 + /* vcn_v2_0_dec_ring_emit_vm_flush */
		14 + 14 + /* vcn_v2_0_dec_ring_emit_fence x2 vm fence */
		6,
	.emit_ib_size = 8, /* vcn_v2_0_dec_ring_emit_ib */
	.emit_ib = vcn_v2_0_dec_ring_emit_ib,
	.emit_fence = vcn_v2_0_dec_ring_emit_fence,
	.emit_vm_flush = vcn_v2_0_dec_ring_emit_vm_flush,
	.test_ring = amdgpu_vcn_dec_ring_test_ring,
	.test_ib = amdgpu_vcn_dec_ring_test_ib,
	.insert_nop = vcn_v2_0_dec_ring_insert_nop,
	.insert_start = vcn_v2_0_dec_ring_insert_start,
	.insert_end = vcn_v2_0_dec_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_vcn_ring_begin_use,
	.end_use = amdgpu_vcn_ring_end_use,
	.emit_wreg = vcn_v2_0_dec_ring_emit_wreg,
	.emit_reg_wait = vcn_v2_0_dec_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};

/**
 * vcn_v2_5_enc_ring_get_rptr - get enc read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware enc read pointer
 */
static uint64_t vcn_v2_5_enc_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vcn.inst[ring->me].ring_enc[0])
		return RREG32_SOC15(UVD, ring->me, mmUVD_RB_RPTR);
	else
		return RREG32_SOC15(UVD, ring->me, mmUVD_RB_RPTR2);
}

/**
 * vcn_v2_5_enc_ring_get_wptr - get enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware enc write pointer
 */
static uint64_t vcn_v2_5_enc_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vcn.inst[ring->me].ring_enc[0]) {
		if (ring->use_doorbell)
			return adev->wb.wb[ring->wptr_offs];
		else
			return RREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR);
	} else {
		if (ring->use_doorbell)
			return adev->wb.wb[ring->wptr_offs];
		else
			return RREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR2);
	}
}

/**
 * vcn_v2_5_enc_ring_set_wptr - set enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the enc write pointer to the hardware
 */
static void vcn_v2_5_enc_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vcn.inst[ring->me].ring_enc[0]) {
		if (ring->use_doorbell) {
			adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
			WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
		} else {
			WREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
		}
	} else {
		if (ring->use_doorbell) {
			adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
			WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
		} else {
			WREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
		}
	}
}

static const struct amdgpu_ring_funcs vcn_v2_5_enc_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_VCN_ENC,
	.align_mask = 0x3f,
	.nop = VCN_ENC_CMD_NO_OP,
	.vmhub = AMDGPU_MMHUB_1,
	.get_rptr = vcn_v2_5_enc_ring_get_rptr,
	.get_wptr = vcn_v2_5_enc_ring_get_wptr,
	.set_wptr = vcn_v2_5_enc_ring_set_wptr,
	.emit_frame_size =
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
		4 + /* vcn_v2_0_enc_ring_emit_vm_flush */
		5 + 5 + /* vcn_v2_0_enc_ring_emit_fence x2 vm fence */
		1, /* vcn_v2_0_enc_ring_insert_end */
	.emit_ib_size = 5, /* vcn_v2_0_enc_ring_emit_ib */
	.emit_ib = vcn_v2_0_enc_ring_emit_ib,
	.emit_fence = vcn_v2_0_enc_ring_emit_fence,
	.emit_vm_flush = vcn_v2_0_enc_ring_emit_vm_flush,
	.test_ring = amdgpu_vcn_enc_ring_test_ring,
	.test_ib = amdgpu_vcn_enc_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.insert_end = vcn_v2_0_enc_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_vcn_ring_begin_use,
	.end_use = amdgpu_vcn_ring_end_use,
	.emit_wreg = vcn_v2_0_enc_ring_emit_wreg,
	.emit_reg_wait = vcn_v2_0_enc_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};

/**
 * vcn_v2_5_jpeg_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint64_t vcn_v2_5_jpeg_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32_SOC15(UVD, ring->me, mmUVD_JRBC_RB_RPTR);
}

/**
 * vcn_v2_5_jpeg_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint64_t vcn_v2_5_jpeg_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring->use_doorbell)
		return adev->wb.wb[ring->wptr_offs];
	else
		return RREG32_SOC15(UVD, ring->me, mmUVD_JRBC_RB_WPTR);
}

/**
 * vcn_v2_5_jpeg_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void vcn_v2_5_jpeg_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring->use_doorbell) {
		adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
		WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
	} else {
		WREG32_SOC15(UVD, ring->me, mmUVD_JRBC_RB_WPTR, lower_32_bits(ring->wptr));
	}
}

static const struct amdgpu_ring_funcs vcn_v2_5_jpeg_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_VCN_JPEG,
	.align_mask = 0xf,
	.vmhub = AMDGPU_MMHUB_1,
	.get_rptr = vcn_v2_5_jpeg_ring_get_rptr,
	.get_wptr = vcn_v2_5_jpeg_ring_get_wptr,
	.set_wptr = vcn_v2_5_jpeg_ring_set_wptr,
	.emit_frame_size =
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
		8 + /* vcn_v2_0_jpeg_ring_emit_vm_flush */
		18 + 18 + /* vcn_v2_0_jpeg_ring_emit_fence x2 vm fence */
		8 + 16,
	.emit_ib_size = 22, /* vcn_v2_0_jpeg_ring_emit_ib */
	.emit_ib = vcn_v2_0_jpeg_ring_emit_ib,
	.emit_fence = vcn_v2_0_jpeg_ring_emit_fence,
	.emit_vm_flush = vcn_v2_0_jpeg_ring_emit_vm_flush,
	.test_ring = amdgpu_vcn_jpeg_ring_test_ring,
	.test_ib = amdgpu_vcn_jpeg_ring_test_ib,
	.insert_nop = vcn_v2_0_jpeg_ring_nop,
	.insert_start = vcn_v2_0_jpeg_ring_insert_start,
	.insert_end = vcn_v2_0_jpeg_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_vcn_ring_begin_use,
	.end_use = amdgpu_vcn_ring_end_use,
	.emit_wreg = vcn_v2_0_jpeg_ring_emit_wreg,
	.emit_reg_wait = vcn_v2_0_jpeg_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};

static void vcn_v2_5_set_dec_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		adev->vcn.inst[i].ring_dec.funcs = &vcn_v2_5_dec_ring_vm_funcs;
		adev->vcn.inst[i].ring_dec.me = i;
		DRM_INFO("VCN(%d) decode is enabled in VM mode\n", i);
	}
}

static void vcn_v2_5_set_enc_ring_funcs(struct amdgpu_device *adev)
{
	int i, j;

	for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
		if (adev->vcn.harvest_config & (1 << j))
			continue;
		for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
			adev->vcn.inst[j].ring_enc[i].funcs = &vcn_v2_5_enc_ring_vm_funcs;
			adev->vcn.inst[j].ring_enc[i].me = j;
		}
		DRM_INFO("VCN(%d) encode is enabled in VM mode\n", j);
	}
}

static void vcn_v2_5_set_jpeg_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		adev->vcn.inst[i].ring_jpeg.funcs = &vcn_v2_5_jpeg_ring_vm_funcs;
		adev->vcn.inst[i].ring_jpeg.me = i;
		DRM_INFO("VCN(%d) jpeg decode is enabled in VM mode\n", i);
	}
}

static bool vcn_v2_5_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int i, ret = 1;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		ret &= (RREG32_SOC15(VCN, i, mmUVD_STATUS) == UVD_STATUS__IDLE);
	}

	return ret;
}

static int vcn_v2_5_wait_for_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int i, ret = 0;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		SOC15_WAIT_ON_RREG(VCN, i, mmUVD_STATUS, UVD_STATUS__IDLE,
			UVD_STATUS__IDLE, ret);
		if (ret)
			return ret;
	}

	return ret;
}

static int vcn_v2_5_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool enable = (state == AMD_CG_STATE_GATE);

	if (enable) {
		/* the block must be idle before its clocks can be gated */
		if (!vcn_v2_5_is_idle(handle))
			return -EBUSY;
		vcn_v2_5_enable_clock_gating(adev);
	} else {
		vcn_v2_5_disable_clock_gating(adev);
	}

	return 0;
}

static int vcn_v2_5_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int ret;

	if (state == adev->vcn.cur_state)
		return 0;

	if (state == AMD_PG_STATE_GATE)
		ret = vcn_v2_5_stop(adev);
	else
		ret = vcn_v2_5_start(adev);

	if (!ret)
		adev->vcn.cur_state = state;

	return ret;
}

static int vcn_v2_5_set_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	return 0;
}

static int vcn_v2_5_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	uint32_t ip_instance;

	switch (entry->client_id) {
	case SOC15_IH_CLIENTID_VCN:
		ip_instance = 0;
		break;
	case SOC15_IH_CLIENTID_VCN1:
		ip_instance = 1;
		break;
	default:
		DRM_ERROR("Unhandled client id: %d\n", entry->client_id);
		return 0;
	}

	DRM_DEBUG("IH: VCN TRAP\n");

	switch (entry->src_id) {
	case VCN_2_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT:
		amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_dec);
		break;
	case VCN_2_0__SRCID__UVD_ENC_GENERAL_PURPOSE:
		amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_enc[0]);
		break;
	case VCN_2_0__SRCID__UVD_ENC_LOW_LATENCY:
		amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_enc[1]);
		break;
	case VCN_2_0__SRCID__JPEG_DECODE:
		amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_jpeg);
		break;
	default:
		DRM_ERROR("Unhandled interrupt: %d %d\n",
			  entry->src_id, entry->src_data[0]);
		break;
	}

	return 0;
}

static const struct amdgpu_irq_src_funcs vcn_v2_5_irq_funcs = {
	.set = vcn_v2_5_set_interrupt_state,
	.process = vcn_v2_5_process_interrupt,
};

static void vcn_v2_5_set_irq_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
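		/* one irq type per ring: decode, the encode rings, and jpeg */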
		adev->vcn.inst[i].irq.num_types = adev->vcn.num_enc_rings + 2;
		adev->vcn.inst[i].irq.funcs = &vcn_v2_5_irq_funcs;
	}
}

static const struct amd_ip_funcs vcn_v2_5_ip_funcs = {
	.name = "vcn_v2_5",
	.early_init = vcn_v2_5_early_init,
	.late_init = NULL,
	.sw_init = vcn_v2_5_sw_init,
	.sw_fini = vcn_v2_5_sw_fini,
	.hw_init = vcn_v2_5_hw_init,
	.hw_fini = vcn_v2_5_hw_fini,
	.suspend = vcn_v2_5_suspend,
	.resume = vcn_v2_5_resume,
	.is_idle = vcn_v2_5_is_idle,
	.wait_for_idle = vcn_v2_5_wait_for_idle,
	.check_soft_reset = NULL,
	.pre_soft_reset = NULL,
	.soft_reset = NULL,
	.post_soft_reset = NULL,
	.set_clockgating_state = vcn_v2_5_set_clockgating_state,
	.set_powergating_state = vcn_v2_5_set_powergating_state,
};

const struct amdgpu_ip_block_version vcn_v2_5_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_VCN,
	.major = 2,
	.minor = 5,
	.rev = 0,
	.funcs = &vcn_v2_5_ip_funcs,
};