linux/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_vcn.h"
#include "soc15.h"
#include "soc15d.h"
#include "soc15_common.h"

#include "vcn/vcn_1_0_offset.h"
#include "vcn/vcn_1_0_sh_mask.h"
#include "hdp/hdp_4_0_offset.h"
#include "mmhub/mmhub_9_1_offset.h"
#include "mmhub/mmhub_9_1_sh_mask.h"

#include "ivsrcid/vcn/irqsrcs_vcn_1_0.h"

static int vcn_v1_0_stop(struct amdgpu_device *adev);
static void vcn_v1_0_set_dec_ring_funcs(struct amdgpu_device *adev);
static void vcn_v1_0_set_enc_ring_funcs(struct amdgpu_device *adev);
static void vcn_v1_0_set_jpeg_ring_funcs(struct amdgpu_device *adev);
static void vcn_v1_0_set_irq_funcs(struct amdgpu_device *adev);
static void vcn_v1_0_jpeg_ring_set_patch_ring(struct amdgpu_ring *ring, uint32_t ptr);
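/*
 * A rough sketch of how the common amdgpu IP code drives this block:
 * early_init() registers the ring and irq function pointers, sw_init()
 * hooks up the interrupt sources and allocates the rings, hw_init()
 * boots the VCPU and tests the rings, and hw_fini()/sw_fini() tear it
 * all back down.  suspend() and resume() below are thin wrappers around
 * hw_fini()/hw_init() plus amdgpu_vcn_suspend()/amdgpu_vcn_resume().
 */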
/**
 * vcn_v1_0_early_init - set function pointers
 *
 * @handle: amdgpu_device pointer
 *
 * Set ring and irq function pointers
 */
static int vcn_v1_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->vcn.num_enc_rings = 2;

	vcn_v1_0_set_dec_ring_funcs(adev);
	vcn_v1_0_set_enc_ring_funcs(adev);
	vcn_v1_0_set_jpeg_ring_funcs(adev);
	vcn_v1_0_set_irq_funcs(adev);

	return 0;
}

/**
 * vcn_v1_0_sw_init - sw init for VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Load firmware and sw initialization
 */
static int vcn_v1_0_sw_init(void *handle)
{
	struct amdgpu_ring *ring;
	int i, r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* VCN DEC TRAP */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, VCN_1_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT, &adev->vcn.irq);
	if (r)
		return r;

	/* VCN ENC TRAP */
	for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, i + VCN_1_0__SRCID__UVD_ENC_GENERAL_PURPOSE,
					&adev->vcn.irq);
		if (r)
			return r;
	}

	/* VCN JPEG TRAP */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, 126, &adev->vcn.irq);
	if (r)
		return r;

	r = amdgpu_vcn_sw_init(adev);
	if (r)
		return r;

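	/*
	 * With PSP-managed firmware loading, the VCN ucode is registered in
	 * the adev->firmware.ucode[] table below so the PSP copies it into
	 * its reserved memory region; with direct loading, vcn_v1_0_mc_resume()
	 * points the VCPU caches at the driver's own firmware buffer instead.
	 */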
	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		const struct common_firmware_header *hdr;
		hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
		adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].ucode_id = AMDGPU_UCODE_ID_VCN;
		adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].fw = adev->vcn.fw;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
		DRM_INFO("PSP loading VCN firmware\n");
	}

	r = amdgpu_vcn_resume(adev);
	if (r)
		return r;

	ring = &adev->vcn.ring_dec;
	sprintf(ring->name, "vcn_dec");
	r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.irq, 0);
	if (r)
		return r;

	for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
		ring = &adev->vcn.ring_enc[i];
		sprintf(ring->name, "vcn_enc%d", i);
		r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.irq, 0);
		if (r)
			return r;
	}

	ring = &adev->vcn.ring_jpeg;
	sprintf(ring->name, "vcn_jpeg");
	r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.irq, 0);
	if (r)
		return r;

	return r;
}

/**
 * vcn_v1_0_sw_fini - sw fini for VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * VCN suspend and free up sw allocation
 */
static int vcn_v1_0_sw_fini(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_vcn_suspend(adev);
	if (r)
		return r;

	r = amdgpu_vcn_sw_fini(adev);

	return r;
}

/**
 * vcn_v1_0_hw_init - start and test VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */
static int vcn_v1_0_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring = &adev->vcn.ring_dec;
	int i, r;

	ring->ready = true;
	r = amdgpu_ring_test_ring(ring);
	if (r) {
		ring->ready = false;
		goto done;
	}

	for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
		ring = &adev->vcn.ring_enc[i];
		ring->ready = true;
		r = amdgpu_ring_test_ring(ring);
		if (r) {
			ring->ready = false;
			goto done;
		}
	}

	ring = &adev->vcn.ring_jpeg;
	ring->ready = true;
	r = amdgpu_ring_test_ring(ring);
	if (r) {
		ring->ready = false;
		goto done;
	}

done:
	if (!r)
		DRM_INFO("VCN decode and encode initialized successfully.\n");

	return r;
}

/**
 * vcn_v1_0_hw_fini - stop the hardware block
 *
 * @handle: amdgpu_device pointer
 *
 * Stop the VCN block, mark ring as not ready any more
 */
static int vcn_v1_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring = &adev->vcn.ring_dec;

	if (RREG32_SOC15(VCN, 0, mmUVD_STATUS))
		vcn_v1_0_stop(adev);

	ring->ready = false;

	return 0;
}

/**
 * vcn_v1_0_suspend - suspend VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * HW fini and suspend VCN block
 */
static int vcn_v1_0_suspend(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = vcn_v1_0_hw_fini(adev);
	if (r)
		return r;

	r = amdgpu_vcn_suspend(adev);

	return r;
}

/**
 * vcn_v1_0_resume - resume VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Resume firmware and hw init VCN block
 */
static int vcn_v1_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_vcn_resume(adev);
	if (r)
		return r;

	r = vcn_v1_0_hw_init(adev);

	return r;
}

/**
 * vcn_v1_0_mc_resume - memory controller programming
 *
 * @adev: amdgpu_device pointer
 *
 * Let the VCN memory controller know its offsets
 */
static void vcn_v1_0_mc_resume(struct amdgpu_device *adev)
{
	uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
	uint32_t offset;

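	/*
	 * The VCPU sees three contiguous cache windows carved out of the
	 * VCN buffer: window 0 holds the firmware image (or the PSP-managed
	 * copy), window 1 the heap, and window 2 the stack/session area,
	 * as sized by the writes below.
	 */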
	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
			     (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_lo));
		WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
			     (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_hi));
		WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0, 0);
		offset = 0;
	} else {
		WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
			lower_32_bits(adev->vcn.gpu_addr));
		WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
			upper_32_bits(adev->vcn.gpu_addr));
		offset = size;
		WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0,
			     AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
	}

	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE0, size);

	WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
		     lower_32_bits(adev->vcn.gpu_addr + offset));
	WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
		     upper_32_bits(adev->vcn.gpu_addr + offset));
	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET1, 0);
	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_HEAP_SIZE);

	WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
		     lower_32_bits(adev->vcn.gpu_addr + offset + AMDGPU_VCN_HEAP_SIZE));
	WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
		     upper_32_bits(adev->vcn.gpu_addr + offset + AMDGPU_VCN_HEAP_SIZE));
	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET2, 0);
	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE2,
			AMDGPU_VCN_STACK_SIZE + (AMDGPU_VCN_SESSION_SIZE * 40));

	WREG32_SOC15(UVD, 0, mmUVD_UDEC_ADDR_CONFIG,
			adev->gfx.config.gb_addr_config);
	WREG32_SOC15(UVD, 0, mmUVD_UDEC_DB_ADDR_CONFIG,
			adev->gfx.config.gb_addr_config);
	WREG32_SOC15(UVD, 0, mmUVD_UDEC_DBW_ADDR_CONFIG,
			adev->gfx.config.gb_addr_config);
}

/**
 * vcn_v1_0_disable_clock_gating - disable VCN clock gating
 *
 * @adev: amdgpu_device pointer
 *
 * Disable clock gating for VCN block
 */
static void vcn_v1_0_disable_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data;

	/* JPEG disable CGC */
	data = RREG32_SOC15(VCN, 0, mmJPEG_CGC_CTRL);

	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
		data |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	else
		data &= ~JPEG_CGC_CTRL__DYN_CLOCK_MODE_MASK;

	data |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	data |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	WREG32_SOC15(VCN, 0, mmJPEG_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, 0, mmJPEG_CGC_GATE);
	data &= ~(JPEG_CGC_GATE__JPEG_MASK | JPEG_CGC_GATE__JPEG2_MASK);
	WREG32_SOC15(VCN, 0, mmJPEG_CGC_GATE, data);

	/* UVD disable CGC */
	data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL);
	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
		data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	else
		data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;

	data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	WREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, 0, mmUVD_CGC_GATE);
	data &= ~(UVD_CGC_GATE__SYS_MASK
		| UVD_CGC_GATE__UDEC_MASK
		| UVD_CGC_GATE__MPEG2_MASK
		| UVD_CGC_GATE__REGS_MASK
		| UVD_CGC_GATE__RBC_MASK
		| UVD_CGC_GATE__LMI_MC_MASK
		| UVD_CGC_GATE__LMI_UMC_MASK
		| UVD_CGC_GATE__IDCT_MASK
		| UVD_CGC_GATE__MPRD_MASK
		| UVD_CGC_GATE__MPC_MASK
		| UVD_CGC_GATE__LBSI_MASK
		| UVD_CGC_GATE__LRBBM_MASK
		| UVD_CGC_GATE__UDEC_RE_MASK
		| UVD_CGC_GATE__UDEC_CM_MASK
		| UVD_CGC_GATE__UDEC_IT_MASK
		| UVD_CGC_GATE__UDEC_DB_MASK
		| UVD_CGC_GATE__UDEC_MP_MASK
		| UVD_CGC_GATE__WCB_MASK
		| UVD_CGC_GATE__VCPU_MASK
		| UVD_CGC_GATE__SCPU_MASK);
	WREG32_SOC15(VCN, 0, mmUVD_CGC_GATE, data);

	data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL);
	data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK
		| UVD_CGC_CTRL__UDEC_CM_MODE_MASK
		| UVD_CGC_CTRL__UDEC_IT_MODE_MASK
		| UVD_CGC_CTRL__UDEC_DB_MODE_MASK
		| UVD_CGC_CTRL__UDEC_MP_MODE_MASK
		| UVD_CGC_CTRL__SYS_MODE_MASK
		| UVD_CGC_CTRL__UDEC_MODE_MASK
		| UVD_CGC_CTRL__MPEG2_MODE_MASK
		| UVD_CGC_CTRL__REGS_MODE_MASK
		| UVD_CGC_CTRL__RBC_MODE_MASK
		| UVD_CGC_CTRL__LMI_MC_MODE_MASK
		| UVD_CGC_CTRL__LMI_UMC_MODE_MASK
		| UVD_CGC_CTRL__IDCT_MODE_MASK
		| UVD_CGC_CTRL__MPRD_MODE_MASK
		| UVD_CGC_CTRL__MPC_MODE_MASK
		| UVD_CGC_CTRL__LBSI_MODE_MASK
		| UVD_CGC_CTRL__LRBBM_MODE_MASK
		| UVD_CGC_CTRL__WCB_MODE_MASK
		| UVD_CGC_CTRL__VCPU_MODE_MASK
		| UVD_CGC_CTRL__SCPU_MODE_MASK);
	WREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL, data);

	/* turn on */
	data = RREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_GATE);
	data |= (UVD_SUVD_CGC_GATE__SRE_MASK
		| UVD_SUVD_CGC_GATE__SIT_MASK
		| UVD_SUVD_CGC_GATE__SMP_MASK
		| UVD_SUVD_CGC_GATE__SCM_MASK
		| UVD_SUVD_CGC_GATE__SDB_MASK
		| UVD_SUVD_CGC_GATE__SRE_H264_MASK
		| UVD_SUVD_CGC_GATE__SRE_HEVC_MASK
		| UVD_SUVD_CGC_GATE__SIT_H264_MASK
		| UVD_SUVD_CGC_GATE__SIT_HEVC_MASK
		| UVD_SUVD_CGC_GATE__SCM_H264_MASK
		| UVD_SUVD_CGC_GATE__SCM_HEVC_MASK
		| UVD_SUVD_CGC_GATE__SDB_H264_MASK
		| UVD_SUVD_CGC_GATE__SDB_HEVC_MASK
		| UVD_SUVD_CGC_GATE__SCLR_MASK
		| UVD_SUVD_CGC_GATE__UVD_SC_MASK
		| UVD_SUVD_CGC_GATE__ENT_MASK
		| UVD_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK
		| UVD_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK
		| UVD_SUVD_CGC_GATE__SITE_MASK
		| UVD_SUVD_CGC_GATE__SRE_VP9_MASK
		| UVD_SUVD_CGC_GATE__SCM_VP9_MASK
		| UVD_SUVD_CGC_GATE__SIT_VP9_DEC_MASK
		| UVD_SUVD_CGC_GATE__SDB_VP9_MASK
		| UVD_SUVD_CGC_GATE__IME_HEVC_MASK);
	WREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_GATE, data);

	data = RREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL);
	data &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK
		| UVD_SUVD_CGC_CTRL__UVD_SC_MODE_MASK
		| UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
		| UVD_SUVD_CGC_CTRL__IME_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SITE_MODE_MASK);
	WREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL, data);
}

/**
 * vcn_v1_0_enable_clock_gating - enable VCN clock gating
 *
 * @adev: amdgpu_device pointer
 *
 * Enable clock gating for VCN block
 */
static void vcn_v1_0_enable_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data = 0;

	/* enable JPEG CGC */
	data = RREG32_SOC15(VCN, 0, mmJPEG_CGC_CTRL);
	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
		data |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	else
		data |= 0 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	data |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	data |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	WREG32_SOC15(VCN, 0, mmJPEG_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, 0, mmJPEG_CGC_GATE);
	data |= (JPEG_CGC_GATE__JPEG_MASK | JPEG_CGC_GATE__JPEG2_MASK);
	WREG32_SOC15(VCN, 0, mmJPEG_CGC_GATE, data);

	/* enable UVD CGC */
	data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL);
	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
		data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	else
		data |= 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	WREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL);
	data |= (UVD_CGC_CTRL__UDEC_RE_MODE_MASK
		| UVD_CGC_CTRL__UDEC_CM_MODE_MASK
		| UVD_CGC_CTRL__UDEC_IT_MODE_MASK
		| UVD_CGC_CTRL__UDEC_DB_MODE_MASK
		| UVD_CGC_CTRL__UDEC_MP_MODE_MASK
		| UVD_CGC_CTRL__SYS_MODE_MASK
		| UVD_CGC_CTRL__UDEC_MODE_MASK
		| UVD_CGC_CTRL__MPEG2_MODE_MASK
		| UVD_CGC_CTRL__REGS_MODE_MASK
		| UVD_CGC_CTRL__RBC_MODE_MASK
		| UVD_CGC_CTRL__LMI_MC_MODE_MASK
		| UVD_CGC_CTRL__LMI_UMC_MODE_MASK
		| UVD_CGC_CTRL__IDCT_MODE_MASK
		| UVD_CGC_CTRL__MPRD_MODE_MASK
		| UVD_CGC_CTRL__MPC_MODE_MASK
		| UVD_CGC_CTRL__LBSI_MODE_MASK
		| UVD_CGC_CTRL__LRBBM_MODE_MASK
		| UVD_CGC_CTRL__WCB_MODE_MASK
		| UVD_CGC_CTRL__VCPU_MODE_MASK
		| UVD_CGC_CTRL__SCPU_MODE_MASK);
	WREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL);
	data |= (UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK
		| UVD_SUVD_CGC_CTRL__UVD_SC_MODE_MASK
		| UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
		| UVD_SUVD_CGC_CTRL__IME_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SITE_MODE_MASK);
	WREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL, data);
}

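/*
 * The two helpers below drive the UVD power-gating state machine (PGFSM):
 * a power-state code is written per tile into UVD_PGFSM_CONFIG and then
 * UVD_PGFSM_STATUS is polled until the tiles report the requested state.
 * The exact field encodings come from the vcn_1_0 register headers; this
 * comment only summarizes the apparent handshake.
 */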
static void vcn_1_0_disable_static_power_gating(struct amdgpu_device *adev)
{
	uint32_t data = 0;
	int ret;

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
		data = (1 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDU_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDC_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDIL_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDIR_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDW_PWR_CONFIG__SHIFT);

		WREG32_SOC15(VCN, 0, mmUVD_PGFSM_CONFIG, data);
		SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_PGFSM_STATUS, UVD_PGFSM_STATUS__UVDM_UVDU_PWR_ON, 0xFFFFFF, ret);
	} else {
		data = (1 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDU_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDC_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDIL_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDIR_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDW_PWR_CONFIG__SHIFT);
		WREG32_SOC15(VCN, 0, mmUVD_PGFSM_CONFIG, data);
		SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_PGFSM_STATUS, 0, 0xFFFFFFFF, ret);
	}

	/* poll UVD_PGFSM_STATUS to confirm UVDM_PWR_STATUS and UVDU_PWR_STATUS are 0 (power on) */

	data = RREG32_SOC15(VCN, 0, mmUVD_POWER_STATUS);
	data &= ~0x103;
	if (adev->pg_flags & AMD_PG_SUPPORT_VCN)
		data |= UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON | UVD_POWER_STATUS__UVD_PG_EN_MASK;

	WREG32_SOC15(VCN, 0, mmUVD_POWER_STATUS, data);
}

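/*
 * Mirror image of the helper above: when power gating is supported, every
 * PGFSM tile is asked to power down (state code 2) and UVD_PGFSM_STATUS is
 * polled until all tiles report that same code.  The status word built
 * below is simply the expected "all tiles in state 2" pattern.
 */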
static void vcn_1_0_enable_static_power_gating(struct amdgpu_device *adev)
{
	uint32_t data = 0;
	int ret;

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
		/* Before power off, this indicator has to be turned on */
		data = RREG32_SOC15(VCN, 0, mmUVD_POWER_STATUS);
		data &= ~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK;
		data |= UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF;
		WREG32_SOC15(VCN, 0, mmUVD_POWER_STATUS, data);

		data = (2 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDU_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDC_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDIL_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDIR_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDW_PWR_CONFIG__SHIFT);

		WREG32_SOC15(VCN, 0, mmUVD_PGFSM_CONFIG, data);

		data = (2 << UVD_PGFSM_STATUS__UVDM_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDU_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDF_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDC_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDB_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDIL_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDIR_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDTD_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDTE_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDE_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDW_PWR_STATUS__SHIFT);
		SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_PGFSM_STATUS, data, 0xFFFFFFFF, ret);
	}
}

/**
 * vcn_v1_0_start - start VCN block
 *
 * @adev: amdgpu_device pointer
 *
 * Setup and start the VCN block
 */
static int vcn_v1_0_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring = &adev->vcn.ring_dec;
	uint32_t rb_bufsz, tmp;
	uint32_t lmi_swap_cntl;
	int i, j, r;

	/* disable byte swapping */
	lmi_swap_cntl = 0;

	vcn_1_0_disable_static_power_gating(adev);
	/* disable clock gating */
	vcn_v1_0_disable_clock_gating(adev);

	vcn_v1_0_mc_resume(adev);

	/* disable interrupt */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_MASTINT_EN), 0,
			~UVD_MASTINT_EN__VCPU_EN_MASK);

	/* stall UMC and register bus before resetting VCPU */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2),
			UVD_LMI_CTRL2__STALL_ARB_UMC_MASK,
			~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
	mdelay(1);

	/* put LMI, VCPU, RBC etc... into reset */
	WREG32_SOC15(UVD, 0, mmUVD_SOFT_RESET,
		UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
		UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK |
		UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK |
		UVD_SOFT_RESET__RBC_SOFT_RESET_MASK |
		UVD_SOFT_RESET__CSM_SOFT_RESET_MASK |
		UVD_SOFT_RESET__CXW_SOFT_RESET_MASK |
		UVD_SOFT_RESET__TAP_SOFT_RESET_MASK |
		UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);
	mdelay(5);

	/* initialize VCN memory controller */
	WREG32_SOC15(UVD, 0, mmUVD_LMI_CTRL,
		(0x40 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
		UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
		UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
		UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
		UVD_LMI_CTRL__REQ_MODE_MASK |
		0x00100000L);

#ifdef __BIG_ENDIAN
	/* swap (8 in 32) RB and IB */
	lmi_swap_cntl = 0xa;
#endif
	WREG32_SOC15(UVD, 0, mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl);

	WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUXA0, 0x40c2040);
	WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUXA1, 0x0);
	WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUXB0, 0x40c2040);
	WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUXB1, 0x0);
	WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_ALU, 0);
	WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUX, 0x88);

	/* take all subblocks out of reset, except VCPU */
	WREG32_SOC15(UVD, 0, mmUVD_SOFT_RESET,
			UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
	mdelay(5);

	/* enable VCPU clock */
	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CNTL,
			UVD_VCPU_CNTL__CLK_EN_MASK);

	/* enable UMC */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2), 0,
			~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);

	/* boot up the VCPU */
	WREG32_SOC15(UVD, 0, mmUVD_SOFT_RESET, 0);
	mdelay(10);

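	/*
	 * The firmware sets bit 1 of UVD_STATUS once the VCPU has booted.
	 * Poll for that for up to a second and, if it never appears, soft
	 * reset the VCPU and try again, giving up after ten attempts.
	 */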
	for (i = 0; i < 10; ++i) {
		uint32_t status;

		for (j = 0; j < 100; ++j) {
			status = RREG32_SOC15(UVD, 0, mmUVD_STATUS);
			if (status & 2)
				break;
			mdelay(10);
		}
		r = 0;
		if (status & 2)
			break;

		DRM_ERROR("VCN decode not responding, trying to reset the VCPU!!!\n");
		WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
				UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK,
				~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
		mdelay(10);
		WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET), 0,
				~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
		mdelay(10);
		r = -1;
	}

	if (r) {
		DRM_ERROR("VCN decode not responding, giving up!!!\n");
		return r;
	}
	/* enable master interrupt */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_MASTINT_EN),
		(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK),
		~(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK));

	/* clear the bit 4 of VCN_STATUS */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_STATUS), 0,
			~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));

	/* force RBC into idle state */
	rb_bufsz = order_base_2(ring->ring_size);
	tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_WPTR_POLL_EN, 0);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_CNTL, tmp);

	/* set the write pointer delay */
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR_CNTL, 0);

	/* set the wb address */
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR_ADDR,
			(upper_32_bits(ring->gpu_addr) >> 2));
	/* program the RB_BASE for the ring buffer */
	WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
			lower_32_bits(ring->gpu_addr));
	WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
			upper_32_bits(ring->gpu_addr));

	/* Initialize the ring buffer's read and write pointers */
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR, 0);

	ring->wptr = RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR);
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
			lower_32_bits(ring->wptr));

	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_CNTL), 0,
			~UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK);

	ring = &adev->vcn.ring_enc[0];
	WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
	WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
	WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO, ring->gpu_addr);
	WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
	WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE, ring->ring_size / 4);

	ring = &adev->vcn.ring_enc[1];
	WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
	WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
	WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO2, ring->gpu_addr);
	WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
	WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE2, ring->ring_size / 4);

	ring = &adev->vcn.ring_jpeg;
	WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_VMID, 0);
	WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_CNTL, (0x00000001L | 0x00000002L));
	WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_64BIT_BAR_LOW, lower_32_bits(ring->gpu_addr));
	WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_64BIT_BAR_HIGH, upper_32_bits(ring->gpu_addr));
	WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_RPTR, 0);
	WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR, 0);
	WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_CNTL, 0x00000002L);

	/* initialize wptr */
	ring->wptr = RREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR);

	/* copy patch commands to the jpeg ring */
	vcn_v1_0_jpeg_ring_set_patch_ring(ring,
		(ring->wptr + ring->max_dw * amdgpu_sched_hw_submission));

	return 0;
}

/**
 * vcn_v1_0_stop - stop VCN block
 *
 * @adev: amdgpu_device pointer
 *
 * stop the VCN block
 */
static int vcn_v1_0_stop(struct amdgpu_device *adev)
{
	/* force RBC into idle state */
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_CNTL, 0x11010101);

	/* Stall UMC and register bus before resetting VCPU */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2),
			UVD_LMI_CTRL2__STALL_ARB_UMC_MASK,
			~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
	mdelay(1);

	/* put VCPU into reset */
	WREG32_SOC15(UVD, 0, mmUVD_SOFT_RESET,
			UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
	mdelay(5);

	/* disable VCPU clock */
	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CNTL, 0x0);

	/* Unstall UMC and register bus */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2), 0,
			~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);

	WREG32_SOC15(VCN, 0, mmUVD_STATUS, 0);

	vcn_v1_0_enable_clock_gating(adev);
	vcn_1_0_enable_static_power_gating(adev);
	return 0;
}

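/*
 * UVD_STATUS == 0x2 is the value the firmware reports once the VCPU is up
 * with no work pending (vcn_v1_0_start() polls for the same bit), so both
 * helpers below treat it as the idle indication.
 */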
static bool vcn_v1_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return (RREG32_SOC15(VCN, 0, mmUVD_STATUS) == 0x2);
}

static int vcn_v1_0_wait_for_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int ret = 0;

	SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_STATUS, 0x2, 0x2, ret);

	return ret;
}

static int vcn_v1_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool enable = (state == AMD_CG_STATE_GATE);

	if (enable) {
		/* wait for STATUS to clear */
		if (!vcn_v1_0_is_idle(handle))
			return -EBUSY;
		vcn_v1_0_enable_clock_gating(adev);
	} else {
		/* disable HW gating and enable SW gating */
		vcn_v1_0_disable_clock_gating(adev);
	}
	return 0;
}

/**
 * vcn_v1_0_dec_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint64_t vcn_v1_0_dec_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR);
}

/**
 * vcn_v1_0_dec_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint64_t vcn_v1_0_dec_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR);
}

/**
 * vcn_v1_0_dec_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void vcn_v1_0_dec_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
}

/**
 * vcn_v1_0_dec_ring_insert_start - insert a start command
 *
 * @ring: amdgpu_ring pointer
 *
 * Write a start command to the ring.
 */
static void vcn_v1_0_dec_ring_insert_start(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
	amdgpu_ring_write(ring, VCN_DEC_CMD_PACKET_START << 1);
}

/**
 * vcn_v1_0_dec_ring_insert_end - insert an end command
 *
 * @ring: amdgpu_ring pointer
 *
 * Write an end command to the ring.
 */
static void vcn_v1_0_dec_ring_insert_end(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
	amdgpu_ring_write(ring, VCN_DEC_CMD_PACKET_END << 1);
}

/**
 * vcn_v1_0_dec_ring_emit_fence - emit a fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @addr: address to write the fence sequence number to
 * @seq: sequence number of the fence
 * @flags: fence flags
 *
 * Write a fence and a trap command to the ring.
 */
static void vcn_v1_0_dec_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				     unsigned flags)
{
	struct amdgpu_device *adev = ring->adev;

	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_CONTEXT_ID), 0));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
	amdgpu_ring_write(ring, addr & 0xffffffff);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0));
	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
	amdgpu_ring_write(ring, VCN_DEC_CMD_FENCE << 1);

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
	amdgpu_ring_write(ring, VCN_DEC_CMD_TRAP << 1);
}

/**
 * vcn_v1_0_dec_ring_emit_ib - execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @ib: indirect buffer to execute
 *
 * Write ring commands to execute the indirect buffer
 */
static void vcn_v1_0_dec_ring_emit_ib(struct amdgpu_ring *ring,
				  struct amdgpu_ib *ib,
				  unsigned vmid, bool ctx_switch)
{
	struct amdgpu_device *adev = ring->adev;

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_IB_VMID), 0));
	amdgpu_ring_write(ring, vmid);

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_IB_64BIT_BAR_LOW), 0));
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH), 0));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_IB_SIZE), 0));
	amdgpu_ring_write(ring, ib->length_dw);
}

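/*
 * Register waits on the decode ring are built out of GPCOM packets: the
 * register offset goes into GPCOM_VCPU_DATA0, the expected value into
 * GPCOM_VCPU_DATA1 and the mask into GP_SCRATCH8, after which the
 * REG_READ_COND_WAIT command tells the firmware to stall until the
 * masked register matches.
 */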
static void vcn_v1_0_dec_ring_emit_reg_wait(struct amdgpu_ring *ring,
					    uint32_t reg, uint32_t val,
					    uint32_t mask)
{
	struct amdgpu_device *adev = ring->adev;

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
	amdgpu_ring_write(ring, reg << 2);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0));
	amdgpu_ring_write(ring, val);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GP_SCRATCH8), 0));
	amdgpu_ring_write(ring, mask);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
	amdgpu_ring_write(ring, VCN_DEC_CMD_REG_READ_COND_WAIT << 1);
}

static void vcn_v1_0_dec_ring_emit_vm_flush(struct amdgpu_ring *ring,
					    unsigned vmid, uint64_t pd_addr)
{
	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
	uint32_t data0, data1, mask;

	pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);

	/* wait for register write */
	data0 = hub->ctx0_ptb_addr_lo32 + vmid * 2;
	data1 = lower_32_bits(pd_addr);
	mask = 0xffffffff;
	vcn_v1_0_dec_ring_emit_reg_wait(ring, data0, data1, mask);
}

static void vcn_v1_0_dec_ring_emit_wreg(struct amdgpu_ring *ring,
					uint32_t reg, uint32_t val)
{
	struct amdgpu_device *adev = ring->adev;

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
	amdgpu_ring_write(ring, reg << 2);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0));
	amdgpu_ring_write(ring, val);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
	amdgpu_ring_write(ring, VCN_DEC_CMD_WRITE_REG << 1);
}

/**
 * vcn_v1_0_enc_ring_get_rptr - get enc read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware enc read pointer
 */
static uint64_t vcn_v1_0_enc_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vcn.ring_enc[0])
		return RREG32_SOC15(UVD, 0, mmUVD_RB_RPTR);
	else
		return RREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2);
}

/**
 * vcn_v1_0_enc_ring_get_wptr - get enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware enc write pointer
 */
static uint64_t vcn_v1_0_enc_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vcn.ring_enc[0])
		return RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR);
	else
		return RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2);
}

/**
 * vcn_v1_0_enc_ring_set_wptr - set enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the enc write pointer to the hardware
 */
static void vcn_v1_0_enc_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vcn.ring_enc[0])
		WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR,
			lower_32_bits(ring->wptr));
	else
		WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2,
			lower_32_bits(ring->wptr));
}

/**
 * vcn_v1_0_enc_ring_emit_fence - emit an enc fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @addr: address to write the fence sequence number to
 * @seq: sequence number of the fence
 * @flags: fence flags
 *
 * Write an enc fence and a trap command to the ring.
 */
static void vcn_v1_0_enc_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
			u64 seq, unsigned flags)
{
	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	amdgpu_ring_write(ring, VCN_ENC_CMD_FENCE);
	amdgpu_ring_write(ring, addr);
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, VCN_ENC_CMD_TRAP);
}

static void vcn_v1_0_enc_ring_insert_end(struct amdgpu_ring *ring)
{
	amdgpu_ring_write(ring, VCN_ENC_CMD_END);
}

/**
 * vcn_v1_0_enc_ring_emit_ib - enc execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @ib: indirect buffer to execute
 *
 * Write enc ring commands to execute the indirect buffer
 */
static void vcn_v1_0_enc_ring_emit_ib(struct amdgpu_ring *ring,
		struct amdgpu_ib *ib, unsigned int vmid, bool ctx_switch)
{
	amdgpu_ring_write(ring, VCN_ENC_CMD_IB);
	amdgpu_ring_write(ring, vmid);
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, ib->length_dw);
}

static void vcn_v1_0_enc_ring_emit_reg_wait(struct amdgpu_ring *ring,
					    uint32_t reg, uint32_t val,
					    uint32_t mask)
{
	amdgpu_ring_write(ring, VCN_ENC_CMD_REG_WAIT);
	amdgpu_ring_write(ring, reg << 2);
	amdgpu_ring_write(ring, mask);
	amdgpu_ring_write(ring, val);
}

static void vcn_v1_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
					    unsigned int vmid, uint64_t pd_addr)
{
	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];

	pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);

	/* wait for reg writes */
	vcn_v1_0_enc_ring_emit_reg_wait(ring, hub->ctx0_ptb_addr_lo32 + vmid * 2,
					lower_32_bits(pd_addr), 0xffffffff);
}

static void vcn_v1_0_enc_ring_emit_wreg(struct amdgpu_ring *ring,
					uint32_t reg, uint32_t val)
{
	amdgpu_ring_write(ring, VCN_ENC_CMD_REG_WRITE);
	amdgpu_ring_write(ring, reg << 2);
	amdgpu_ring_write(ring, val);
}

/**
 * vcn_v1_0_jpeg_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint64_t vcn_v1_0_jpeg_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_RPTR);
}

/**
 * vcn_v1_0_jpeg_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint64_t vcn_v1_0_jpeg_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR);
}

/**
 * vcn_v1_0_jpeg_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void vcn_v1_0_jpeg_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR, lower_32_bits(ring->wptr));
}

/**
 * vcn_v1_0_jpeg_ring_insert_start - insert a start command
 *
 * @ring: amdgpu_ring pointer
 *
 * Write a start command to the ring.
 */
static void vcn_v1_0_jpeg_ring_insert_start(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	amdgpu_ring_write(ring,
		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, 0x68e04);

	amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, 0x80010000);
}

/**
 * vcn_v1_0_jpeg_ring_insert_end - insert an end command
 *
 * @ring: amdgpu_ring pointer
 *
 * Write an end command to the ring.
 */
static void vcn_v1_0_jpeg_ring_insert_end(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	amdgpu_ring_write(ring,
		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, 0x68e04);

	amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, 0x00010000);
}

/**
 * vcn_v1_0_jpeg_ring_emit_fence - emit a fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @addr: address to write the fence sequence number to
 * @seq: sequence number of the fence
 * @flags: fence flags
 *
 * Write a fence and a trap command to the ring.
 */
static void vcn_v1_0_jpeg_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				     unsigned flags)
{
	struct amdgpu_device *adev = ring->adev;

	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	amdgpu_ring_write(ring,
		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_GPCOM_DATA0), 0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, seq);

	amdgpu_ring_write(ring,
		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_GPCOM_DATA1), 0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, seq);

	amdgpu_ring_write(ring,
		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW), 0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, lower_32_bits(addr));

	amdgpu_ring_write(ring,
		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH), 0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, upper_32_bits(addr));

	amdgpu_ring_write(ring,
		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_GPCOM_CMD), 0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, 0x8);

	amdgpu_ring_write(ring,
		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_GPCOM_CMD), 0, PACKETJ_CONDITION_CHECK0, PACKETJ_TYPE4));
	amdgpu_ring_write(ring, 0);

	amdgpu_ring_write(ring,
		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_COND_RD_TIMER), 0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, 0x01400200);

	amdgpu_ring_write(ring,
		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_REF_DATA), 0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, seq);

	amdgpu_ring_write(ring,
		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW), 0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, lower_32_bits(addr));

	amdgpu_ring_write(ring,
		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH), 0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, upper_32_bits(addr));

	amdgpu_ring_write(ring,
		PACKETJ(0, 0, PACKETJ_CONDITION_CHECK3, PACKETJ_TYPE2));
	amdgpu_ring_write(ring, 0xffffffff);

	amdgpu_ring_write(ring,
		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, 0x3fbc);

	amdgpu_ring_write(ring,
		PACKETJ(0, 0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, 0x1);
}

/**
 * vcn_v1_0_jpeg_ring_emit_ib - execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @ib: indirect buffer to execute
 *
 * Write ring commands to execute the indirect buffer.
 */
static void vcn_v1_0_jpeg_ring_emit_ib(struct amdgpu_ring *ring,
				  struct amdgpu_ib *ib,
				  unsigned vmid, bool ctx_switch)
{
	struct amdgpu_device *adev = ring->adev;

	amdgpu_ring_write(ring,
		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_IB_VMID), 0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, (vmid | (vmid << 4)));

	amdgpu_ring_write(ring,
		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JPEG_VMID), 0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, (vmid | (vmid << 4)));

	amdgpu_ring_write(ring,
		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_IB_64BIT_BAR_LOW), 0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));

	amdgpu_ring_write(ring,
		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_IB_64BIT_BAR_HIGH), 0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));

	amdgpu_ring_write(ring,
		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_IB_SIZE), 0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, ib->length_dw);

	amdgpu_ring_write(ring,
		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW), 0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, lower_32_bits(ring->gpu_addr));

	amdgpu_ring_write(ring,
		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH), 0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, upper_32_bits(ring->gpu_addr));

	amdgpu_ring_write(ring,
		PACKETJ(0, 0, PACKETJ_CONDITION_CHECK0, PACKETJ_TYPE2));
	amdgpu_ring_write(ring, 0);

	amdgpu_ring_write(ring,
		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_COND_RD_TIMER), 0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, 0x01400200);

	amdgpu_ring_write(ring,
		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_REF_DATA), 0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, 0x2);

	amdgpu_ring_write(ring,
		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_STATUS), 0, PACKETJ_CONDITION_CHECK3, PACKETJ_TYPE3));
	amdgpu_ring_write(ring, 0x2);
}

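/*
 * JPEG ring register accesses go through the JRBC "external register"
 * window: offsets that fall inside the VCN aperture (roughly 0x1f800 -
 * 0x21fff and 0x1e000 - 0x1e1ff byte offsets, judging by the checks
 * below) are encoded directly in the PACKETJ header, while anything
 * else is reached by programming UVD_JRBC_EXTERNAL_REG_BASE first.
 */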
static void vcn_v1_0_jpeg_ring_emit_reg_wait(struct amdgpu_ring *ring,
					    uint32_t reg, uint32_t val,
					    uint32_t mask)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t reg_offset = (reg << 2);

	amdgpu_ring_write(ring,
		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_COND_RD_TIMER), 0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, 0x01400200);

	amdgpu_ring_write(ring,
		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_REF_DATA), 0, 0, PACKETJ_TYPE0));
	amdgpu_ring_write(ring, val);

	amdgpu_ring_write(ring,
		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0));
	if (((reg_offset >= 0x1f800) && (reg_offset <= 0x21fff)) ||
		((reg_offset >= 0x1e000) && (reg_offset <= 0x1e1ff))) {
		amdgpu_ring_write(ring, 0);
		amdgpu_ring_write(ring,
			PACKETJ((reg_offset >> 2), 0, 0, PACKETJ_TYPE3));
	} else {
		amdgpu_ring_write(ring, reg_offset);
		amdgpu_ring_write(ring,
			PACKETJ(0, 0, 0, PACKETJ_TYPE3));
	}
	amdgpu_ring_write(ring, mask);
}

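/**
 * vcn_v1_0_jpeg_ring_emit_vm_flush - emit a VM page table flush
 *
 * @ring: amdgpu_ring pointer
 * @vmid: VM ID whose page tables changed
 * @pd_addr: new page directory address
 *
 * Flush the GPU TLB via the GMC helper, then wait for the lower 32 bits
 * of the VM's page table base register to take the new value.
 */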
static void vcn_v1_0_jpeg_ring_emit_vm_flush(struct amdgpu_ring *ring,
		unsigned vmid, uint64_t pd_addr)
{
	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
	uint32_t data0, data1, mask;

	pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);

	/* wait for register write */
	data0 = hub->ctx0_ptb_addr_lo32 + vmid * 2;
	data1 = lower_32_bits(pd_addr);
	mask = 0xffffffff;
	vcn_v1_0_jpeg_ring_emit_reg_wait(ring, data0, data1, mask);
}

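/**
 * vcn_v1_0_jpeg_ring_emit_wreg - emit a register write
 *
 * @ring: amdgpu_ring pointer
 * @reg: register dword offset to write
 * @val: value to write
 *
 * Registers whose byte offset falls in the 0x1f800-0x21fff or
 * 0x1e000-0x1e1ff windows are addressed directly in the packet header;
 * anything else is reached indirectly by loading the offset into
 * mmUVD_JRBC_EXTERNAL_REG_BASE first.
 */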
static void vcn_v1_0_jpeg_ring_emit_wreg(struct amdgpu_ring *ring,
					uint32_t reg, uint32_t val)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t reg_offset = (reg << 2);

	amdgpu_ring_write(ring,
		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0));
	if (((reg_offset >= 0x1f800) && (reg_offset <= 0x21fff)) ||
			((reg_offset >= 0x1e000) && (reg_offset <= 0x1e1ff))) {
		amdgpu_ring_write(ring, 0);
		amdgpu_ring_write(ring,
			PACKETJ((reg_offset >> 2), 0, 0, PACKETJ_TYPE0));
	} else {
		amdgpu_ring_write(ring, reg_offset);
		amdgpu_ring_write(ring,
			PACKETJ(0, 0, 0, PACKETJ_TYPE0));
	}
	amdgpu_ring_write(ring, val);
}

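/**
 * vcn_v1_0_jpeg_ring_nop - insert NO-OP packets on the JPEG ring
 *
 * @ring: amdgpu_ring pointer
 * @count: number of dwords to fill, expected to be even
 *
 * Pad the ring with TYPE6 (no-op) packets; each packet takes two dwords.
 */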
static void vcn_v1_0_jpeg_ring_nop(struct amdgpu_ring *ring, uint32_t count)
{
	int i;

	WARN_ON(ring->wptr % 2 || count % 2);

	for (i = 0; i < count / 2; i++) {
		amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE6));
		amdgpu_ring_write(ring, 0);
	}
}

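/**
 * vcn_v1_0_jpeg_ring_patch_wreg - patch a register write into the ring buffer
 *
 * @ring: amdgpu_ring pointer
 * @ptr: in/out dword index into the ring buffer
 * @reg_offset: byte offset of the register to write
 * @val: value to write
 *
 * Same packet sequence as vcn_v1_0_jpeg_ring_emit_wreg(), but stored
 * straight into the ring buffer at *@ptr rather than going through
 * amdgpu_ring_write(), advancing *@ptr past the written dwords.
 */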
static void vcn_v1_0_jpeg_ring_patch_wreg(struct amdgpu_ring *ring, uint32_t *ptr,
					uint32_t reg_offset, uint32_t val)
{
	struct amdgpu_device *adev = ring->adev;

	ring->ring[(*ptr)++] = PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0);
	if (((reg_offset >= 0x1f800) && (reg_offset <= 0x21fff)) ||
		((reg_offset >= 0x1e000) && (reg_offset <= 0x1e1ff))) {
		ring->ring[(*ptr)++] = 0;
		ring->ring[(*ptr)++] = PACKETJ((reg_offset >> 2), 0, 0, PACKETJ_TYPE0);
	} else {
		ring->ring[(*ptr)++] = reg_offset;
		ring->ring[(*ptr)++] = PACKETJ(0, 0, 0, PACKETJ_TYPE0);
	}
	ring->ring[(*ptr)++] = val;
}

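/**
 * vcn_v1_0_jpeg_ring_set_patch_ring - patch the ring with a restart sequence
 *
 * @ring: amdgpu_ring pointer
 * @ptr: dword index in the ring buffer where patching starts
 *
 * Write a fixed command sequence directly into the ring buffer that
 * reprograms the JRBC ring base address, resets the read pointer and
 * toggles NO_FETCH in mmUVD_JRBC_RB_CNTL, as commented step by step below.
 */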
static void vcn_v1_0_jpeg_ring_set_patch_ring(struct amdgpu_ring *ring, uint32_t ptr)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t reg, reg_offset, val, mask, i;

	/* 1st: program mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW */
	reg = SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW);
	reg_offset = (reg << 2);
	val = lower_32_bits(ring->gpu_addr);
	vcn_v1_0_jpeg_ring_patch_wreg(ring, &ptr, reg_offset, val);

	/* 2nd: program mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH */
	reg = SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH);
	reg_offset = (reg << 2);
	val = upper_32_bits(ring->gpu_addr);
	vcn_v1_0_jpeg_ring_patch_wreg(ring, &ptr, reg_offset, val);

	/* 3rd to 5th: issue MEM_READ commands */
	for (i = 0; i <= 2; i++) {
		ring->ring[ptr++] = PACKETJ(0, 0, 0, PACKETJ_TYPE2);
		ring->ring[ptr++] = 0;
	}

	/* 6th: program mmUVD_JRBC_RB_CNTL to enable NO_FETCH and RPTR write ability */
	reg = SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_CNTL);
	reg_offset = (reg << 2);
	val = 0x13;
	vcn_v1_0_jpeg_ring_patch_wreg(ring, &ptr, reg_offset, val);

	/* 7th: program mmUVD_JRBC_RB_REF_DATA */
	reg = SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_REF_DATA);
	reg_offset = (reg << 2);
	val = 0x1;
	vcn_v1_0_jpeg_ring_patch_wreg(ring, &ptr, reg_offset, val);

	/* 8th: issue a conditional register read on mmUVD_JRBC_RB_CNTL */
	reg = SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_CNTL);
	reg_offset = (reg << 2);
	val = 0x1;
	mask = 0x1;

	ring->ring[ptr++] = PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_COND_RD_TIMER), 0, 0, PACKETJ_TYPE0);
	ring->ring[ptr++] = 0x01400200;
	ring->ring[ptr++] = PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_REF_DATA), 0, 0, PACKETJ_TYPE0);
	ring->ring[ptr++] = val;
	ring->ring[ptr++] = PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0);
	if (((reg_offset >= 0x1f800) && (reg_offset <= 0x21fff)) ||
		((reg_offset >= 0x1e000) && (reg_offset <= 0x1e1ff))) {
		ring->ring[ptr++] = 0;
		ring->ring[ptr++] = PACKETJ((reg_offset >> 2), 0, 0, PACKETJ_TYPE3);
	} else {
		ring->ring[ptr++] = reg_offset;
		ring->ring[ptr++] = PACKETJ(0, 0, 0, PACKETJ_TYPE3);
	}
	ring->ring[ptr++] = mask;

	/* 9th to 21st: insert no-ops */
	for (i = 0; i <= 12; i++) {
		ring->ring[ptr++] = PACKETJ(0, 0, 0, PACKETJ_TYPE6);
		ring->ring[ptr++] = 0;
	}

	/* 22nd: reset mmUVD_JRBC_RB_RPTR */
	reg = SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_RPTR);
	reg_offset = (reg << 2);
	val = 0;
	vcn_v1_0_jpeg_ring_patch_wreg(ring, &ptr, reg_offset, val);

	/* 23rd: program mmUVD_JRBC_RB_CNTL to disable NO_FETCH */
	reg = SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_CNTL);
	reg_offset = (reg << 2);
	val = 0x12;
	vcn_v1_0_jpeg_ring_patch_wreg(ring, &ptr, reg_offset, val);
}

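/**
 * vcn_v1_0_set_interrupt_state - set VCN interrupt state
 *
 * @adev: amdgpu_device pointer
 * @source: interrupt source
 * @type: interrupt type
 * @state: interrupt state requested
 *
 * No per-source interrupt state is programmed for VCN 1.0, so this is
 * a no-op that always reports success.
 */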
static int vcn_v1_0_set_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	return 0;
}

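/**
 * vcn_v1_0_process_interrupt - process a VCN interrupt
 *
 * @adev: amdgpu_device pointer
 * @source: interrupt source
 * @entry: interrupt vector entry
 *
 * Route the trap to the fence handler of the ring that raised it,
 * keyed on the IH source ID.
 */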
static int vcn_v1_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	DRM_DEBUG("IH: VCN TRAP\n");

	switch (entry->src_id) {
	case VCN_1_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT:
		amdgpu_fence_process(&adev->vcn.ring_dec);
		break;
	case VCN_1_0__SRCID__UVD_ENC_GENERAL_PURPOSE:
		amdgpu_fence_process(&adev->vcn.ring_enc[0]);
		break;
	case VCN_1_0__SRCID__UVD_ENC_LOW_LATENCY:
		amdgpu_fence_process(&adev->vcn.ring_enc[1]);
		break;
	case VCN_1_0__SRCID__UVD_JPEG_DECODE:
		amdgpu_fence_process(&adev->vcn.ring_jpeg);
		break;
	default:
		DRM_ERROR("Unhandled interrupt: %d %d\n",
			  entry->src_id, entry->src_data[0]);
		break;
	}

	return 0;
}

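/**
 * vcn_v1_0_dec_ring_insert_nop - insert NO-OP packets on the decode ring
 *
 * @ring: amdgpu_ring pointer
 * @count: number of dwords to fill, expected to be even
 *
 * Pad the ring with writes of 0 to mmUVD_NO_OP; each entry takes two dwords.
 */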
static void vcn_v1_0_dec_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
	struct amdgpu_device *adev = ring->adev;
	int i;

	WARN_ON(ring->wptr % 2 || count % 2);

	for (i = 0; i < count / 2; i++) {
		amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_NO_OP), 0));
		amdgpu_ring_write(ring, 0);
	}
}

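/**
 * vcn_v1_0_set_powergating_state - set VCN block powergating state
 *
 * @handle: amdgpu_device pointer
 * @state: powergating state to set
 *
 * Stop the block when gating, restart it when ungating; see the note
 * in the body about where the actual gating happens.
 */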
static int vcn_v1_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	/* This doesn't actually powergate the VCN block.
	 * That's done in the dpm code via the SMC.  This
	 * just re-inits the block as necessary.  The actual
	 * gating still happens in the dpm code.  We should
	 * revisit this when there is a cleaner line between
	 * the smc and the hw blocks.
	 */
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (state == AMD_PG_STATE_GATE)
		return vcn_v1_0_stop(adev);
	else
		return vcn_v1_0_start(adev);
}

static const struct amd_ip_funcs vcn_v1_0_ip_funcs = {
	.name = "vcn_v1_0",
	.early_init = vcn_v1_0_early_init,
	.late_init = NULL,
	.sw_init = vcn_v1_0_sw_init,
	.sw_fini = vcn_v1_0_sw_fini,
	.hw_init = vcn_v1_0_hw_init,
	.hw_fini = vcn_v1_0_hw_fini,
	.suspend = vcn_v1_0_suspend,
	.resume = vcn_v1_0_resume,
	.is_idle = vcn_v1_0_is_idle,
	.wait_for_idle = vcn_v1_0_wait_for_idle,
	.check_soft_reset = NULL /* vcn_v1_0_check_soft_reset */,
	.pre_soft_reset = NULL /* vcn_v1_0_pre_soft_reset */,
	.soft_reset = NULL /* vcn_v1_0_soft_reset */,
	.post_soft_reset = NULL /* vcn_v1_0_post_soft_reset */,
	.set_clockgating_state = vcn_v1_0_set_clockgating_state,
	.set_powergating_state = vcn_v1_0_set_powergating_state,
};

static const struct amdgpu_ring_funcs vcn_v1_0_dec_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_VCN_DEC,
	.align_mask = 0xf,
	.support_64bit_ptrs = false,
	.vmhub = AMDGPU_MMHUB,
	.get_rptr = vcn_v1_0_dec_ring_get_rptr,
	.get_wptr = vcn_v1_0_dec_ring_get_wptr,
	.set_wptr = vcn_v1_0_dec_ring_set_wptr,
	.emit_frame_size =
		6 + 6 + /* hdp invalidate / flush */
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
		8 + /* vcn_v1_0_dec_ring_emit_vm_flush */
		14 + 14 + /* vcn_v1_0_dec_ring_emit_fence x2 vm fence */
		6,
	.emit_ib_size = 8, /* vcn_v1_0_dec_ring_emit_ib */
	.emit_ib = vcn_v1_0_dec_ring_emit_ib,
	.emit_fence = vcn_v1_0_dec_ring_emit_fence,
	.emit_vm_flush = vcn_v1_0_dec_ring_emit_vm_flush,
	.test_ring = amdgpu_vcn_dec_ring_test_ring,
	.test_ib = amdgpu_vcn_dec_ring_test_ib,
	.insert_nop = vcn_v1_0_dec_ring_insert_nop,
	.insert_start = vcn_v1_0_dec_ring_insert_start,
	.insert_end = vcn_v1_0_dec_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_vcn_ring_begin_use,
	.end_use = amdgpu_vcn_ring_end_use,
	.emit_wreg = vcn_v1_0_dec_ring_emit_wreg,
	.emit_reg_wait = vcn_v1_0_dec_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};

static const struct amdgpu_ring_funcs vcn_v1_0_enc_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_VCN_ENC,
	.align_mask = 0x3f,
	.nop = VCN_ENC_CMD_NO_OP,
	.support_64bit_ptrs = false,
	.vmhub = AMDGPU_MMHUB,
	.get_rptr = vcn_v1_0_enc_ring_get_rptr,
	.get_wptr = vcn_v1_0_enc_ring_get_wptr,
	.set_wptr = vcn_v1_0_enc_ring_set_wptr,
	.emit_frame_size =
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
		4 + /* vcn_v1_0_enc_ring_emit_vm_flush */
		5 + 5 + /* vcn_v1_0_enc_ring_emit_fence x2 vm fence */
		1, /* vcn_v1_0_enc_ring_insert_end */
	.emit_ib_size = 5, /* vcn_v1_0_enc_ring_emit_ib */
	.emit_ib = vcn_v1_0_enc_ring_emit_ib,
	.emit_fence = vcn_v1_0_enc_ring_emit_fence,
	.emit_vm_flush = vcn_v1_0_enc_ring_emit_vm_flush,
	.test_ring = amdgpu_vcn_enc_ring_test_ring,
	.test_ib = amdgpu_vcn_enc_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.insert_end = vcn_v1_0_enc_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_vcn_ring_begin_use,
	.end_use = amdgpu_vcn_ring_end_use,
	.emit_wreg = vcn_v1_0_enc_ring_emit_wreg,
	.emit_reg_wait = vcn_v1_0_enc_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};

static const struct amdgpu_ring_funcs vcn_v1_0_jpeg_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_VCN_JPEG,
	.align_mask = 0xf,
	.nop = PACKET0(0x81ff, 0),
	.support_64bit_ptrs = false,
	.vmhub = AMDGPU_MMHUB,
	.extra_dw = 64,
	.get_rptr = vcn_v1_0_jpeg_ring_get_rptr,
	.get_wptr = vcn_v1_0_jpeg_ring_get_wptr,
	.set_wptr = vcn_v1_0_jpeg_ring_set_wptr,
	.emit_frame_size =
		6 + 6 + /* hdp invalidate / flush */
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
		8 + /* vcn_v1_0_jpeg_ring_emit_vm_flush */
		14 + 14 + /* vcn_v1_0_jpeg_ring_emit_fence x2 vm fence */
		6,
	.emit_ib_size = 22, /* vcn_v1_0_jpeg_ring_emit_ib */
	.emit_ib = vcn_v1_0_jpeg_ring_emit_ib,
	.emit_fence = vcn_v1_0_jpeg_ring_emit_fence,
	.emit_vm_flush = vcn_v1_0_jpeg_ring_emit_vm_flush,
	.test_ring = amdgpu_vcn_jpeg_ring_test_ring,
	.test_ib = amdgpu_vcn_jpeg_ring_test_ib,
	.insert_nop = vcn_v1_0_jpeg_ring_nop,
	.insert_start = vcn_v1_0_jpeg_ring_insert_start,
	.insert_end = vcn_v1_0_jpeg_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_vcn_ring_begin_use,
	.end_use = amdgpu_vcn_ring_end_use,
	.emit_wreg = vcn_v1_0_jpeg_ring_emit_wreg,
	.emit_reg_wait = vcn_v1_0_jpeg_ring_emit_reg_wait,
};

static void vcn_v1_0_set_dec_ring_funcs(struct amdgpu_device *adev)
{
	adev->vcn.ring_dec.funcs = &vcn_v1_0_dec_ring_vm_funcs;
	DRM_INFO("VCN decode is enabled in VM mode\n");
}

static void vcn_v1_0_set_enc_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->vcn.num_enc_rings; ++i)
		adev->vcn.ring_enc[i].funcs = &vcn_v1_0_enc_ring_vm_funcs;

	DRM_INFO("VCN encode is enabled in VM mode\n");
}

static void vcn_v1_0_set_jpeg_ring_funcs(struct amdgpu_device *adev)
{
	adev->vcn.ring_jpeg.funcs = &vcn_v1_0_jpeg_ring_vm_funcs;
	DRM_INFO("VCN jpeg decode is enabled in VM mode\n");
}

static const struct amdgpu_irq_src_funcs vcn_v1_0_irq_funcs = {
	.set = vcn_v1_0_set_interrupt_state,
	.process = vcn_v1_0_process_interrupt,
};

static void vcn_v1_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->vcn.irq.num_types = adev->vcn.num_enc_rings + 1;
	adev->vcn.irq.funcs = &vcn_v1_0_irq_funcs;
}

const struct amdgpu_ip_block_version vcn_v1_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_VCN,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &vcn_v1_0_ip_funcs,
};