linux/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * Authors: Christian König <christian.koenig@amd.com>
 */

#include <linux/firmware.h>

#include "amdgpu.h"
#include "amdgpu_vce.h"
#include "cikd.h"
#include "vce/vce_2_0_d.h"
#include "vce/vce_2_0_sh_mask.h"
#include "smu/smu_7_0_1_d.h"
#include "smu/smu_7_0_1_sh_mask.h"
#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"

#define VCE_V2_0_FW_SIZE        (256 * 1024)
#define VCE_V2_0_STACK_SIZE     (64 * 1024)
#define VCE_V2_0_DATA_SIZE      (23552 * AMDGPU_MAX_VCE_HANDLES)
#define VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK   0x02

static void vce_v2_0_set_ring_funcs(struct amdgpu_device *adev);
static void vce_v2_0_set_irq_funcs(struct amdgpu_device *adev);

/**
 * vce_v2_0_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint64_t vce_v2_0_ring_get_rptr(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;

        if (ring->me == 0)
                return RREG32(mmVCE_RB_RPTR);
        else
                return RREG32(mmVCE_RB_RPTR2);
}

/**
 * vce_v2_0_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint64_t vce_v2_0_ring_get_wptr(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;

        if (ring->me == 0)
                return RREG32(mmVCE_RB_WPTR);
        else
                return RREG32(mmVCE_RB_WPTR2);
}

/**
 * vce_v2_0_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void vce_v2_0_ring_set_wptr(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;

        if (ring->me == 0)
                WREG32(mmVCE_RB_WPTR, lower_32_bits(ring->wptr));
        else
                WREG32(mmVCE_RB_WPTR2, lower_32_bits(ring->wptr));
}

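/**
 * vce_v2_0_lmi_clean - wait for the LMI to report ready
 *
 * @adev: amdgpu_device pointer
 *
 * Polls VCE_LMI_STATUS until any of the masked status bits (0x337f) are
 * set. Returns 0 on success, -ETIMEDOUT otherwise. The per-bit meaning
 * of the mask is not spelled out in the register headers; it is kept
 * as-is from the original programming sequence.
 */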
static int vce_v2_0_lmi_clean(struct amdgpu_device *adev)
{
        int i, j;

        for (i = 0; i < 10; ++i) {
                for (j = 0; j < 100; ++j) {
                        uint32_t status = RREG32(mmVCE_LMI_STATUS);

                        if (status & 0x337f)
                                return 0;
                        mdelay(10);
                }
        }

        return -ETIMEDOUT;
}

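/**
 * vce_v2_0_firmware_loaded - wait for the VCPU to report firmware loaded
 *
 * @adev: amdgpu_device pointer
 *
 * Polls VCE_STATUS for the firmware-loaded flag, soft-resetting the ECPU
 * between retry rounds. Returns 0 once the firmware has reported in,
 * -ETIMEDOUT after ten reset attempts.
 */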
static int vce_v2_0_firmware_loaded(struct amdgpu_device *adev)
{
        int i, j;

        for (i = 0; i < 10; ++i) {
                for (j = 0; j < 100; ++j) {
                        uint32_t status = RREG32(mmVCE_STATUS);

                        if (status & VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK)
                                return 0;
                        mdelay(10);
                }

                DRM_ERROR("VCE not responding, trying to reset the ECPU!!!\n");
                WREG32_P(mmVCE_SOFT_RESET,
                        VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK,
                        ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
                mdelay(10);
                WREG32_P(mmVCE_SOFT_RESET, 0,
                        ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
                mdelay(10);
        }

        return -ETIMEDOUT;
}

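/* Force the VCE clocks on by setting the CGTT clock override bits */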
static void vce_v2_0_disable_cg(struct amdgpu_device *adev)
{
        WREG32(mmVCE_CGTT_CLK_OVERRIDE, 7);
}

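/**
 * vce_v2_0_init_cg - initialize clock gating registers
 *
 * @adev: amdgpu_device pointer
 *
 * Programs VCE_CLOCK_GATING_A/B and VCE_UENC_CLOCK_GATING with the
 * initial gating values used before the block is started.
 */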
static void vce_v2_0_init_cg(struct amdgpu_device *adev)
{
        u32 tmp;

        tmp = RREG32(mmVCE_CLOCK_GATING_A);
        tmp &= ~0xfff;
        tmp |= ((0 << 0) | (4 << 4));
        tmp |= 0x40000;
        WREG32(mmVCE_CLOCK_GATING_A, tmp);

        tmp = RREG32(mmVCE_UENC_CLOCK_GATING);
        tmp &= ~0xfff;
        tmp |= ((0 << 0) | (4 << 4));
        WREG32(mmVCE_UENC_CLOCK_GATING, tmp);

        tmp = RREG32(mmVCE_CLOCK_GATING_B);
        tmp |= 0x10;
        tmp &= ~0x100000;
        WREG32(mmVCE_CLOCK_GATING_B, tmp);
}

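/**
 * vce_v2_0_mc_resume - memory controller programming
 *
 * @adev: amdgpu_device pointer
 *
 * Tells the VCE memory controller where the firmware image lives and
 * lays out the three VCPU cache segments (firmware, stack, data) back
 * to back starting at AMDGPU_VCE_FIRMWARE_OFFSET.
 */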
static void vce_v2_0_mc_resume(struct amdgpu_device *adev)
{
        uint32_t size, offset;

        WREG32_P(mmVCE_CLOCK_GATING_A, 0, ~(1 << 16));
        WREG32_P(mmVCE_UENC_CLOCK_GATING, 0x1FF000, ~0xFF9FF000);
        WREG32_P(mmVCE_UENC_REG_CLOCK_GATING, 0x3F, ~0x3F);
        WREG32(mmVCE_CLOCK_GATING_B, 0xf7);

        WREG32(mmVCE_LMI_CTRL, 0x00398000);
        WREG32_P(mmVCE_LMI_CACHE_CTRL, 0x0, ~0x1);
        WREG32(mmVCE_LMI_SWAP_CNTL, 0);
        WREG32(mmVCE_LMI_SWAP_CNTL1, 0);
        WREG32(mmVCE_LMI_VM_CTRL, 0);

        WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR, (adev->vce.gpu_addr >> 8));

        offset = AMDGPU_VCE_FIRMWARE_OFFSET;
        size = VCE_V2_0_FW_SIZE;
        WREG32(mmVCE_VCPU_CACHE_OFFSET0, offset & 0x7fffffff);
        WREG32(mmVCE_VCPU_CACHE_SIZE0, size);

        offset += size;
        size = VCE_V2_0_STACK_SIZE;
        WREG32(mmVCE_VCPU_CACHE_OFFSET1, offset & 0x7fffffff);
        WREG32(mmVCE_VCPU_CACHE_SIZE1, size);

        offset += size;
        size = VCE_V2_0_DATA_SIZE;
        WREG32(mmVCE_VCPU_CACHE_OFFSET2, offset & 0x7fffffff);
        WREG32(mmVCE_VCPU_CACHE_SIZE2, size);

        WREG32_P(mmVCE_LMI_CTRL2, 0x0, ~0x100);
        WREG32_FIELD(VCE_SYS_INT_EN, VCE_SYS_INT_TRAP_INTERRUPT_EN, 1);
}

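/* VCE is idle once the SRBM stops reporting the block as busy */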
static bool vce_v2_0_is_idle(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        return !(RREG32(mmSRBM_STATUS2) & SRBM_STATUS2__VCE_BUSY_MASK);
}

static int vce_v2_0_wait_for_idle(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        unsigned i;

        for (i = 0; i < adev->usec_timeout; i++) {
                if (vce_v2_0_is_idle(handle))
                        return 0;
        }
        return -ETIMEDOUT;
}

/**
 * vce_v2_0_start - start VCE block
 *
 * @adev: amdgpu_device pointer
 *
 * Setup and start the VCE block
 */
static int vce_v2_0_start(struct amdgpu_device *adev)
{
        struct amdgpu_ring *ring;
        int r;

        /* set BUSY flag */
        WREG32_P(mmVCE_STATUS, 1, ~1);

        vce_v2_0_init_cg(adev);
        vce_v2_0_disable_cg(adev);

        vce_v2_0_mc_resume(adev);

        ring = &adev->vce.ring[0];
        WREG32(mmVCE_RB_RPTR, lower_32_bits(ring->wptr));
        WREG32(mmVCE_RB_WPTR, lower_32_bits(ring->wptr));
        WREG32(mmVCE_RB_BASE_LO, ring->gpu_addr);
        WREG32(mmVCE_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
        WREG32(mmVCE_RB_SIZE, ring->ring_size / 4);

        ring = &adev->vce.ring[1];
        WREG32(mmVCE_RB_RPTR2, lower_32_bits(ring->wptr));
        WREG32(mmVCE_RB_WPTR2, lower_32_bits(ring->wptr));
        WREG32(mmVCE_RB_BASE_LO2, ring->gpu_addr);
        WREG32(mmVCE_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
        WREG32(mmVCE_RB_SIZE2, ring->ring_size / 4);

        WREG32_FIELD(VCE_VCPU_CNTL, CLK_EN, 1);
        WREG32_FIELD(VCE_SOFT_RESET, ECPU_SOFT_RESET, 1);
        mdelay(100);
        WREG32_FIELD(VCE_SOFT_RESET, ECPU_SOFT_RESET, 0);

        r = vce_v2_0_firmware_loaded(adev);

        /* clear BUSY flag */
        WREG32_P(mmVCE_STATUS, 0, ~1);

        if (r) {
                DRM_ERROR("VCE not responding, giving up!!!\n");
                return r;
        }

        return 0;
}

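/**
 * vce_v2_0_stop - stop VCE block
 *
 * @adev: amdgpu_device pointer
 *
 * Waits for the LMI and the block to go idle, stalls the UMC and
 * register bus, then clocks the VCPU off and puts the block into
 * soft reset.
 */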
static int vce_v2_0_stop(struct amdgpu_device *adev)
{
        int i;
        int status;

        if (vce_v2_0_lmi_clean(adev)) {
                DRM_INFO("VCE is not idle\n");
                return 0;
        }

        if (vce_v2_0_wait_for_idle(adev)) {
                DRM_INFO("VCE is busy, can't set clock gating\n");
                return 0;
        }

        /* Stall UMC and register bus before resetting VCPU */
        WREG32_P(mmVCE_LMI_CTRL2, 1 << 8, ~(1 << 8));

        /* wait for the stall to be reflected in VCE_LMI_STATUS */
        for (i = 0; i < 100; ++i) {
                status = RREG32(mmVCE_LMI_STATUS);
                if (status & 0x240)
                        break;
                mdelay(1);
        }

        WREG32_P(mmVCE_VCPU_CNTL, 0, ~0x80001);

        /* put LMI, VCPU, RBC etc... into reset */
        WREG32_P(mmVCE_SOFT_RESET, 1, ~0x1);

        WREG32(mmVCE_STATUS, 0);

        return 0;
}

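/*
 * Software-controlled clock gating: flip the per-block gate bits in
 * VCE_CLOCK_GATING_B and the UENC gating registers by hand. The masks
 * are kept as-is from the original programming sequence; their per-bit
 * meaning is not spelled out in the register headers.
 */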
static void vce_v2_0_set_sw_cg(struct amdgpu_device *adev, bool gated)
{
        u32 tmp;

        if (gated) {
                tmp = RREG32(mmVCE_CLOCK_GATING_B);
                tmp |= 0xe70000;
                WREG32(mmVCE_CLOCK_GATING_B, tmp);

                tmp = RREG32(mmVCE_UENC_CLOCK_GATING);
                tmp |= 0xff000000;
                WREG32(mmVCE_UENC_CLOCK_GATING, tmp);

                tmp = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
                tmp &= ~0x3fc;
                WREG32(mmVCE_UENC_REG_CLOCK_GATING, tmp);

                WREG32(mmVCE_CGTT_CLK_OVERRIDE, 0);
        } else {
                tmp = RREG32(mmVCE_CLOCK_GATING_B);
                tmp |= 0xe7;
                tmp &= ~0xe70000;
                WREG32(mmVCE_CLOCK_GATING_B, tmp);

                tmp = RREG32(mmVCE_UENC_CLOCK_GATING);
                tmp |= 0x1fe000;
                tmp &= ~0xff000000;
                WREG32(mmVCE_UENC_CLOCK_GATING, tmp);

                tmp = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
                tmp |= 0x3fc;
                WREG32(mmVCE_UENC_REG_CLOCK_GATING, tmp);
        }
}

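/*
 * Dynamic (hardware-controlled) clock gating: let the CGC gate most
 * blocks on its own and only drive the software-controlled exceptions.
 */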
static void vce_v2_0_set_dyn_cg(struct amdgpu_device *adev, bool gated)
{
        u32 orig, tmp;

        /* LMI_MC/LMI_UMC always set in dynamic,
         * set {CGC_*_GATE_MODE, CGC_*_SW_GATE} = {0, 0}
         */
        tmp = RREG32(mmVCE_CLOCK_GATING_B);
        tmp &= ~0x00060006;

        /* Exception for ECPU, IH, SEM, SYS blocks needs to be turned on/off by SW */
        if (gated) {
                tmp |= 0xe10000;
                WREG32(mmVCE_CLOCK_GATING_B, tmp);
        } else {
                tmp |= 0xe1;
                tmp &= ~0xe10000;
                WREG32(mmVCE_CLOCK_GATING_B, tmp);
        }

        orig = tmp = RREG32(mmVCE_UENC_CLOCK_GATING);
        tmp &= ~0x1fe000;
        tmp &= ~0xff000000;
        if (tmp != orig)
                WREG32(mmVCE_UENC_CLOCK_GATING, tmp);

        orig = tmp = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
        tmp &= ~0x3fc;
        if (tmp != orig)
                WREG32(mmVCE_UENC_REG_CLOCK_GATING, tmp);

        /* set VCE_UENC_REG_CLOCK_GATING always in dynamic mode */
        WREG32(mmVCE_UENC_REG_CLOCK_GATING, 0x00);

        if (gated)
                WREG32(mmVCE_CGTT_CLK_OVERRIDE, 0);
}

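/*
 * vce_v2_0_enable_mgcg - toggle medium grain clock gating
 *
 * Picks between the software-controlled and dynamic gating sequences
 * above, or forces the clocks on when MGCG is not supported.
 */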
static void vce_v2_0_enable_mgcg(struct amdgpu_device *adev, bool enable,
                                 bool sw_cg)
{
        if (enable && (adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG)) {
                if (sw_cg)
                        vce_v2_0_set_sw_cg(adev, true);
                else
                        vce_v2_0_set_dyn_cg(adev, true);
        } else {
                vce_v2_0_disable_cg(adev);

                if (sw_cg)
                        vce_v2_0_set_sw_cg(adev, false);
                else
                        vce_v2_0_set_dyn_cg(adev, false);
        }
}

static int vce_v2_0_early_init(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        adev->vce.num_rings = 2;

        vce_v2_0_set_ring_funcs(adev);
        vce_v2_0_set_irq_funcs(adev);

        return 0;
}

static int vce_v2_0_sw_init(void *handle)
{
        struct amdgpu_ring *ring;
        int r, i;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        /* VCE */
        r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 167, &adev->vce.irq);
        if (r)
                return r;

        r = amdgpu_vce_sw_init(adev, VCE_V2_0_FW_SIZE +
                VCE_V2_0_STACK_SIZE + VCE_V2_0_DATA_SIZE);
        if (r)
                return r;

        r = amdgpu_vce_resume(adev);
        if (r)
                return r;

        for (i = 0; i < adev->vce.num_rings; i++) {
                ring = &adev->vce.ring[i];
                sprintf(ring->name, "vce%d", i);
                r = amdgpu_ring_init(adev, ring, 512,
                                     &adev->vce.irq, 0,
                                     AMDGPU_RING_PRIO_DEFAULT);
                if (r)
                        return r;
        }

        r = amdgpu_vce_entity_init(adev);

        return r;
}

static int vce_v2_0_sw_fini(void *handle)
{
        int r;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        r = amdgpu_vce_suspend(adev);
        if (r)
                return r;

        return amdgpu_vce_sw_fini(adev);
}

static int vce_v2_0_hw_init(void *handle)
{
        int r, i;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        amdgpu_asic_set_vce_clocks(adev, 10000, 10000);
        vce_v2_0_enable_mgcg(adev, true, false);

        for (i = 0; i < adev->vce.num_rings; i++) {
                r = amdgpu_ring_test_helper(&adev->vce.ring[i]);
                if (r)
                        return r;
        }

        DRM_INFO("VCE initialized successfully.\n");

        return 0;
}

static int vce_v2_0_hw_fini(void *handle)
{
        return 0;
}

static int vce_v2_0_suspend(void *handle)
{
        int r;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        r = vce_v2_0_hw_fini(adev);
        if (r)
                return r;

        return amdgpu_vce_suspend(adev);
}

static int vce_v2_0_resume(void *handle)
{
        int r;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        r = amdgpu_vce_resume(adev);
        if (r)
                return r;

        return vce_v2_0_hw_init(adev);
}

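/* Pulse the SRBM soft reset line for the VCE block, then restart it */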
static int vce_v2_0_soft_reset(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        WREG32_FIELD(SRBM_SOFT_RESET, SOFT_RESET_VCE, 1);
        mdelay(5);

        return vce_v2_0_start(adev);
}

static int vce_v2_0_set_interrupt_state(struct amdgpu_device *adev,
                                        struct amdgpu_irq_src *source,
                                        unsigned type,
                                        enum amdgpu_interrupt_state state)
{
        uint32_t val = 0;

        if (state == AMDGPU_IRQ_STATE_ENABLE)
                val |= VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK;

        WREG32_P(mmVCE_SYS_INT_EN, val, ~VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK);
        return 0;
}

static int vce_v2_0_process_interrupt(struct amdgpu_device *adev,
                                      struct amdgpu_irq_src *source,
                                      struct amdgpu_iv_entry *entry)
{
        DRM_DEBUG("IH: VCE\n");
        switch (entry->src_data[0]) {
        case 0:
        case 1:
                amdgpu_fence_process(&adev->vce.ring[entry->src_data[0]]);
                break;
        default:
                DRM_ERROR("Unhandled interrupt: %d %d\n",
                          entry->src_id, entry->src_data[0]);
                break;
        }

        return 0;
}

static int vce_v2_0_set_clockgating_state(void *handle,
                                          enum amd_clockgating_state state)
{
        bool gate = false;
        bool sw_cg = false;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        if (state == AMD_CG_STATE_GATE) {
                gate = true;
                sw_cg = true;
        }

        vce_v2_0_enable_mgcg(adev, gate, sw_cg);

        return 0;
}

static int vce_v2_0_set_powergating_state(void *handle,
                                          enum amd_powergating_state state)
{
        /* This doesn't actually powergate the VCE block.
         * That's done in the dpm code via the SMC.  This
         * just re-inits the block as necessary.  The actual
         * gating still happens in the dpm code.  We should
         * revisit this when there is a cleaner line between
         * the smc and the hw blocks
         */
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        if (state == AMD_PG_STATE_GATE)
                return vce_v2_0_stop(adev);
        else
                return vce_v2_0_start(adev);
}

static const struct amd_ip_funcs vce_v2_0_ip_funcs = {
        .name = "vce_v2_0",
        .early_init = vce_v2_0_early_init,
        .late_init = NULL,
        .sw_init = vce_v2_0_sw_init,
        .sw_fini = vce_v2_0_sw_fini,
        .hw_init = vce_v2_0_hw_init,
        .hw_fini = vce_v2_0_hw_fini,
        .suspend = vce_v2_0_suspend,
        .resume = vce_v2_0_resume,
        .is_idle = vce_v2_0_is_idle,
        .wait_for_idle = vce_v2_0_wait_for_idle,
        .soft_reset = vce_v2_0_soft_reset,
        .set_clockgating_state = vce_v2_0_set_clockgating_state,
        .set_powergating_state = vce_v2_0_set_powergating_state,
};

static const struct amdgpu_ring_funcs vce_v2_0_ring_funcs = {
        .type = AMDGPU_RING_TYPE_VCE,
        .align_mask = 0xf,
        .nop = VCE_CMD_NO_OP,
        .support_64bit_ptrs = false,
        .no_user_fence = true,
        .get_rptr = vce_v2_0_ring_get_rptr,
        .get_wptr = vce_v2_0_ring_get_wptr,
        .set_wptr = vce_v2_0_ring_set_wptr,
        .parse_cs = amdgpu_vce_ring_parse_cs,
        .emit_frame_size = 6, /* amdgpu_vce_ring_emit_fence x1 no user fence */
        .emit_ib_size = 4, /* amdgpu_vce_ring_emit_ib */
        .emit_ib = amdgpu_vce_ring_emit_ib,
        .emit_fence = amdgpu_vce_ring_emit_fence,
        .test_ring = amdgpu_vce_ring_test_ring,
        .test_ib = amdgpu_vce_ring_test_ib,
        .insert_nop = amdgpu_ring_insert_nop,
        .pad_ib = amdgpu_ring_generic_pad_ib,
        .begin_use = amdgpu_vce_ring_begin_use,
        .end_use = amdgpu_vce_ring_end_use,
};

static void vce_v2_0_set_ring_funcs(struct amdgpu_device *adev)
{
        int i;

        for (i = 0; i < adev->vce.num_rings; i++) {
                adev->vce.ring[i].funcs = &vce_v2_0_ring_funcs;
                adev->vce.ring[i].me = i;
        }
}

static const struct amdgpu_irq_src_funcs vce_v2_0_irq_funcs = {
        .set = vce_v2_0_set_interrupt_state,
        .process = vce_v2_0_process_interrupt,
};

static void vce_v2_0_set_irq_funcs(struct amdgpu_device *adev)
{
        adev->vce.irq.num_types = 1;
        adev->vce.irq.funcs = &vce_v2_0_irq_funcs;
}

const struct amdgpu_ip_block_version vce_v2_0_ip_block = {
        .type = AMD_IP_BLOCK_TYPE_VCE,
        .major = 2,
        .minor = 0,
        .rev = 0,
        .funcs = &vce_v2_0_ip_funcs,
};