linux/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * Authors: Christian König <christian.koenig@amd.com>
 */

#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_vce.h"
#include "cikd.h"
#include "vce/vce_2_0_d.h"
#include "vce/vce_2_0_sh_mask.h"
#include "smu/smu_7_0_1_d.h"
#include "smu/smu_7_0_1_sh_mask.h"
#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"

#define VCE_V2_0_FW_SIZE        (256 * 1024)
#define VCE_V2_0_STACK_SIZE     (64 * 1024)
#define VCE_V2_0_DATA_SIZE      (23552 * AMDGPU_MAX_VCE_HANDLES)
#define VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK   0x02

static void vce_v2_0_set_ring_funcs(struct amdgpu_device *adev);
static void vce_v2_0_set_irq_funcs(struct amdgpu_device *adev);

/**
 * vce_v2_0_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint64_t vce_v2_0_ring_get_rptr(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;

        if (ring == &adev->vce.ring[0])
                return RREG32(mmVCE_RB_RPTR);
        else
                return RREG32(mmVCE_RB_RPTR2);
}

/**
 * vce_v2_0_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint64_t vce_v2_0_ring_get_wptr(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;

        if (ring == &adev->vce.ring[0])
                return RREG32(mmVCE_RB_WPTR);
        else
                return RREG32(mmVCE_RB_WPTR2);
}

/**
 * vce_v2_0_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void vce_v2_0_ring_set_wptr(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;

        if (ring == &adev->vce.ring[0])
                WREG32(mmVCE_RB_WPTR, lower_32_bits(ring->wptr));
        else
                WREG32(mmVCE_RB_WPTR2, lower_32_bits(ring->wptr));
}

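/**
 * vce_v2_0_lmi_clean - wait for the VCE LMI to report clean
 *
 * @adev: amdgpu_device pointer
 *
 * Poll VCE_LMI_STATUS until one of the clean/idle status bits
 * (mask 0x337f) is set.  Returns 0 once the LMI reports clean,
 * -ETIMEDOUT otherwise.
 */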
static int vce_v2_0_lmi_clean(struct amdgpu_device *adev)
{
        int i, j;

        for (i = 0; i < 10; ++i) {
                for (j = 0; j < 100; ++j) {
                        uint32_t status = RREG32(mmVCE_LMI_STATUS);

                        if (status & 0x337f)
                                return 0;
                        mdelay(10);
                }
        }

        return -ETIMEDOUT;
}

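/**
 * vce_v2_0_firmware_loaded - wait for the VCPU to report its firmware loaded
 *
 * @adev: amdgpu_device pointer
 *
 * Poll VCE_STATUS for the firmware loaded report from the VCPU.
 * If it doesn't show up, soft reset the ECPU and try again, up to
 * ten times.  Returns 0 on success, -ETIMEDOUT otherwise.
 */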
static int vce_v2_0_firmware_loaded(struct amdgpu_device *adev)
{
        int i, j;

        for (i = 0; i < 10; ++i) {
                for (j = 0; j < 100; ++j) {
                        uint32_t status = RREG32(mmVCE_STATUS);

                        if (status & VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK)
                                return 0;
                        mdelay(10);
                }

                DRM_ERROR("VCE not responding, trying to reset the ECPU!!!\n");
                WREG32_P(mmVCE_SOFT_RESET,
                        VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK,
                        ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
                mdelay(10);
                WREG32_P(mmVCE_SOFT_RESET, 0,
                        ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
                mdelay(10);
        }

        return -ETIMEDOUT;
}

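/**
 * vce_v2_0_disable_cg - disable VCE clock gating
 *
 * @adev: amdgpu_device pointer
 *
 * Override the gating controls (VCE_CGTT_CLK_OVERRIDE) so the VCE
 * clocks are forced on.
 */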
static void vce_v2_0_disable_cg(struct amdgpu_device *adev)
{
        WREG32(mmVCE_CGTT_CLK_OVERRIDE, 7);
}

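/**
 * vce_v2_0_init_cg - init VCE clock gating
 *
 * @adev: amdgpu_device pointer
 *
 * Program the static clock gating setup in VCE_CLOCK_GATING_A/B
 * and VCE_UENC_CLOCK_GATING.
 */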
static void vce_v2_0_init_cg(struct amdgpu_device *adev)
{
        u32 tmp;

        tmp = RREG32(mmVCE_CLOCK_GATING_A);
        tmp &= ~0xfff;
        tmp |= ((0 << 0) | (4 << 4));
        tmp |= 0x40000;
        WREG32(mmVCE_CLOCK_GATING_A, tmp);

        tmp = RREG32(mmVCE_UENC_CLOCK_GATING);
        tmp &= ~0xfff;
        tmp |= ((0 << 0) | (4 << 4));
        WREG32(mmVCE_UENC_CLOCK_GATING, tmp);

        tmp = RREG32(mmVCE_CLOCK_GATING_B);
        tmp |= 0x10;
        tmp &= ~0x100000;
        WREG32(mmVCE_CLOCK_GATING_B, tmp);
}

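/**
 * vce_v2_0_mc_resume - program the memory controller interface
 *
 * @adev: amdgpu_device pointer
 *
 * Set up the LMI and point the VCPU caches at the firmware image
 * in VRAM: cache region 0 covers the firmware, region 1 the stack
 * and region 2 the data segment.
 */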
static void vce_v2_0_mc_resume(struct amdgpu_device *adev)
{
        uint32_t size, offset;

        WREG32_P(mmVCE_CLOCK_GATING_A, 0, ~(1 << 16));
        WREG32_P(mmVCE_UENC_CLOCK_GATING, 0x1FF000, ~0xFF9FF000);
        WREG32_P(mmVCE_UENC_REG_CLOCK_GATING, 0x3F, ~0x3F);
        WREG32(mmVCE_CLOCK_GATING_B, 0xf7);

        WREG32(mmVCE_LMI_CTRL, 0x00398000);
        WREG32_P(mmVCE_LMI_CACHE_CTRL, 0x0, ~0x1);
        WREG32(mmVCE_LMI_SWAP_CNTL, 0);
        WREG32(mmVCE_LMI_SWAP_CNTL1, 0);
        WREG32(mmVCE_LMI_VM_CTRL, 0);

        WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR, (adev->vce.gpu_addr >> 8));

        offset = AMDGPU_VCE_FIRMWARE_OFFSET;
        size = VCE_V2_0_FW_SIZE;
        WREG32(mmVCE_VCPU_CACHE_OFFSET0, offset & 0x7fffffff);
        WREG32(mmVCE_VCPU_CACHE_SIZE0, size);

        offset += size;
        size = VCE_V2_0_STACK_SIZE;
        WREG32(mmVCE_VCPU_CACHE_OFFSET1, offset & 0x7fffffff);
        WREG32(mmVCE_VCPU_CACHE_SIZE1, size);

        offset += size;
        size = VCE_V2_0_DATA_SIZE;
        WREG32(mmVCE_VCPU_CACHE_OFFSET2, offset & 0x7fffffff);
        WREG32(mmVCE_VCPU_CACHE_SIZE2, size);

        WREG32_P(mmVCE_LMI_CTRL2, 0x0, ~0x100);
        WREG32_FIELD(VCE_SYS_INT_EN, VCE_SYS_INT_TRAP_INTERRUPT_EN, 1);
}

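/**
 * vce_v2_0_is_idle - check VCE idle status
 *
 * @handle: amdgpu_device pointer
 *
 * Returns true if the VCE block reports idle in SRBM_STATUS2.
 */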
static bool vce_v2_0_is_idle(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        return !(RREG32(mmSRBM_STATUS2) & SRBM_STATUS2__VCE_BUSY_MASK);
}

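/**
 * vce_v2_0_wait_for_idle - wait for VCE to become idle
 *
 * @handle: amdgpu_device pointer
 *
 * Poll until the VCE block is idle.  Returns 0 on success,
 * -ETIMEDOUT if it is still busy after adev->usec_timeout tries.
 */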
static int vce_v2_0_wait_for_idle(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        unsigned i;

        for (i = 0; i < adev->usec_timeout; i++) {
                if (vce_v2_0_is_idle(handle))
                        return 0;
        }
        return -ETIMEDOUT;
}

/**
 * vce_v2_0_start - start VCE block
 *
 * @adev: amdgpu_device pointer
 *
 * Setup and start the VCE block
 */
static int vce_v2_0_start(struct amdgpu_device *adev)
{
        struct amdgpu_ring *ring;
        int r;

        /* set BUSY flag */
        WREG32_P(mmVCE_STATUS, 1, ~1);

        vce_v2_0_init_cg(adev);
        vce_v2_0_disable_cg(adev);

        vce_v2_0_mc_resume(adev);

        ring = &adev->vce.ring[0];
        WREG32(mmVCE_RB_RPTR, lower_32_bits(ring->wptr));
        WREG32(mmVCE_RB_WPTR, lower_32_bits(ring->wptr));
        WREG32(mmVCE_RB_BASE_LO, ring->gpu_addr);
        WREG32(mmVCE_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
        WREG32(mmVCE_RB_SIZE, ring->ring_size / 4);

        ring = &adev->vce.ring[1];
        WREG32(mmVCE_RB_RPTR2, lower_32_bits(ring->wptr));
        WREG32(mmVCE_RB_WPTR2, lower_32_bits(ring->wptr));
        WREG32(mmVCE_RB_BASE_LO2, ring->gpu_addr);
        WREG32(mmVCE_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
        WREG32(mmVCE_RB_SIZE2, ring->ring_size / 4);

        WREG32_FIELD(VCE_VCPU_CNTL, CLK_EN, 1);
        WREG32_FIELD(VCE_SOFT_RESET, ECPU_SOFT_RESET, 1);
        mdelay(100);
        WREG32_FIELD(VCE_SOFT_RESET, ECPU_SOFT_RESET, 0);

        r = vce_v2_0_firmware_loaded(adev);

        /* clear BUSY flag */
        WREG32_P(mmVCE_STATUS, 0, ~1);

        if (r) {
                DRM_ERROR("VCE not responding, giving up!!!\n");
                return r;
        }

        return 0;
}

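/**
 * vce_v2_0_stop - stop the VCE block
 *
 * @adev: amdgpu_device pointer
 *
 * Wait for the LMI to go clean and the block to go idle, then
 * stall the UMC and register bus, stop the VCPU and put the LMI,
 * VCPU and RBC into soft reset.
 */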
static int vce_v2_0_stop(struct amdgpu_device *adev)
{
        int i;
        int status;

        if (vce_v2_0_lmi_clean(adev)) {
                DRM_INFO("VCE is not idle\n");
                return 0;
        }

        if (vce_v2_0_wait_for_idle(adev)) {
                DRM_INFO("VCE is busy, can't set clock gating\n");
                return 0;
        }

        /* Stall UMC and register bus before resetting VCPU */
        WREG32_P(mmVCE_LMI_CTRL2, 1 << 8, ~(1 << 8));

        for (i = 0; i < 100; ++i) {
                status = RREG32(mmVCE_LMI_STATUS);
                if (status & 0x240)
                        break;
                mdelay(1);
        }

        WREG32_P(mmVCE_VCPU_CNTL, 0, ~0x80001);

        /* put LMI, VCPU, RBC etc... into reset */
        WREG32_P(mmVCE_SOFT_RESET, 1, ~0x1);

        WREG32(mmVCE_STATUS, 0);

        return 0;
}

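/**
 * vce_v2_0_set_sw_cg - software controlled clock gating
 *
 * @adev: amdgpu_device pointer
 * @gated: gate or ungate the clocks
 *
 * Gate or ungate the VCE sub-blocks under direct software control
 * via VCE_CLOCK_GATING_B and the UENC clock gating registers.
 */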
static void vce_v2_0_set_sw_cg(struct amdgpu_device *adev, bool gated)
{
        u32 tmp;

        if (gated) {
                tmp = RREG32(mmVCE_CLOCK_GATING_B);
                tmp |= 0xe70000;
                WREG32(mmVCE_CLOCK_GATING_B, tmp);

                tmp = RREG32(mmVCE_UENC_CLOCK_GATING);
                tmp |= 0xff000000;
                WREG32(mmVCE_UENC_CLOCK_GATING, tmp);

                tmp = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
                tmp &= ~0x3fc;
                WREG32(mmVCE_UENC_REG_CLOCK_GATING, tmp);

                WREG32(mmVCE_CGTT_CLK_OVERRIDE, 0);
        } else {
                tmp = RREG32(mmVCE_CLOCK_GATING_B);
                tmp |= 0xe7;
                tmp &= ~0xe70000;
                WREG32(mmVCE_CLOCK_GATING_B, tmp);

                tmp = RREG32(mmVCE_UENC_CLOCK_GATING);
                tmp |= 0x1fe000;
                tmp &= ~0xff000000;
                WREG32(mmVCE_UENC_CLOCK_GATING, tmp);

                tmp = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
                tmp |= 0x3fc;
                WREG32(mmVCE_UENC_REG_CLOCK_GATING, tmp);
        }
}

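/**
 * vce_v2_0_set_dyn_cg - dynamic (hardware controlled) clock gating
 *
 * @adev: amdgpu_device pointer
 * @gated: gate or ungate the clocks
 *
 * Let the hardware gate the sub-blocks dynamically; only the ECPU,
 * IH, SEM and SYS blocks remain under software control.
 */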
static void vce_v2_0_set_dyn_cg(struct amdgpu_device *adev, bool gated)
{
        u32 orig, tmp;

        /* LMI_MC/LMI_UMC always set in dynamic,
         * set {CGC_*_GATE_MODE, CGC_*_SW_GATE} = {0, 0}
         */
        tmp = RREG32(mmVCE_CLOCK_GATING_B);
        tmp &= ~0x00060006;

        /* exception: the ECPU, IH, SEM and SYS blocks need to be
         * turned on/off by SW
         */
        if (gated) {
                tmp |= 0xe10000;
                WREG32(mmVCE_CLOCK_GATING_B, tmp);
        } else {
                tmp |= 0xe1;
                tmp &= ~0xe10000;
                WREG32(mmVCE_CLOCK_GATING_B, tmp);
        }

        orig = tmp = RREG32(mmVCE_UENC_CLOCK_GATING);
        tmp &= ~0x1fe000;
        tmp &= ~0xff000000;
        if (tmp != orig)
                WREG32(mmVCE_UENC_CLOCK_GATING, tmp);

        orig = tmp = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
        tmp &= ~0x3fc;
        if (tmp != orig)
                WREG32(mmVCE_UENC_REG_CLOCK_GATING, tmp);

        /* set VCE_UENC_REG_CLOCK_GATING always in dynamic mode */
        WREG32(mmVCE_UENC_REG_CLOCK_GATING, 0x00);

        if (gated)
                WREG32(mmVCE_CGTT_CLK_OVERRIDE, 0);
}

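/**
 * vce_v2_0_enable_mgcg - enable/disable medium grain clock gating
 *
 * @adev: amdgpu_device pointer
 * @enable: enable or disable MGCG
 * @sw_cg: use software or dynamic clock gating
 *
 * Enable MGCG when the hardware supports it (AMD_CG_SUPPORT_VCE_MGCG),
 * otherwise force the clocks on.
 */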
static void vce_v2_0_enable_mgcg(struct amdgpu_device *adev, bool enable,
                                 bool sw_cg)
{
        if (enable && (adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG)) {
                if (sw_cg)
                        vce_v2_0_set_sw_cg(adev, true);
                else
                        vce_v2_0_set_dyn_cg(adev, true);
        } else {
                vce_v2_0_disable_cg(adev);

                if (sw_cg)
                        vce_v2_0_set_sw_cg(adev, false);
                else
                        vce_v2_0_set_dyn_cg(adev, false);
        }
}

static int vce_v2_0_early_init(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        adev->vce.num_rings = 2;

        vce_v2_0_set_ring_funcs(adev);
        vce_v2_0_set_irq_funcs(adev);

        return 0;
}

static int vce_v2_0_sw_init(void *handle)
{
        struct amdgpu_ring *ring;
        int r, i;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        /* VCE */
        r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 167, &adev->vce.irq);
        if (r)
                return r;

        r = amdgpu_vce_sw_init(adev, VCE_V2_0_FW_SIZE +
                VCE_V2_0_STACK_SIZE + VCE_V2_0_DATA_SIZE);
        if (r)
                return r;

        r = amdgpu_vce_resume(adev);
        if (r)
                return r;

        for (i = 0; i < adev->vce.num_rings; i++) {
                ring = &adev->vce.ring[i];
                sprintf(ring->name, "vce%d", i);
                r = amdgpu_ring_init(adev, ring, 512,
                                     &adev->vce.irq, 0);
                if (r)
                        return r;
        }

        return r;
}

static int vce_v2_0_sw_fini(void *handle)
{
        int r;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        r = amdgpu_vce_suspend(adev);
        if (r)
                return r;

        return amdgpu_vce_sw_fini(adev);
}

static int vce_v2_0_hw_init(void *handle)
{
        int r, i;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        amdgpu_asic_set_vce_clocks(adev, 10000, 10000);
        vce_v2_0_enable_mgcg(adev, true, false);
        for (i = 0; i < adev->vce.num_rings; i++)
                adev->vce.ring[i].ready = false;

        for (i = 0; i < adev->vce.num_rings; i++) {
                r = amdgpu_ring_test_ring(&adev->vce.ring[i]);
                if (r)
                        return r;
                adev->vce.ring[i].ready = true;
        }

        DRM_INFO("VCE initialized successfully.\n");

        return 0;
}

static int vce_v2_0_hw_fini(void *handle)
{
        return 0;
}

static int vce_v2_0_suspend(void *handle)
{
        int r;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        r = vce_v2_0_hw_fini(adev);
        if (r)
                return r;

        return amdgpu_vce_suspend(adev);
}

static int vce_v2_0_resume(void *handle)
{
        int r;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        r = amdgpu_vce_resume(adev);
        if (r)
                return r;

        return vce_v2_0_hw_init(adev);
}

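/**
 * vce_v2_0_soft_reset - soft reset the VCE block
 *
 * @handle: amdgpu_device pointer
 *
 * Assert the VCE soft reset in the SRBM, then restart the block.
 */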
static int vce_v2_0_soft_reset(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        WREG32_FIELD(SRBM_SOFT_RESET, SOFT_RESET_VCE, 1);
        mdelay(5);

        return vce_v2_0_start(adev);
}

static int vce_v2_0_set_interrupt_state(struct amdgpu_device *adev,
                                        struct amdgpu_irq_src *source,
                                        unsigned type,
                                        enum amdgpu_interrupt_state state)
{
        uint32_t val = 0;

        if (state == AMDGPU_IRQ_STATE_ENABLE)
                val |= VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK;

        WREG32_P(mmVCE_SYS_INT_EN, val, ~VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK);
        return 0;
}

static int vce_v2_0_process_interrupt(struct amdgpu_device *adev,
                                      struct amdgpu_irq_src *source,
                                      struct amdgpu_iv_entry *entry)
{
        DRM_DEBUG("IH: VCE\n");
        switch (entry->src_data[0]) {
        case 0:
        case 1:
                amdgpu_fence_process(&adev->vce.ring[entry->src_data[0]]);
                break;
        default:
                DRM_ERROR("Unhandled interrupt: %d %d\n",
                          entry->src_id, entry->src_data[0]);
                break;
        }

        return 0;
}

static int vce_v2_0_set_clockgating_state(void *handle,
                                          enum amd_clockgating_state state)
{
        bool gate = false;
        bool sw_cg = false;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        if (state == AMD_CG_STATE_GATE) {
                gate = true;
                sw_cg = true;
        }

        vce_v2_0_enable_mgcg(adev, gate, sw_cg);

        return 0;
}

static int vce_v2_0_set_powergating_state(void *handle,
                                          enum amd_powergating_state state)
{
        /* This doesn't actually powergate the VCE block.
         * That's done in the dpm code via the SMC.  This
         * just re-inits the block as necessary.  The actual
         * gating still happens in the dpm code.  We should
         * revisit this when there is a cleaner line between
         * the smc and the hw blocks
         */
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        if (state == AMD_PG_STATE_GATE)
                return vce_v2_0_stop(adev);
        else
                return vce_v2_0_start(adev);
}

static const struct amd_ip_funcs vce_v2_0_ip_funcs = {
        .name = "vce_v2_0",
        .early_init = vce_v2_0_early_init,
        .late_init = NULL,
        .sw_init = vce_v2_0_sw_init,
        .sw_fini = vce_v2_0_sw_fini,
        .hw_init = vce_v2_0_hw_init,
        .hw_fini = vce_v2_0_hw_fini,
        .suspend = vce_v2_0_suspend,
        .resume = vce_v2_0_resume,
        .is_idle = vce_v2_0_is_idle,
        .wait_for_idle = vce_v2_0_wait_for_idle,
        .soft_reset = vce_v2_0_soft_reset,
        .set_clockgating_state = vce_v2_0_set_clockgating_state,
        .set_powergating_state = vce_v2_0_set_powergating_state,
};

static const struct amdgpu_ring_funcs vce_v2_0_ring_funcs = {
        .type = AMDGPU_RING_TYPE_VCE,
        .align_mask = 0xf,
        .nop = VCE_CMD_NO_OP,
        .support_64bit_ptrs = false,
        .get_rptr = vce_v2_0_ring_get_rptr,
        .get_wptr = vce_v2_0_ring_get_wptr,
        .set_wptr = vce_v2_0_ring_set_wptr,
        .parse_cs = amdgpu_vce_ring_parse_cs,
        .emit_frame_size = 6, /* amdgpu_vce_ring_emit_fence x1 no user fence */
        .emit_ib_size = 4, /* amdgpu_vce_ring_emit_ib */
        .emit_ib = amdgpu_vce_ring_emit_ib,
        .emit_fence = amdgpu_vce_ring_emit_fence,
        .test_ring = amdgpu_vce_ring_test_ring,
        .test_ib = amdgpu_vce_ring_test_ib,
        .insert_nop = amdgpu_ring_insert_nop,
        .pad_ib = amdgpu_ring_generic_pad_ib,
        .begin_use = amdgpu_vce_ring_begin_use,
        .end_use = amdgpu_vce_ring_end_use,
};

static void vce_v2_0_set_ring_funcs(struct amdgpu_device *adev)
{
        int i;

        for (i = 0; i < adev->vce.num_rings; i++)
                adev->vce.ring[i].funcs = &vce_v2_0_ring_funcs;
}

static const struct amdgpu_irq_src_funcs vce_v2_0_irq_funcs = {
        .set = vce_v2_0_set_interrupt_state,
        .process = vce_v2_0_process_interrupt,
};

static void vce_v2_0_set_irq_funcs(struct amdgpu_device *adev)
{
        adev->vce.irq.num_types = 1;
        adev->vce.irq.funcs = &vce_v2_0_irq_funcs;
}

const struct amdgpu_ip_block_version vce_v2_0_ip_block = {
        .type = AMD_IP_BLOCK_TYPE_VCE,
        .major = 2,
        .minor = 0,
        .rev = 0,
        .funcs = &vce_v2_0_ip_funcs,
};