linux/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
<<
>>
Prefs
   1/*
   2 * Copyright 2013 Advanced Micro Devices, Inc.
   3 * All Rights Reserved.
   4 *
   5 * Permission is hereby granted, free of charge, to any person obtaining a
   6 * copy of this software and associated documentation files (the
   7 * "Software"), to deal in the Software without restriction, including
   8 * without limitation the rights to use, copy, modify, merge, publish,
   9 * distribute, sub license, and/or sell copies of the Software, and to
  10 * permit persons to whom the Software is furnished to do so, subject to
  11 * the following conditions:
  12 *
  13 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  14 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  15 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
  16 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
  17 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
  18 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
  19 * USE OR OTHER DEALINGS IN THE SOFTWARE.
  20 *
  21 * The above copyright notice and this permission notice (including the
  22 * next paragraph) shall be included in all copies or substantial portions
  23 * of the Software.
  24 *
  25 * Authors: Christian König <christian.koenig@amd.com>
  26 */
  27
  28#include <linux/firmware.h>
  29
  30#include "amdgpu.h"
  31#include "amdgpu_vce.h"
  32#include "cikd.h"
  33#include "vce/vce_2_0_d.h"
  34#include "vce/vce_2_0_sh_mask.h"
  35#include "smu/smu_7_0_1_d.h"
  36#include "smu/smu_7_0_1_sh_mask.h"
  37#include "oss/oss_2_0_d.h"
  38#include "oss/oss_2_0_sh_mask.h"
  39
  40#define VCE_V2_0_FW_SIZE        (256 * 1024)
  41#define VCE_V2_0_STACK_SIZE     (64 * 1024)
  42#define VCE_V2_0_DATA_SIZE      (23552 * AMDGPU_MAX_VCE_HANDLES)
  43#define VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK   0x02
  44
  45static void vce_v2_0_set_ring_funcs(struct amdgpu_device *adev);
  46static void vce_v2_0_set_irq_funcs(struct amdgpu_device *adev);
  47
  48/**
  49 * vce_v2_0_ring_get_rptr - get read pointer
  50 *
  51 * @ring: amdgpu_ring pointer
  52 *
  53 * Returns the current hardware read pointer
  54 */
  55static uint64_t vce_v2_0_ring_get_rptr(struct amdgpu_ring *ring)
  56{
  57        struct amdgpu_device *adev = ring->adev;
  58
  59        if (ring->me == 0)
  60                return RREG32(mmVCE_RB_RPTR);
  61        else
  62                return RREG32(mmVCE_RB_RPTR2);
  63}
  64
  65/**
  66 * vce_v2_0_ring_get_wptr - get write pointer
  67 *
  68 * @ring: amdgpu_ring pointer
  69 *
  70 * Returns the current hardware write pointer
  71 */
  72static uint64_t vce_v2_0_ring_get_wptr(struct amdgpu_ring *ring)
  73{
  74        struct amdgpu_device *adev = ring->adev;
  75
  76        if (ring->me == 0)
  77                return RREG32(mmVCE_RB_WPTR);
  78        else
  79                return RREG32(mmVCE_RB_WPTR2);
  80}
  81
  82/**
  83 * vce_v2_0_ring_set_wptr - set write pointer
  84 *
  85 * @ring: amdgpu_ring pointer
  86 *
  87 * Commits the write pointer to the hardware
  88 */
  89static void vce_v2_0_ring_set_wptr(struct amdgpu_ring *ring)
  90{
  91        struct amdgpu_device *adev = ring->adev;
  92
  93        if (ring->me == 0)
  94                WREG32(mmVCE_RB_WPTR, lower_32_bits(ring->wptr));
  95        else
  96                WREG32(mmVCE_RB_WPTR2, lower_32_bits(ring->wptr));
  97}
  98
  99static int vce_v2_0_lmi_clean(struct amdgpu_device *adev)
 100{
 101        int i, j;
 102
 103        for (i = 0; i < 10; ++i) {
 104                for (j = 0; j < 100; ++j) {
 105                        uint32_t status = RREG32(mmVCE_LMI_STATUS);
 106
 107                        if (status & 0x337f)
 108                                return 0;
 109                        mdelay(10);
 110                }
 111        }
 112
 113        return -ETIMEDOUT;
 114}
 115
/*
 * Wait for the VCE VCPU firmware to report itself as loaded.
 *
 * Each outer attempt polls VCE_STATUS for up to 1s (100 * 10ms) for the
 * "FW loaded" report bit; if it never comes up, the ECPU is soft-reset
 * and the poll is retried, for up to 10 attempts in total.
 *
 * Returns 0 on success, -ETIMEDOUT if the firmware never came up.
 */
static int vce_v2_0_firmware_loaded(struct amdgpu_device *adev)
{
	int i, j;

	for (i = 0; i < 10; ++i) {
		for (j = 0; j < 100; ++j) {
			uint32_t status = RREG32(mmVCE_STATUS);

			if (status & VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK)
				return 0;
			mdelay(10);
		}

		DRM_ERROR("VCE not responding, trying to reset the ECPU!!!\n");
		/* Pulse the ECPU soft reset: assert, settle, deassert. */
		WREG32_P(mmVCE_SOFT_RESET,
			VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK,
			~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
		mdelay(10);
		WREG32_P(mmVCE_SOFT_RESET, 0,
			~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
		mdelay(10);
	}

	return -ETIMEDOUT;
}
 141
/* Disable clock gating by forcing the clocks on via the CGTT override
 * register (value 7 -- presumably the low three override bits; confirm
 * against the register spec). */
static void vce_v2_0_disable_cg(struct amdgpu_device *adev)
{
	WREG32(mmVCE_CGTT_CLK_OVERRIDE, 7);
}
 146
/*
 * Program the initial clock-gating configuration for VCE 2.0.
 *
 * The field values written below are magic numbers from the hardware
 * bring-up sequence; their bit meanings are not spelled out in the
 * register headers -- do not change without consulting the HW spec.
 */
static void vce_v2_0_init_cg(struct amdgpu_device *adev)
{
	u32 tmp;

	/* CLOCK_GATING_A: clear low 12 bits, set field 4 at bit 4,
	 * plus bit 18 (0x40000). */
	tmp = RREG32(mmVCE_CLOCK_GATING_A);
	tmp &= ~0xfff;
	tmp |= ((0 << 0) | (4 << 4));
	tmp |= 0x40000;
	WREG32(mmVCE_CLOCK_GATING_A, tmp);

	/* UENC_CLOCK_GATING: same low-field programming, no bit 18. */
	tmp = RREG32(mmVCE_UENC_CLOCK_GATING);
	tmp &= ~0xfff;
	tmp |= ((0 << 0) | (4 << 4));
	WREG32(mmVCE_UENC_CLOCK_GATING, tmp);

	/* CLOCK_GATING_B: set bit 4, clear bit 20. */
	tmp = RREG32(mmVCE_CLOCK_GATING_B);
	tmp |= 0x10;
	tmp &= ~0x100000;
	WREG32(mmVCE_CLOCK_GATING_B, tmp);
}
 167
/*
 * Program the VCE memory-controller interface after (re)initialization:
 * clock-gating overrides, LMI configuration, the 40-bit base address of
 * the VCE BO, and the three VCPU cache regions (firmware, stack, data).
 */
static void vce_v2_0_mc_resume(struct amdgpu_device *adev)
{
	uint32_t size, offset;

	/* Magic clock-gating masks from the HW init sequence. */
	WREG32_P(mmVCE_CLOCK_GATING_A, 0, ~(1 << 16));
	WREG32_P(mmVCE_UENC_CLOCK_GATING, 0x1FF000, ~0xFF9FF000);
	WREG32_P(mmVCE_UENC_REG_CLOCK_GATING, 0x3F, ~0x3F);
	WREG32(mmVCE_CLOCK_GATING_B, 0xf7);

	/* LMI setup: disable byte swapping and VM mode. */
	WREG32(mmVCE_LMI_CTRL, 0x00398000);
	WREG32_P(mmVCE_LMI_CACHE_CTRL, 0x0, ~0x1);
	WREG32(mmVCE_LMI_SWAP_CNTL, 0);
	WREG32(mmVCE_LMI_SWAP_CNTL1, 0);
	WREG32(mmVCE_LMI_VM_CTRL, 0);

	/* BO base address; the >> 8 implies 256-byte alignment. */
	WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR, (adev->vce.gpu_addr >> 8));

	/* Cache region 0: firmware image. */
	offset = AMDGPU_VCE_FIRMWARE_OFFSET;
	size = VCE_V2_0_FW_SIZE;
	WREG32(mmVCE_VCPU_CACHE_OFFSET0, offset & 0x7fffffff);
	WREG32(mmVCE_VCPU_CACHE_SIZE0, size);

	/* Cache region 1: stack, directly after the firmware. */
	offset += size;
	size = VCE_V2_0_STACK_SIZE;
	WREG32(mmVCE_VCPU_CACHE_OFFSET1, offset & 0x7fffffff);
	WREG32(mmVCE_VCPU_CACHE_SIZE1, size);

	/* Cache region 2: data area, after the stack. */
	offset += size;
	size = VCE_V2_0_DATA_SIZE;
	WREG32(mmVCE_VCPU_CACHE_OFFSET2, offset & 0x7fffffff);
	WREG32(mmVCE_VCPU_CACHE_SIZE2, size);

	WREG32_P(mmVCE_LMI_CTRL2, 0x0, ~0x100);
	/* Enable the VCE trap interrupt at the system level. */
	WREG32_FIELD(VCE_SYS_INT_EN, VCE_SYS_INT_TRAP_INTERRUPT_EN, 1);
}
 203
 204static bool vce_v2_0_is_idle(void *handle)
 205{
 206        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 207
 208        return !(RREG32(mmSRBM_STATUS2) & SRBM_STATUS2__VCE_BUSY_MASK);
 209}
 210
 211static int vce_v2_0_wait_for_idle(void *handle)
 212{
 213        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 214        unsigned i;
 215
 216        for (i = 0; i < adev->usec_timeout; i++) {
 217                if (vce_v2_0_is_idle(handle))
 218                        return 0;
 219        }
 220        return -ETIMEDOUT;
 221}
 222
/**
 * vce_v2_0_start - start VCE block
 *
 * @adev: amdgpu_device pointer
 *
 * Setup and start the VCE block
 */
static int vce_v2_0_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	int r;

	/* set BUSY flag */
	WREG32_P(mmVCE_STATUS, 1, ~1);

	vce_v2_0_init_cg(adev);
	vce_v2_0_disable_cg(adev);

	vce_v2_0_mc_resume(adev);

	/* Program ring buffer 0: rptr/wptr, base address and size. */
	ring = &adev->vce.ring[0];
	WREG32(mmVCE_RB_RPTR, lower_32_bits(ring->wptr));
	WREG32(mmVCE_RB_WPTR, lower_32_bits(ring->wptr));
	WREG32(mmVCE_RB_BASE_LO, ring->gpu_addr);
	WREG32(mmVCE_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
	WREG32(mmVCE_RB_SIZE, ring->ring_size / 4);

	/* Program ring buffer 1 the same way. */
	ring = &adev->vce.ring[1];
	WREG32(mmVCE_RB_RPTR2, lower_32_bits(ring->wptr));
	WREG32(mmVCE_RB_WPTR2, lower_32_bits(ring->wptr));
	WREG32(mmVCE_RB_BASE_LO2, ring->gpu_addr);
	WREG32(mmVCE_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
	WREG32(mmVCE_RB_SIZE2, ring->ring_size / 4);

	/* Enable the VCPU clock and release the ECPU from reset after a
	 * 100ms settle time, then wait for the firmware to come up. */
	WREG32_FIELD(VCE_VCPU_CNTL, CLK_EN, 1);
	WREG32_FIELD(VCE_SOFT_RESET, ECPU_SOFT_RESET, 1);
	mdelay(100);
	WREG32_FIELD(VCE_SOFT_RESET, ECPU_SOFT_RESET, 0);

	r = vce_v2_0_firmware_loaded(adev);

	/* clear BUSY flag */
	WREG32_P(mmVCE_STATUS, 0, ~1);

	if (r) {
		DRM_ERROR("VCE not responding, giving up!!!\n");
		return r;
	}

	return 0;
}
 274
/*
 * Stop the VCE block: wait for it to quiesce, stall the memory
 * interface, stop the VCPU and put the block into reset.
 *
 * Always returns 0; a busy/non-idle block merely skips the teardown.
 */
static int vce_v2_0_stop(struct amdgpu_device *adev)
{
	int i;
	int status;

	if (vce_v2_0_lmi_clean(adev)) {
		DRM_INFO("vce is not idle \n");
		return 0;
	}

	if (vce_v2_0_wait_for_idle(adev)) {
		DRM_INFO("VCE is busy, Can't set clock gating");
		return 0;
	}

	/* Stall UMC and register bus before resetting VCPU */
	WREG32_P(mmVCE_LMI_CTRL2, 1 << 8, ~(1 << 8));

	/* Wait up to 100ms for the stall to take effect (bits 0x240 in
	 * LMI_STATUS -- exact bit meanings not in the headers). */
	for (i = 0; i < 100; ++i) {
		status = RREG32(mmVCE_LMI_STATUS);
		if (status & 0x240)
			break;
		mdelay(1);
	}

	/* Stop the VCPU (clears bits 0x80001 of VCPU_CNTL). */
	WREG32_P(mmVCE_VCPU_CNTL, 0, ~0x80001);

	/* put LMI, VCPU, RBC etc... into reset */
	WREG32_P(mmVCE_SOFT_RESET, 1, ~0x1);

	/* Clear VCE_STATUS, including the BUSY flag. */
	WREG32(mmVCE_STATUS, 0);

	return 0;
}
 309
/*
 * Configure software-controlled clock gating.
 *
 * @adev: amdgpu_device pointer
 * @gated: true to gate the clocks, false to ungate them
 *
 * The mask values below are magic numbers from the HW programming
 * sequence; the gated and ungated paths write complementary bit sets.
 */
static void vce_v2_0_set_sw_cg(struct amdgpu_device *adev, bool gated)
{
	u32 tmp;

	if (gated) {
		tmp = RREG32(mmVCE_CLOCK_GATING_B);
		tmp |= 0xe70000;
		WREG32(mmVCE_CLOCK_GATING_B, tmp);

		tmp = RREG32(mmVCE_UENC_CLOCK_GATING);
		tmp |= 0xff000000;
		WREG32(mmVCE_UENC_CLOCK_GATING, tmp);

		tmp = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
		tmp &= ~0x3fc;
		WREG32(mmVCE_UENC_REG_CLOCK_GATING, tmp);

		/* Drop the CGTT override so gating can take effect. */
		WREG32(mmVCE_CGTT_CLK_OVERRIDE, 0);
	} else {
		tmp = RREG32(mmVCE_CLOCK_GATING_B);
		tmp |= 0xe7;
		tmp &= ~0xe70000;
		WREG32(mmVCE_CLOCK_GATING_B, tmp);

		tmp = RREG32(mmVCE_UENC_CLOCK_GATING);
		tmp |= 0x1fe000;
		tmp &= ~0xff000000;
		WREG32(mmVCE_UENC_CLOCK_GATING, tmp);

		tmp = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
		tmp |= 0x3fc;
		WREG32(mmVCE_UENC_REG_CLOCK_GATING, tmp);
	}
}
 344
 345static void vce_v2_0_set_dyn_cg(struct amdgpu_device *adev, bool gated)
 346{
 347        u32 orig, tmp;
 348
 349/* LMI_MC/LMI_UMC always set in dynamic,
 350 * set {CGC_*_GATE_MODE, CGC_*_SW_GATE} = {0, 0}
 351 */
 352        tmp = RREG32(mmVCE_CLOCK_GATING_B);
 353        tmp &= ~0x00060006;
 354
 355/* Exception for ECPU, IH, SEM, SYS blocks needs to be turned on/off by SW */
 356        if (gated) {
 357                tmp |= 0xe10000;
 358                WREG32(mmVCE_CLOCK_GATING_B, tmp);
 359        } else {
 360                tmp |= 0xe1;
 361                tmp &= ~0xe10000;
 362                WREG32(mmVCE_CLOCK_GATING_B, tmp);
 363        }
 364
 365        orig = tmp = RREG32(mmVCE_UENC_CLOCK_GATING);
 366        tmp &= ~0x1fe000;
 367        tmp &= ~0xff000000;
 368        if (tmp != orig)
 369                WREG32(mmVCE_UENC_CLOCK_GATING, tmp);
 370
 371        orig = tmp = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
 372        tmp &= ~0x3fc;
 373        if (tmp != orig)
 374                WREG32(mmVCE_UENC_REG_CLOCK_GATING, tmp);
 375
 376        /* set VCE_UENC_REG_CLOCK_GATING always in dynamic mode */
 377        WREG32(mmVCE_UENC_REG_CLOCK_GATING, 0x00);
 378
 379        if(gated)
 380                WREG32(mmVCE_CGTT_CLK_OVERRIDE, 0);
 381}
 382
 383static void vce_v2_0_enable_mgcg(struct amdgpu_device *adev, bool enable,
 384                                                                bool sw_cg)
 385{
 386        if (enable && (adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG)) {
 387                if (sw_cg)
 388                        vce_v2_0_set_sw_cg(adev, true);
 389                else
 390                        vce_v2_0_set_dyn_cg(adev, true);
 391        } else {
 392                vce_v2_0_disable_cg(adev);
 393
 394                if (sw_cg)
 395                        vce_v2_0_set_sw_cg(adev, false);
 396                else
 397                        vce_v2_0_set_dyn_cg(adev, false);
 398        }
 399}
 400
 401static int vce_v2_0_early_init(void *handle)
 402{
 403        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 404
 405        adev->vce.num_rings = 2;
 406
 407        vce_v2_0_set_ring_funcs(adev);
 408        vce_v2_0_set_irq_funcs(adev);
 409
 410        return 0;
 411}
 412
/*
 * Software init: register the VCE interrupt source, allocate the VCE
 * BO (firmware + stack + data), resume the common VCE state and create
 * the rings. Returns 0 on success or a negative error code.
 */
static int vce_v2_0_sw_init(void *handle)
{
	struct amdgpu_ring *ring;
	int r, i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* VCE */
	/* src id 167 on the legacy IH client -- presumably the VCE trap
	 * interrupt handled below; confirm against the IH spec. */
	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 167, &adev->vce.irq);
	if (r)
		return r;

	/* One BO covering firmware image, stack and per-handle data. */
	r = amdgpu_vce_sw_init(adev, VCE_V2_0_FW_SIZE +
		VCE_V2_0_STACK_SIZE + VCE_V2_0_DATA_SIZE);
	if (r)
		return r;

	r = amdgpu_vce_resume(adev);
	if (r)
		return r;

	/* Create the rings ("vce0", "vce1"), all sharing irq type 0. */
	for (i = 0; i < adev->vce.num_rings; i++) {
		ring = &adev->vce.ring[i];
		sprintf(ring->name, "vce%d", i);
		r = amdgpu_ring_init(adev, ring, 512, &adev->vce.irq, 0,
				     AMDGPU_RING_PRIO_DEFAULT, NULL);
		if (r)
			return r;
	}

	r = amdgpu_vce_entity_init(adev);

	return r;
}
 446
 447static int vce_v2_0_sw_fini(void *handle)
 448{
 449        int r;
 450        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 451
 452        r = amdgpu_vce_suspend(adev);
 453        if (r)
 454                return r;
 455
 456        return amdgpu_vce_sw_fini(adev);
 457}
 458
 459static int vce_v2_0_hw_init(void *handle)
 460{
 461        int r, i;
 462        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 463
 464        amdgpu_asic_set_vce_clocks(adev, 10000, 10000);
 465        vce_v2_0_enable_mgcg(adev, true, false);
 466
 467        for (i = 0; i < adev->vce.num_rings; i++) {
 468                r = amdgpu_ring_test_helper(&adev->vce.ring[i]);
 469                if (r)
 470                        return r;
 471        }
 472
 473        DRM_INFO("VCE initialized successfully.\n");
 474
 475        return 0;
 476}
 477
static int vce_v2_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/*
	 * Proper cleanups before halting the HW engine:
	 *   - cancel the delayed idle work
	 *   - enable powergating
	 *   - enable clockgating
	 *   - disable dpm
	 *
	 * TODO: to align with the VCN implementation, move the
	 * jobs for clockgating/powergating/dpm setting to
	 * ->set_powergating_state().
	 */
	cancel_delayed_work_sync(&adev->vce.idle_work);

	if (adev->pm.dpm_enabled) {
		/* DPM owns the gating; just tell it VCE is off. */
		amdgpu_dpm_enable_vce(adev, false);
	} else {
		/* No DPM: drop the clocks and gate power/clock manually. */
		amdgpu_asic_set_vce_clocks(adev, 0, 0);
		amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
						       AMD_PG_STATE_GATE);
		amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
						       AMD_CG_STATE_GATE);
	}

	return 0;
}
 507
 508static int vce_v2_0_suspend(void *handle)
 509{
 510        int r;
 511        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 512
 513        r = vce_v2_0_hw_fini(adev);
 514        if (r)
 515                return r;
 516
 517        return amdgpu_vce_suspend(adev);
 518}
 519
 520static int vce_v2_0_resume(void *handle)
 521{
 522        int r;
 523        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 524
 525        r = amdgpu_vce_resume(adev);
 526        if (r)
 527                return r;
 528
 529        return vce_v2_0_hw_init(adev);
 530}
 531
/* Soft reset: assert the VCE reset bit in the SRBM, give the hardware
 * a moment to settle, then bring the block back up from scratch. */
static int vce_v2_0_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	WREG32_FIELD(SRBM_SOFT_RESET, SOFT_RESET_VCE, 1);
	mdelay(5);

	return vce_v2_0_start(adev);
}
 541
 542static int vce_v2_0_set_interrupt_state(struct amdgpu_device *adev,
 543                                        struct amdgpu_irq_src *source,
 544                                        unsigned type,
 545                                        enum amdgpu_interrupt_state state)
 546{
 547        uint32_t val = 0;
 548
 549        if (state == AMDGPU_IRQ_STATE_ENABLE)
 550                val |= VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK;
 551
 552        WREG32_P(mmVCE_SYS_INT_EN, val, ~VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK);
 553        return 0;
 554}
 555
 556static int vce_v2_0_process_interrupt(struct amdgpu_device *adev,
 557                                      struct amdgpu_irq_src *source,
 558                                      struct amdgpu_iv_entry *entry)
 559{
 560        DRM_DEBUG("IH: VCE\n");
 561        switch (entry->src_data[0]) {
 562        case 0:
 563        case 1:
 564                amdgpu_fence_process(&adev->vce.ring[entry->src_data[0]]);
 565                break;
 566        default:
 567                DRM_ERROR("Unhandled interrupt: %d %d\n",
 568                          entry->src_id, entry->src_data[0]);
 569                break;
 570        }
 571
 572        return 0;
 573}
 574
 575static int vce_v2_0_set_clockgating_state(void *handle,
 576                                          enum amd_clockgating_state state)
 577{
 578        bool gate = false;
 579        bool sw_cg = false;
 580
 581        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 582
 583        if (state == AMD_CG_STATE_GATE) {
 584                gate = true;
 585                sw_cg = true;
 586        }
 587
 588        vce_v2_0_enable_mgcg(adev, gate, sw_cg);
 589
 590        return 0;
 591}
 592
 593static int vce_v2_0_set_powergating_state(void *handle,
 594                                          enum amd_powergating_state state)
 595{
 596        /* This doesn't actually powergate the VCE block.
 597         * That's done in the dpm code via the SMC.  This
 598         * just re-inits the block as necessary.  The actual
 599         * gating still happens in the dpm code.  We should
 600         * revisit this when there is a cleaner line between
 601         * the smc and the hw blocks
 602         */
 603        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 604
 605        if (state == AMD_PG_STATE_GATE)
 606                return vce_v2_0_stop(adev);
 607        else
 608                return vce_v2_0_start(adev);
 609}
 610
/* IP-block lifecycle callbacks for VCE 2.0 (referenced by
 * vce_v2_0_ip_block below). */
static const struct amd_ip_funcs vce_v2_0_ip_funcs = {
	.name = "vce_v2_0",
	.early_init = vce_v2_0_early_init,
	.late_init = NULL,
	.sw_init = vce_v2_0_sw_init,
	.sw_fini = vce_v2_0_sw_fini,
	.hw_init = vce_v2_0_hw_init,
	.hw_fini = vce_v2_0_hw_fini,
	.suspend = vce_v2_0_suspend,
	.resume = vce_v2_0_resume,
	.is_idle = vce_v2_0_is_idle,
	.wait_for_idle = vce_v2_0_wait_for_idle,
	.soft_reset = vce_v2_0_soft_reset,
	.set_clockgating_state = vce_v2_0_set_clockgating_state,
	.set_powergating_state = vce_v2_0_set_powergating_state,
};
 627
/* Ring callbacks shared by both VCE rings; emit/test/parse helpers come
 * from the common amdgpu_vce code. */
static const struct amdgpu_ring_funcs vce_v2_0_ring_funcs = {
	.type = AMDGPU_RING_TYPE_VCE,
	.align_mask = 0xf,
	.nop = VCE_CMD_NO_OP,
	.support_64bit_ptrs = false,
	.no_user_fence = true,
	.get_rptr = vce_v2_0_ring_get_rptr,
	.get_wptr = vce_v2_0_ring_get_wptr,
	.set_wptr = vce_v2_0_ring_set_wptr,
	.parse_cs = amdgpu_vce_ring_parse_cs,
	.emit_frame_size = 6, /* amdgpu_vce_ring_emit_fence  x1 no user fence */
	.emit_ib_size = 4, /* amdgpu_vce_ring_emit_ib */
	.emit_ib = amdgpu_vce_ring_emit_ib,
	.emit_fence = amdgpu_vce_ring_emit_fence,
	.test_ring = amdgpu_vce_ring_test_ring,
	.test_ib = amdgpu_vce_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_vce_ring_begin_use,
	.end_use = amdgpu_vce_ring_end_use,
};
 649
 650static void vce_v2_0_set_ring_funcs(struct amdgpu_device *adev)
 651{
 652        int i;
 653
 654        for (i = 0; i < adev->vce.num_rings; i++) {
 655                adev->vce.ring[i].funcs = &vce_v2_0_ring_funcs;
 656                adev->vce.ring[i].me = i;
 657        }
 658}
 659
/* Interrupt source callbacks: state programming and IH processing. */
static const struct amdgpu_irq_src_funcs vce_v2_0_irq_funcs = {
	.set = vce_v2_0_set_interrupt_state,
	.process = vce_v2_0_process_interrupt,
};
 664
 665static void vce_v2_0_set_irq_funcs(struct amdgpu_device *adev)
 666{
 667        adev->vce.irq.num_types = 1;
 668        adev->vce.irq.funcs = &vce_v2_0_irq_funcs;
 669};
 670
/* Exported IP block descriptor consumed by the amdgpu device init code
 * when building the per-ASIC IP block list. */
const struct amdgpu_ip_block_version vce_v2_0_ip_block =
{
		.type = AMD_IP_BLOCK_TYPE_VCE,
		.major = 2,
		.minor = 0,
		.rev = 0,
		.funcs = &vce_v2_0_ip_funcs,
};
 679