/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König <christian.koenig@amd.com>
 */

#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_uvd.h"
#include "vid.h"
#include "uvd/uvd_6_0_d.h"
#include "uvd/uvd_6_0_sh_mask.h"
#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"
#include "smu/smu_7_1_3_d.h"
#include "smu/smu_7_1_3_sh_mask.h"
#include "bif/bif_5_1_d.h"
#include "gmc/gmc_8_1_d.h"
#include "vi.h"

static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev);
static void uvd_v6_0_set_irq_funcs(struct amdgpu_device *adev);
static int uvd_v6_0_start(struct amdgpu_device *adev);
static void uvd_v6_0_stop(struct amdgpu_device *adev);
static void uvd_v6_0_set_sw_clock_gating(struct amdgpu_device *adev);
static int uvd_v6_0_set_clockgating_state(void *handle,
                                          enum amd_clockgating_state state);
static void uvd_v6_0_enable_mgcg(struct amdgpu_device *adev,
                                 bool enable);

/**
 * uvd_v6_0_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint64_t uvd_v6_0_ring_get_rptr(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;

        return RREG32(mmUVD_RBC_RB_RPTR);
}

/**
 * uvd_v6_0_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint64_t uvd_v6_0_ring_get_wptr(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;

        return RREG32(mmUVD_RBC_RB_WPTR);
}

/**
 * uvd_v6_0_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void uvd_v6_0_ring_set_wptr(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;

        WREG32(mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
}

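/**
 * uvd_v6_0_early_init - set ring and irq callbacks
 *
 * @handle: amdgpu_device pointer
 *
 * Set up the UVD ring and interrupt handler function pointers.
 */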
static int uvd_v6_0_early_init(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        uvd_v6_0_set_ring_funcs(adev);
        uvd_v6_0_set_irq_funcs(adev);

        return 0;
}

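/**
 * uvd_v6_0_sw_init - software init
 *
 * @handle: amdgpu_device pointer
 *
 * Register the UVD TRAP interrupt source, do the common UVD software
 * initialization and set up the UVD ring.
 */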
static int uvd_v6_0_sw_init(void *handle)
{
        struct amdgpu_ring *ring;
        int r;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        /* UVD TRAP */
        r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 124, &adev->uvd.irq);
        if (r)
                return r;

        r = amdgpu_uvd_sw_init(adev);
        if (r)
                return r;

        r = amdgpu_uvd_resume(adev);
        if (r)
                return r;

        ring = &adev->uvd.ring;
        sprintf(ring->name, "uvd");
        r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.irq, 0);

        return r;
}

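/**
 * uvd_v6_0_sw_fini - software teardown
 *
 * @handle: amdgpu_device pointer
 *
 * Suspend the UVD block and free the common UVD software state.
 */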
static int uvd_v6_0_sw_fini(void *handle)
{
        int r;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        r = amdgpu_uvd_suspend(adev);
        if (r)
                return r;

        return amdgpu_uvd_sw_fini(adev);
}

/**
 * uvd_v6_0_hw_init - start and test UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */
static int uvd_v6_0_hw_init(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        struct amdgpu_ring *ring = &adev->uvd.ring;
        uint32_t tmp;
        int r;

        amdgpu_asic_set_uvd_clocks(adev, 10000, 10000);
        uvd_v6_0_set_clockgating_state(adev, AMD_CG_STATE_UNGATE);
        uvd_v6_0_enable_mgcg(adev, true);

        ring->ready = true;
        r = amdgpu_ring_test_ring(ring);
        if (r) {
                ring->ready = false;
                goto done;
        }

        r = amdgpu_ring_alloc(ring, 10);
        if (r) {
                DRM_ERROR("amdgpu: ring failed to lock UVD ring (%d).\n", r);
                goto done;
        }

        tmp = PACKET0(mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL, 0);
        amdgpu_ring_write(ring, tmp);
        amdgpu_ring_write(ring, 0xFFFFF);

        tmp = PACKET0(mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL, 0);
        amdgpu_ring_write(ring, tmp);
        amdgpu_ring_write(ring, 0xFFFFF);

        tmp = PACKET0(mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL, 0);
        amdgpu_ring_write(ring, tmp);
        amdgpu_ring_write(ring, 0xFFFFF);

        /* Clear timeout status bits */
        amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_TIMEOUT_STATUS, 0));
        amdgpu_ring_write(ring, 0x8);

        amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_CNTL, 0));
        amdgpu_ring_write(ring, 3);

        amdgpu_ring_commit(ring);

done:
        if (!r)
                DRM_INFO("UVD initialized successfully.\n");

        return r;
}

/**
 * uvd_v6_0_hw_fini - stop the hardware block
 *
 * @adev: amdgpu_device pointer
 *
 * Stop the UVD block, mark ring as not ready any more
 */
static int uvd_v6_0_hw_fini(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        struct amdgpu_ring *ring = &adev->uvd.ring;

        if (RREG32(mmUVD_STATUS) != 0)
                uvd_v6_0_stop(adev);

        ring->ready = false;

        return 0;
}

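/**
 * uvd_v6_0_suspend - suspend the UVD block
 *
 * @handle: amdgpu_device pointer
 *
 * Stop the UVD hardware and, on dGPUs, save the UVD state.
 */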
static int uvd_v6_0_suspend(void *handle)
{
        int r;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        r = uvd_v6_0_hw_fini(adev);
        if (r)
                return r;

        /* Skip this for APU for now */
        if (!(adev->flags & AMD_IS_APU))
                r = amdgpu_uvd_suspend(adev);

        return r;
}

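/**
 * uvd_v6_0_resume - resume the UVD block
 *
 * @handle: amdgpu_device pointer
 *
 * Restore the UVD state on dGPUs and re-initialize the hardware.
 */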
static int uvd_v6_0_resume(void *handle)
{
        int r;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        /* Skip this for APU for now */
        if (!(adev->flags & AMD_IS_APU)) {
                r = amdgpu_uvd_resume(adev);
                if (r)
                        return r;
        }
        return uvd_v6_0_hw_init(adev);
}

/**
 * uvd_v6_0_mc_resume - memory controller programming
 *
 * @adev: amdgpu_device pointer
 *
 * Let the UVD memory controller know its offsets
 */
static void uvd_v6_0_mc_resume(struct amdgpu_device *adev)
{
        uint64_t offset;
        uint32_t size;

        /* program memory controller bits 0-27 */
        WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
                        lower_32_bits(adev->uvd.gpu_addr));
        WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
                        upper_32_bits(adev->uvd.gpu_addr));

        offset = AMDGPU_UVD_FIRMWARE_OFFSET;
        size = AMDGPU_GPU_PAGE_ALIGN(adev->uvd.fw->size + 4);
        WREG32(mmUVD_VCPU_CACHE_OFFSET0, offset >> 3);
        WREG32(mmUVD_VCPU_CACHE_SIZE0, size);

        offset += size;
        size = AMDGPU_UVD_HEAP_SIZE;
        WREG32(mmUVD_VCPU_CACHE_OFFSET1, offset >> 3);
        WREG32(mmUVD_VCPU_CACHE_SIZE1, size);

        offset += size;
        size = AMDGPU_UVD_STACK_SIZE +
               (AMDGPU_UVD_SESSION_SIZE * adev->uvd.max_handles);
        WREG32(mmUVD_VCPU_CACHE_OFFSET2, offset >> 3);
        WREG32(mmUVD_VCPU_CACHE_SIZE2, size);

        WREG32(mmUVD_UDEC_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
        WREG32(mmUVD_UDEC_DB_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
        WREG32(mmUVD_UDEC_DBW_ADDR_CONFIG, adev->gfx.config.gb_addr_config);

        WREG32(mmUVD_GP_SCRATCH4, adev->uvd.max_handles);
}

#if 0
static void cz_set_uvd_clock_gating_branches(struct amdgpu_device *adev,
                bool enable)
{
        u32 data, data1;

        data = RREG32(mmUVD_CGC_GATE);
        data1 = RREG32(mmUVD_SUVD_CGC_GATE);
        if (enable) {
                data |= UVD_CGC_GATE__SYS_MASK |
                                UVD_CGC_GATE__UDEC_MASK |
                                UVD_CGC_GATE__MPEG2_MASK |
                                UVD_CGC_GATE__RBC_MASK |
                                UVD_CGC_GATE__LMI_MC_MASK |
                                UVD_CGC_GATE__IDCT_MASK |
                                UVD_CGC_GATE__MPRD_MASK |
                                UVD_CGC_GATE__MPC_MASK |
                                UVD_CGC_GATE__LBSI_MASK |
                                UVD_CGC_GATE__LRBBM_MASK |
                                UVD_CGC_GATE__UDEC_RE_MASK |
                                UVD_CGC_GATE__UDEC_CM_MASK |
                                UVD_CGC_GATE__UDEC_IT_MASK |
                                UVD_CGC_GATE__UDEC_DB_MASK |
                                UVD_CGC_GATE__UDEC_MP_MASK |
                                UVD_CGC_GATE__WCB_MASK |
                                UVD_CGC_GATE__VCPU_MASK |
                                UVD_CGC_GATE__SCPU_MASK;
                data1 |= UVD_SUVD_CGC_GATE__SRE_MASK |
                                UVD_SUVD_CGC_GATE__SIT_MASK |
                                UVD_SUVD_CGC_GATE__SMP_MASK |
                                UVD_SUVD_CGC_GATE__SCM_MASK |
                                UVD_SUVD_CGC_GATE__SDB_MASK |
                                UVD_SUVD_CGC_GATE__SRE_H264_MASK |
                                UVD_SUVD_CGC_GATE__SRE_HEVC_MASK |
                                UVD_SUVD_CGC_GATE__SIT_H264_MASK |
                                UVD_SUVD_CGC_GATE__SIT_HEVC_MASK |
                                UVD_SUVD_CGC_GATE__SCM_H264_MASK |
                                UVD_SUVD_CGC_GATE__SCM_HEVC_MASK |
                                UVD_SUVD_CGC_GATE__SDB_H264_MASK |
                                UVD_SUVD_CGC_GATE__SDB_HEVC_MASK;
        } else {
                data &= ~(UVD_CGC_GATE__SYS_MASK |
                                UVD_CGC_GATE__UDEC_MASK |
                                UVD_CGC_GATE__MPEG2_MASK |
                                UVD_CGC_GATE__RBC_MASK |
                                UVD_CGC_GATE__LMI_MC_MASK |
                                UVD_CGC_GATE__LMI_UMC_MASK |
                                UVD_CGC_GATE__IDCT_MASK |
                                UVD_CGC_GATE__MPRD_MASK |
                                UVD_CGC_GATE__MPC_MASK |
                                UVD_CGC_GATE__LBSI_MASK |
                                UVD_CGC_GATE__LRBBM_MASK |
                                UVD_CGC_GATE__UDEC_RE_MASK |
                                UVD_CGC_GATE__UDEC_CM_MASK |
                                UVD_CGC_GATE__UDEC_IT_MASK |
                                UVD_CGC_GATE__UDEC_DB_MASK |
                                UVD_CGC_GATE__UDEC_MP_MASK |
                                UVD_CGC_GATE__WCB_MASK |
                                UVD_CGC_GATE__VCPU_MASK |
                                UVD_CGC_GATE__SCPU_MASK);
                data1 &= ~(UVD_SUVD_CGC_GATE__SRE_MASK |
                                UVD_SUVD_CGC_GATE__SIT_MASK |
                                UVD_SUVD_CGC_GATE__SMP_MASK |
                                UVD_SUVD_CGC_GATE__SCM_MASK |
                                UVD_SUVD_CGC_GATE__SDB_MASK |
                                UVD_SUVD_CGC_GATE__SRE_H264_MASK |
                                UVD_SUVD_CGC_GATE__SRE_HEVC_MASK |
                                UVD_SUVD_CGC_GATE__SIT_H264_MASK |
                                UVD_SUVD_CGC_GATE__SIT_HEVC_MASK |
                                UVD_SUVD_CGC_GATE__SCM_H264_MASK |
                                UVD_SUVD_CGC_GATE__SCM_HEVC_MASK |
                                UVD_SUVD_CGC_GATE__SDB_H264_MASK |
                                UVD_SUVD_CGC_GATE__SDB_HEVC_MASK);
        }
        WREG32(mmUVD_CGC_GATE, data);
        WREG32(mmUVD_SUVD_CGC_GATE, data1);
}
#endif

/**
 * uvd_v6_0_start - start UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * Setup and start the UVD block
 */
static int uvd_v6_0_start(struct amdgpu_device *adev)
{
        struct amdgpu_ring *ring = &adev->uvd.ring;
        uint32_t rb_bufsz, tmp;
        uint32_t lmi_swap_cntl;
        uint32_t mp_swap_cntl;
        int i, j, r;

        /* disable DPG */
        WREG32_P(mmUVD_POWER_STATUS, 0, ~UVD_POWER_STATUS__UVD_PG_MODE_MASK);

        /* disable byte swapping */
        lmi_swap_cntl = 0;
        mp_swap_cntl = 0;

        uvd_v6_0_mc_resume(adev);

        /* disable interrupt */
        WREG32_FIELD(UVD_MASTINT_EN, VCPU_EN, 0);

        /* stall UMC and register bus before resetting VCPU */
        WREG32_FIELD(UVD_LMI_CTRL2, STALL_ARB_UMC, 1);
        mdelay(1);

        /* put LMI, VCPU, RBC etc... into reset */
        WREG32(mmUVD_SOFT_RESET,
                UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
                UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK |
                UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK |
                UVD_SOFT_RESET__RBC_SOFT_RESET_MASK |
                UVD_SOFT_RESET__CSM_SOFT_RESET_MASK |
                UVD_SOFT_RESET__CXW_SOFT_RESET_MASK |
                UVD_SOFT_RESET__TAP_SOFT_RESET_MASK |
                UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);
        mdelay(5);

        /* take UVD block out of reset */
        WREG32_FIELD(SRBM_SOFT_RESET, SOFT_RESET_UVD, 0);
        mdelay(5);

        /* initialize UVD memory controller */
        WREG32(mmUVD_LMI_CTRL,
                (0x40 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
                UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
                UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
                UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
                UVD_LMI_CTRL__REQ_MODE_MASK |
                UVD_LMI_CTRL__DISABLE_ON_FWV_FAIL_MASK);

#ifdef __BIG_ENDIAN
        /* swap (8 in 32) RB and IB */
        lmi_swap_cntl = 0xa;
        mp_swap_cntl = 0;
#endif
        WREG32(mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl);
        WREG32(mmUVD_MP_SWAP_CNTL, mp_swap_cntl);

        WREG32(mmUVD_MPC_SET_MUXA0, 0x40c2040);
        WREG32(mmUVD_MPC_SET_MUXA1, 0x0);
        WREG32(mmUVD_MPC_SET_MUXB0, 0x40c2040);
        WREG32(mmUVD_MPC_SET_MUXB1, 0x0);
        WREG32(mmUVD_MPC_SET_ALU, 0);
        WREG32(mmUVD_MPC_SET_MUX, 0x88);

        /* take all subblocks out of reset, except VCPU */
        WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
        mdelay(5);

        /* enable VCPU clock */
        WREG32(mmUVD_VCPU_CNTL, UVD_VCPU_CNTL__CLK_EN_MASK);

        /* enable UMC */
        WREG32_FIELD(UVD_LMI_CTRL2, STALL_ARB_UMC, 0);

        /* boot up the VCPU */
        WREG32(mmUVD_SOFT_RESET, 0);
        mdelay(10);

        for (i = 0; i < 10; ++i) {
                uint32_t status;

                for (j = 0; j < 100; ++j) {
                        status = RREG32(mmUVD_STATUS);
                        if (status & 2)
                                break;
                        mdelay(10);
                }
                r = 0;
                if (status & 2)
                        break;

                DRM_ERROR("UVD not responding, trying to reset the VCPU!!!\n");
                WREG32_FIELD(UVD_SOFT_RESET, VCPU_SOFT_RESET, 1);
                mdelay(10);
                WREG32_FIELD(UVD_SOFT_RESET, VCPU_SOFT_RESET, 0);
                mdelay(10);
                r = -1;
        }

        if (r) {
                DRM_ERROR("UVD not responding, giving up!!!\n");
                return r;
        }
        /* enable master interrupt */
        WREG32_P(mmUVD_MASTINT_EN,
                (UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK),
                ~(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK));

        /* clear bit 4 of UVD_STATUS */
        WREG32_P(mmUVD_STATUS, 0, ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));

        /* force RBC into idle state */
        rb_bufsz = order_base_2(ring->ring_size);
        tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
        tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
        tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
        tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_WPTR_POLL_EN, 0);
        tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
        tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
        WREG32(mmUVD_RBC_RB_CNTL, tmp);

        /* set the write pointer delay */
        WREG32(mmUVD_RBC_RB_WPTR_CNTL, 0);

        /* set the wb address */
        WREG32(mmUVD_RBC_RB_RPTR_ADDR, (upper_32_bits(ring->gpu_addr) >> 2));

        /* program the RB_BASE for ring buffer */
        WREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
                        lower_32_bits(ring->gpu_addr));
        WREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
                        upper_32_bits(ring->gpu_addr));

        /* Initialize the ring buffer's read and write pointers */
        WREG32(mmUVD_RBC_RB_RPTR, 0);

        ring->wptr = RREG32(mmUVD_RBC_RB_RPTR);
        WREG32(mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));

        WREG32_FIELD(UVD_RBC_RB_CNTL, RB_NO_FETCH, 0);

        return 0;
}

/**
 * uvd_v6_0_stop - stop UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * stop the UVD block
 */
static void uvd_v6_0_stop(struct amdgpu_device *adev)
{
        /* force RBC into idle state */
        WREG32(mmUVD_RBC_RB_CNTL, 0x11010101);

        /* Stall UMC and register bus before resetting VCPU */
        WREG32_P(mmUVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
        mdelay(1);

        /* put VCPU into reset */
        WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
        mdelay(5);

        /* disable VCPU clock */
        WREG32(mmUVD_VCPU_CNTL, 0x0);

        /* Unstall UMC and register bus */
        WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8));

        WREG32(mmUVD_STATUS, 0);
}

/**
 * uvd_v6_0_ring_emit_fence - emit a fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @addr: address
 * @seq: sequence number
 * @flags: fence related flags
 *
 * Write a fence and a trap command to the ring.
 */
static void uvd_v6_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
                                     unsigned flags)
{
        WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

        amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
        amdgpu_ring_write(ring, seq);
        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
        amdgpu_ring_write(ring, addr & 0xffffffff);
        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
        amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);
        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
        amdgpu_ring_write(ring, 0);

        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
        amdgpu_ring_write(ring, 0);
        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
        amdgpu_ring_write(ring, 0);
        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
        amdgpu_ring_write(ring, 2);
}

/**
 * uvd_v6_0_ring_emit_hdp_flush - emit an hdp flush
 *
 * @ring: amdgpu_ring pointer
 *
 * Emits an hdp flush.
 */
static void uvd_v6_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
        amdgpu_ring_write(ring, PACKET0(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 0));
        amdgpu_ring_write(ring, 0);
}

/**
 * uvd_v6_0_ring_emit_hdp_invalidate - emit an hdp invalidate
 *
 * @ring: amdgpu_ring pointer
 *
 * Emits an hdp invalidate.
 */
static void uvd_v6_0_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
{
        amdgpu_ring_write(ring, PACKET0(mmHDP_DEBUG0, 0));
        amdgpu_ring_write(ring, 1);
}

/**
 * uvd_v6_0_ring_test_ring - register write test
 *
 * @ring: amdgpu_ring pointer
 *
 * Test if we can successfully write to the context register
 */
static int uvd_v6_0_ring_test_ring(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;
        uint32_t tmp = 0;
        unsigned i;
        int r;

        WREG32(mmUVD_CONTEXT_ID, 0xCAFEDEAD);
        r = amdgpu_ring_alloc(ring, 3);
        if (r) {
                DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
                          ring->idx, r);
                return r;
        }
        amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
        amdgpu_ring_write(ring, 0xDEADBEEF);
        amdgpu_ring_commit(ring);
        for (i = 0; i < adev->usec_timeout; i++) {
                tmp = RREG32(mmUVD_CONTEXT_ID);
                if (tmp == 0xDEADBEEF)
                        break;
                DRM_UDELAY(1);
        }

        if (i < adev->usec_timeout) {
                DRM_INFO("ring test on %d succeeded in %d usecs\n",
                         ring->idx, i);
        } else {
                DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
                          ring->idx, tmp);
                r = -EINVAL;
        }
        return r;
}

/**
 * uvd_v6_0_ring_emit_ib - execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @ib: indirect buffer to execute
 *
 * Write ring commands to execute the indirect buffer
 */
static void uvd_v6_0_ring_emit_ib(struct amdgpu_ring *ring,
                                  struct amdgpu_ib *ib,
                                  unsigned vm_id, bool ctx_switch)
{
        amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_VMID, 0));
        amdgpu_ring_write(ring, vm_id);

        amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_LOW, 0));
        amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
        amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH, 0));
        amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
        amdgpu_ring_write(ring, PACKET0(mmUVD_RBC_IB_SIZE, 0));
        amdgpu_ring_write(ring, ib->length_dw);
}

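/**
 * uvd_v6_0_ring_emit_vm_flush - emit a VM flush
 *
 * @ring: amdgpu_ring pointer
 * @vm_id: VM id to flush
 * @pd_addr: page directory address
 *
 * Write ring commands to update the page table base address and
 * flush the VM for the given vm_id.
 */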
static void uvd_v6_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
                                         unsigned vm_id, uint64_t pd_addr)
{
        uint32_t reg;

        if (vm_id < 8)
                reg = mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id;
        else
                reg = mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vm_id - 8;

        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
        amdgpu_ring_write(ring, reg << 2);
        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
        amdgpu_ring_write(ring, pd_addr >> 12);
        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
        amdgpu_ring_write(ring, 0x8);

        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
        amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST << 2);
        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
        amdgpu_ring_write(ring, 1 << vm_id);
        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
        amdgpu_ring_write(ring, 0x8);

        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
        amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST << 2);
        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
        amdgpu_ring_write(ring, 0);
        amdgpu_ring_write(ring, PACKET0(mmUVD_GP_SCRATCH8, 0));
        amdgpu_ring_write(ring, 1 << vm_id); /* mask */
        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
        amdgpu_ring_write(ring, 0xC);
}

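/**
 * uvd_v6_0_ring_emit_pipeline_sync - emit a pipeline sync
 *
 * @ring: amdgpu_ring pointer
 *
 * Emit commands that make the ring wait until the last emitted fence
 * sequence number has signaled before processing further commands.
 */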
static void uvd_v6_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
{
        uint32_t seq = ring->fence_drv.sync_seq;
        uint64_t addr = ring->fence_drv.gpu_addr;

        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
        amdgpu_ring_write(ring, lower_32_bits(addr));
        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
        amdgpu_ring_write(ring, upper_32_bits(addr));
        amdgpu_ring_write(ring, PACKET0(mmUVD_GP_SCRATCH8, 0));
        amdgpu_ring_write(ring, 0xffffffff); /* mask */
        amdgpu_ring_write(ring, PACKET0(mmUVD_GP_SCRATCH9, 0));
        amdgpu_ring_write(ring, seq);
        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
        amdgpu_ring_write(ring, 0xE);
}

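/**
 * uvd_v6_0_is_idle - check if the UVD block is idle
 *
 * @handle: amdgpu_device pointer
 *
 * Check whether SRBM_STATUS reports the UVD block as idle.
 */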
static bool uvd_v6_0_is_idle(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK);
}

static int uvd_v6_0_wait_for_idle(void *handle)
{
        unsigned i;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        for (i = 0; i < adev->usec_timeout; i++) {
                if (uvd_v6_0_is_idle(handle))
                        return 0;
        }
        return -ETIMEDOUT;
}

#define AMDGPU_UVD_STATUS_BUSY_MASK    0xfd
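/**
 * uvd_v6_0_check_soft_reset - check whether UVD needs a soft reset
 *
 * @handle: amdgpu_device pointer
 *
 * Check the SRBM and UVD status registers and record in
 * adev->uvd.srbm_soft_reset whether a UVD soft reset is required.
 */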
static bool uvd_v6_0_check_soft_reset(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        u32 srbm_soft_reset = 0;
        u32 tmp = RREG32(mmSRBM_STATUS);

        if (REG_GET_FIELD(tmp, SRBM_STATUS, UVD_RQ_PENDING) ||
            REG_GET_FIELD(tmp, SRBM_STATUS, UVD_BUSY) ||
            (RREG32(mmUVD_STATUS) & AMDGPU_UVD_STATUS_BUSY_MASK))
                srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_UVD, 1);

        if (srbm_soft_reset) {
                adev->uvd.srbm_soft_reset = srbm_soft_reset;
                return true;
        } else {
                adev->uvd.srbm_soft_reset = 0;
                return false;
        }
}

static int uvd_v6_0_pre_soft_reset(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        if (!adev->uvd.srbm_soft_reset)
                return 0;

        uvd_v6_0_stop(adev);
        return 0;
}

static int uvd_v6_0_soft_reset(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        u32 srbm_soft_reset;

        if (!adev->uvd.srbm_soft_reset)
                return 0;
        srbm_soft_reset = adev->uvd.srbm_soft_reset;

        if (srbm_soft_reset) {
                u32 tmp;

                tmp = RREG32(mmSRBM_SOFT_RESET);
                tmp |= srbm_soft_reset;
                dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
                WREG32(mmSRBM_SOFT_RESET, tmp);
                tmp = RREG32(mmSRBM_SOFT_RESET);

                udelay(50);

                tmp &= ~srbm_soft_reset;
                WREG32(mmSRBM_SOFT_RESET, tmp);
                tmp = RREG32(mmSRBM_SOFT_RESET);

                /* Wait a little for things to settle down */
                udelay(50);
        }

        return 0;
}

static int uvd_v6_0_post_soft_reset(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        if (!adev->uvd.srbm_soft_reset)
                return 0;

        mdelay(5);

        return uvd_v6_0_start(adev);
}

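/**
 * uvd_v6_0_set_interrupt_state - set UVD interrupt state
 *
 * @adev: amdgpu_device pointer
 * @source: interrupt source
 * @type: interrupt type
 * @state: requested interrupt state
 *
 * Placeholder for enabling/disabling the UVD trap interrupt
 * (currently a no-op).
 */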
static int uvd_v6_0_set_interrupt_state(struct amdgpu_device *adev,
                                        struct amdgpu_irq_src *source,
                                        unsigned type,
                                        enum amdgpu_interrupt_state state)
{
        /* TODO */
        return 0;
}

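/**
 * uvd_v6_0_process_interrupt - handle a UVD trap interrupt
 *
 * @adev: amdgpu_device pointer
 * @source: interrupt source
 * @entry: interrupt vector entry
 *
 * Process the fences on the UVD ring when a UVD trap fires.
 */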
static int uvd_v6_0_process_interrupt(struct amdgpu_device *adev,
                                      struct amdgpu_irq_src *source,
                                      struct amdgpu_iv_entry *entry)
{
        DRM_DEBUG("IH: UVD TRAP\n");
        amdgpu_fence_process(&adev->uvd.ring);
        return 0;
}

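/**
 * uvd_v6_0_enable_clock_gating - program the UVD clock gates
 *
 * @adev: amdgpu_device pointer
 * @enable: enable or disable the coarse clock gates
 *
 * Program UVD_SUVD_CGC_GATE and UVD_CGC_GATE; when @enable is false
 * the main UVD clock gates are cleared.
 */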
static void uvd_v6_0_enable_clock_gating(struct amdgpu_device *adev, bool enable)
{
        uint32_t data1, data3;

        data1 = RREG32(mmUVD_SUVD_CGC_GATE);
        data3 = RREG32(mmUVD_CGC_GATE);

        data1 |= UVD_SUVD_CGC_GATE__SRE_MASK |
                     UVD_SUVD_CGC_GATE__SIT_MASK |
                     UVD_SUVD_CGC_GATE__SMP_MASK |
                     UVD_SUVD_CGC_GATE__SCM_MASK |
                     UVD_SUVD_CGC_GATE__SDB_MASK |
                     UVD_SUVD_CGC_GATE__SRE_H264_MASK |
                     UVD_SUVD_CGC_GATE__SRE_HEVC_MASK |
                     UVD_SUVD_CGC_GATE__SIT_H264_MASK |
                     UVD_SUVD_CGC_GATE__SIT_HEVC_MASK |
                     UVD_SUVD_CGC_GATE__SCM_H264_MASK |
                     UVD_SUVD_CGC_GATE__SCM_HEVC_MASK |
                     UVD_SUVD_CGC_GATE__SDB_H264_MASK |
                     UVD_SUVD_CGC_GATE__SDB_HEVC_MASK;

        if (enable) {
                data3 |= (UVD_CGC_GATE__SYS_MASK       |
                        UVD_CGC_GATE__UDEC_MASK      |
                        UVD_CGC_GATE__MPEG2_MASK     |
                        UVD_CGC_GATE__RBC_MASK       |
                        UVD_CGC_GATE__LMI_MC_MASK    |
                        UVD_CGC_GATE__LMI_UMC_MASK   |
                        UVD_CGC_GATE__IDCT_MASK      |
                        UVD_CGC_GATE__MPRD_MASK      |
                        UVD_CGC_GATE__MPC_MASK       |
                        UVD_CGC_GATE__LBSI_MASK      |
                        UVD_CGC_GATE__LRBBM_MASK     |
                        UVD_CGC_GATE__UDEC_RE_MASK   |
                        UVD_CGC_GATE__UDEC_CM_MASK   |
                        UVD_CGC_GATE__UDEC_IT_MASK   |
                        UVD_CGC_GATE__UDEC_DB_MASK   |
                        UVD_CGC_GATE__UDEC_MP_MASK   |
                        UVD_CGC_GATE__WCB_MASK       |
                        UVD_CGC_GATE__JPEG_MASK      |
                        UVD_CGC_GATE__SCPU_MASK      |
                        UVD_CGC_GATE__JPEG2_MASK);
                /* only when PG is enabled can we gate the clock to the VCPU */
                if (adev->pg_flags & AMD_PG_SUPPORT_UVD)
                        data3 |= UVD_CGC_GATE__VCPU_MASK;

                data3 &= ~UVD_CGC_GATE__REGS_MASK;
        } else {
                data3 = 0;
        }

        WREG32(mmUVD_SUVD_CGC_GATE, data1);
        WREG32(mmUVD_CGC_GATE, data3);
}

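/**
 * uvd_v6_0_set_sw_clock_gating - set up software clock gating
 *
 * @adev: amdgpu_device pointer
 *
 * Program UVD_CGC_CTRL and UVD_SUVD_CGC_CTRL for dynamic
 * (software controlled) clock gating.
 */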
static void uvd_v6_0_set_sw_clock_gating(struct amdgpu_device *adev)
{
        uint32_t data, data2;

        data = RREG32(mmUVD_CGC_CTRL);
        data2 = RREG32(mmUVD_SUVD_CGC_CTRL);

        data &= ~(UVD_CGC_CTRL__CLK_OFF_DELAY_MASK |
                  UVD_CGC_CTRL__CLK_GATE_DLY_TIMER_MASK);

        data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK |
                (1 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_GATE_DLY_TIMER)) |
                (4 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_OFF_DELAY));

        data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
                        UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
                        UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
                        UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
                        UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
                        UVD_CGC_CTRL__SYS_MODE_MASK |
                        UVD_CGC_CTRL__UDEC_MODE_MASK |
                        UVD_CGC_CTRL__MPEG2_MODE_MASK |
                        UVD_CGC_CTRL__REGS_MODE_MASK |
                        UVD_CGC_CTRL__RBC_MODE_MASK |
                        UVD_CGC_CTRL__LMI_MC_MODE_MASK |
                        UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
                        UVD_CGC_CTRL__IDCT_MODE_MASK |
                        UVD_CGC_CTRL__MPRD_MODE_MASK |
                        UVD_CGC_CTRL__MPC_MODE_MASK |
                        UVD_CGC_CTRL__LBSI_MODE_MASK |
                        UVD_CGC_CTRL__LRBBM_MODE_MASK |
                        UVD_CGC_CTRL__WCB_MODE_MASK |
                        UVD_CGC_CTRL__VCPU_MODE_MASK |
                        UVD_CGC_CTRL__JPEG_MODE_MASK |
                        UVD_CGC_CTRL__SCPU_MODE_MASK |
                        UVD_CGC_CTRL__JPEG2_MODE_MASK);
        data2 &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK |
                        UVD_SUVD_CGC_CTRL__SIT_MODE_MASK |
                        UVD_SUVD_CGC_CTRL__SMP_MODE_MASK |
                        UVD_SUVD_CGC_CTRL__SCM_MODE_MASK |
                        UVD_SUVD_CGC_CTRL__SDB_MODE_MASK);

        WREG32(mmUVD_CGC_CTRL, data);
        WREG32(mmUVD_SUVD_CGC_CTRL, data2);
}

#if 0
static void uvd_v6_0_set_hw_clock_gating(struct amdgpu_device *adev)
{
        uint32_t data, data1, cgc_flags, suvd_flags;

        data = RREG32(mmUVD_CGC_GATE);
        data1 = RREG32(mmUVD_SUVD_CGC_GATE);

        cgc_flags = UVD_CGC_GATE__SYS_MASK |
                UVD_CGC_GATE__UDEC_MASK |
                UVD_CGC_GATE__MPEG2_MASK |
                UVD_CGC_GATE__RBC_MASK |
                UVD_CGC_GATE__LMI_MC_MASK |
                UVD_CGC_GATE__IDCT_MASK |
                UVD_CGC_GATE__MPRD_MASK |
                UVD_CGC_GATE__MPC_MASK |
                UVD_CGC_GATE__LBSI_MASK |
                UVD_CGC_GATE__LRBBM_MASK |
                UVD_CGC_GATE__UDEC_RE_MASK |
                UVD_CGC_GATE__UDEC_CM_MASK |
                UVD_CGC_GATE__UDEC_IT_MASK |
                UVD_CGC_GATE__UDEC_DB_MASK |
                UVD_CGC_GATE__UDEC_MP_MASK |
                UVD_CGC_GATE__WCB_MASK |
                UVD_CGC_GATE__VCPU_MASK |
                UVD_CGC_GATE__SCPU_MASK |
                UVD_CGC_GATE__JPEG_MASK |
                UVD_CGC_GATE__JPEG2_MASK;

        suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK |
                                UVD_SUVD_CGC_GATE__SIT_MASK |
                                UVD_SUVD_CGC_GATE__SMP_MASK |
                                UVD_SUVD_CGC_GATE__SCM_MASK |
                                UVD_SUVD_CGC_GATE__SDB_MASK;

        data |= cgc_flags;
        data1 |= suvd_flags;

        WREG32(mmUVD_CGC_GATE, data);
        WREG32(mmUVD_SUVD_CGC_GATE, data1);
}
#endif

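/**
 * uvd_v6_0_enable_mgcg - enable/disable medium grain clock gating
 *
 * @adev: amdgpu_device pointer
 * @enable: enable or disable MGCG
 *
 * Program UVD_CGC_MEM_CTRL and UVD_CGC_CTRL for UVD medium grain
 * clock gating, depending on the device's cg_flags.
 */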
static void uvd_v6_0_enable_mgcg(struct amdgpu_device *adev,
                                 bool enable)
{
        u32 orig, data;

        if (enable && (adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG)) {
                data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL);
                data |= 0xfff;
                WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data);

                orig = data = RREG32(mmUVD_CGC_CTRL);
                data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
                if (orig != data)
                        WREG32(mmUVD_CGC_CTRL, data);
        } else {
                data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL);
                data &= ~0xfff;
                WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data);

                orig = data = RREG32(mmUVD_CGC_CTRL);
                data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
                if (orig != data)
                        WREG32(mmUVD_CGC_CTRL, data);
        }
}

static int uvd_v6_0_set_clockgating_state(void *handle,
                                          enum amd_clockgating_state state)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        bool enable = (state == AMD_CG_STATE_GATE);

        if (enable) {
                /* wait for STATUS to clear */
                if (uvd_v6_0_wait_for_idle(handle))
                        return -EBUSY;
                uvd_v6_0_enable_clock_gating(adev, true);
                /* enable HW gates because UVD is idle */
/*              uvd_v6_0_set_hw_clock_gating(adev); */
        } else {
                /* disable HW gating and enable SW gating */
                uvd_v6_0_enable_clock_gating(adev, false);
        }
        uvd_v6_0_set_sw_clock_gating(adev);
        return 0;
}

static int uvd_v6_0_set_powergating_state(void *handle,
                                          enum amd_powergating_state state)
{
        /* This doesn't actually powergate the UVD block.
         * That's done in the dpm code via the SMC.  This
         * just re-inits the block as necessary.  The actual
         * gating still happens in the dpm code.  We should
         * revisit this when there is a cleaner line between
         * the smc and the hw blocks
         */
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        int ret = 0;

        WREG32(mmUVD_POWER_STATUS, UVD_POWER_STATUS__UVD_PG_EN_MASK);

        if (state == AMD_PG_STATE_GATE) {
                uvd_v6_0_stop(adev);
        } else {
                ret = uvd_v6_0_start(adev);
                if (ret)
                        goto out;
        }

out:
        return ret;
}

static void uvd_v6_0_get_clockgating_state(void *handle, u32 *flags)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        int data;

        mutex_lock(&adev->pm.mutex);

        if (adev->flags & AMD_IS_APU)
                data = RREG32_SMC(ixCURRENT_PG_STATUS_APU);
        else
                data = RREG32_SMC(ixCURRENT_PG_STATUS);

        if (data & CURRENT_PG_STATUS__UVD_PG_STATUS_MASK) {
                DRM_INFO("Cannot get clockgating state when UVD is powergated.\n");
                goto out;
        }

        /* AMD_CG_SUPPORT_UVD_MGCG */
        data = RREG32(mmUVD_CGC_CTRL);
        if (data & UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK)
                *flags |= AMD_CG_SUPPORT_UVD_MGCG;

out:
        mutex_unlock(&adev->pm.mutex);
}

static const struct amd_ip_funcs uvd_v6_0_ip_funcs = {
        .name = "uvd_v6_0",
        .early_init = uvd_v6_0_early_init,
        .late_init = NULL,
        .sw_init = uvd_v6_0_sw_init,
        .sw_fini = uvd_v6_0_sw_fini,
        .hw_init = uvd_v6_0_hw_init,
        .hw_fini = uvd_v6_0_hw_fini,
        .suspend = uvd_v6_0_suspend,
        .resume = uvd_v6_0_resume,
        .is_idle = uvd_v6_0_is_idle,
        .wait_for_idle = uvd_v6_0_wait_for_idle,
        .check_soft_reset = uvd_v6_0_check_soft_reset,
        .pre_soft_reset = uvd_v6_0_pre_soft_reset,
        .soft_reset = uvd_v6_0_soft_reset,
        .post_soft_reset = uvd_v6_0_post_soft_reset,
        .set_clockgating_state = uvd_v6_0_set_clockgating_state,
        .set_powergating_state = uvd_v6_0_set_powergating_state,
        .get_clockgating_state = uvd_v6_0_get_clockgating_state,
};

static const struct amdgpu_ring_funcs uvd_v6_0_ring_phys_funcs = {
        .type = AMDGPU_RING_TYPE_UVD,
        .align_mask = 0xf,
        .nop = PACKET0(mmUVD_NO_OP, 0),
        .support_64bit_ptrs = false,
        .get_rptr = uvd_v6_0_ring_get_rptr,
        .get_wptr = uvd_v6_0_ring_get_wptr,
        .set_wptr = uvd_v6_0_ring_set_wptr,
        .parse_cs = amdgpu_uvd_ring_parse_cs,
        .emit_frame_size =
                2 + /* uvd_v6_0_ring_emit_hdp_flush */
                2 + /* uvd_v6_0_ring_emit_hdp_invalidate */
                10 + /* uvd_v6_0_ring_emit_pipeline_sync */
                14, /* uvd_v6_0_ring_emit_fence x1 no user fence */
        .emit_ib_size = 8, /* uvd_v6_0_ring_emit_ib */
        .emit_ib = uvd_v6_0_ring_emit_ib,
        .emit_fence = uvd_v6_0_ring_emit_fence,
        .emit_hdp_flush = uvd_v6_0_ring_emit_hdp_flush,
        .emit_hdp_invalidate = uvd_v6_0_ring_emit_hdp_invalidate,
        .test_ring = uvd_v6_0_ring_test_ring,
        .test_ib = amdgpu_uvd_ring_test_ib,
        .insert_nop = amdgpu_ring_insert_nop,
        .pad_ib = amdgpu_ring_generic_pad_ib,
        .begin_use = amdgpu_uvd_ring_begin_use,
        .end_use = amdgpu_uvd_ring_end_use,
};

static const struct amdgpu_ring_funcs uvd_v6_0_ring_vm_funcs = {
        .type = AMDGPU_RING_TYPE_UVD,
        .align_mask = 0xf,
        .nop = PACKET0(mmUVD_NO_OP, 0),
        .support_64bit_ptrs = false,
        .get_rptr = uvd_v6_0_ring_get_rptr,
        .get_wptr = uvd_v6_0_ring_get_wptr,
        .set_wptr = uvd_v6_0_ring_set_wptr,
        .emit_frame_size =
                2 + /* uvd_v6_0_ring_emit_hdp_flush */
                2 + /* uvd_v6_0_ring_emit_hdp_invalidate */
                10 + /* uvd_v6_0_ring_emit_pipeline_sync */
                20 + /* uvd_v6_0_ring_emit_vm_flush */
                14 + 14, /* uvd_v6_0_ring_emit_fence x2 vm fence */
        .emit_ib_size = 8, /* uvd_v6_0_ring_emit_ib */
        .emit_ib = uvd_v6_0_ring_emit_ib,
        .emit_fence = uvd_v6_0_ring_emit_fence,
        .emit_vm_flush = uvd_v6_0_ring_emit_vm_flush,
        .emit_pipeline_sync = uvd_v6_0_ring_emit_pipeline_sync,
        .emit_hdp_flush = uvd_v6_0_ring_emit_hdp_flush,
        .emit_hdp_invalidate = uvd_v6_0_ring_emit_hdp_invalidate,
        .test_ring = uvd_v6_0_ring_test_ring,
        .test_ib = amdgpu_uvd_ring_test_ib,
        .insert_nop = amdgpu_ring_insert_nop,
        .pad_ib = amdgpu_ring_generic_pad_ib,
        .begin_use = amdgpu_uvd_ring_begin_use,
        .end_use = amdgpu_uvd_ring_end_use,
};

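/**
 * uvd_v6_0_set_ring_funcs - select the UVD ring functions
 *
 * @adev: amdgpu_device pointer
 *
 * Use the VM-capable ring functions on Polaris and newer ASICs,
 * otherwise fall back to the physical-mode ring functions.
 */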
static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev)
{
        if (adev->asic_type >= CHIP_POLARIS10) {
                adev->uvd.ring.funcs = &uvd_v6_0_ring_vm_funcs;
                DRM_INFO("UVD is enabled in VM mode\n");
        } else {
                adev->uvd.ring.funcs = &uvd_v6_0_ring_phys_funcs;
                DRM_INFO("UVD is enabled in physical mode\n");
        }
}

static const struct amdgpu_irq_src_funcs uvd_v6_0_irq_funcs = {
        .set = uvd_v6_0_set_interrupt_state,
        .process = uvd_v6_0_process_interrupt,
};

static void uvd_v6_0_set_irq_funcs(struct amdgpu_device *adev)
{
        adev->uvd.irq.num_types = 1;
        adev->uvd.irq.funcs = &uvd_v6_0_irq_funcs;
}

const struct amdgpu_ip_block_version uvd_v6_0_ip_block =
{
                .type = AMD_IP_BLOCK_TYPE_UVD,
                .major = 6,
                .minor = 0,
                .rev = 0,
                .funcs = &uvd_v6_0_ip_funcs,
};

const struct amdgpu_ip_block_version uvd_v6_2_ip_block =
{
                .type = AMD_IP_BLOCK_TYPE_UVD,
                .major = 6,
                .minor = 2,
                .rev = 0,
                .funcs = &uvd_v6_0_ip_funcs,
};

const struct amdgpu_ip_block_version uvd_v6_3_ip_block =
{
                .type = AMD_IP_BLOCK_TYPE_UVD,
                .major = 6,
                .minor = 3,
                .rev = 0,
                .funcs = &uvd_v6_0_ip_funcs,
};