linux/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König <christian.koenig@amd.com>
 */

#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_uvd.h"
#include "vid.h"
#include "uvd/uvd_5_0_d.h"
#include "uvd/uvd_5_0_sh_mask.h"
#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"

static void uvd_v5_0_set_ring_funcs(struct amdgpu_device *adev);
static void uvd_v5_0_set_irq_funcs(struct amdgpu_device *adev);
static int uvd_v5_0_start(struct amdgpu_device *adev);
static void uvd_v5_0_stop(struct amdgpu_device *adev);

/**
 * uvd_v5_0_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint32_t uvd_v5_0_ring_get_rptr(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;

        return RREG32(mmUVD_RBC_RB_RPTR);
}

/**
 * uvd_v5_0_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint32_t uvd_v5_0_ring_get_wptr(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;

        return RREG32(mmUVD_RBC_RB_WPTR);
}

/**
 * uvd_v5_0_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void uvd_v5_0_ring_set_wptr(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;

        WREG32(mmUVD_RBC_RB_WPTR, ring->wptr);
}

static int uvd_v5_0_early_init(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        uvd_v5_0_set_ring_funcs(adev);
        uvd_v5_0_set_irq_funcs(adev);

        return 0;
}

static int uvd_v5_0_sw_init(void *handle)
{
        struct amdgpu_ring *ring;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        int r;

        /* UVD TRAP */
        r = amdgpu_irq_add_id(adev, 124, &adev->uvd.irq);
        if (r)
                return r;

        r = amdgpu_uvd_sw_init(adev);
        if (r)
                return r;

        r = amdgpu_uvd_resume(adev);
        if (r)
                return r;

        ring = &adev->uvd.ring;
        sprintf(ring->name, "uvd");
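        /* Ring of 4096 units (dwords, going by this era's amdgpu_ring_init
         * convention), padded with CP_PACKET2 nops and aligned to 16 dwords
         * (mask 0xf); interrupt type 0 of the UVD IRQ source drives fence
         * processing.
         */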
        r = amdgpu_ring_init(adev, ring, 4096, CP_PACKET2, 0xf,
                             &adev->uvd.irq, 0, AMDGPU_RING_TYPE_UVD);

        return r;
}

static int uvd_v5_0_sw_fini(void *handle)
{
        int r;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        r = amdgpu_uvd_suspend(adev);
        if (r)
                return r;

        r = amdgpu_uvd_sw_fini(adev);
        if (r)
                return r;

        return r;
}

/**
 * uvd_v5_0_hw_init - start and test UVD block
 *
 * @handle: handle used to identify the IP block
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */
static int uvd_v5_0_hw_init(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        struct amdgpu_ring *ring = &adev->uvd.ring;
        uint32_t tmp;
        int r;

        /* raise clocks while booting up the VCPU */
        amdgpu_asic_set_uvd_clocks(adev, 53300, 40000);

        r = uvd_v5_0_start(adev);
        if (r)
                goto done;

        ring->ready = true;
        r = amdgpu_ring_test_ring(ring);
        if (r) {
                ring->ready = false;
                goto done;
        }

        r = amdgpu_ring_lock(ring, 10);
        if (r) {
                DRM_ERROR("amdgpu: failed to lock UVD ring (%d).\n", r);
                goto done;
        }

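        /* Use PACKET0 register writes over the ring to program the three
         * semaphore timeout registers to their maximum (0xFFFFF), then
         * clear any stale timeout status and enable the semaphore unit.
         */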
        tmp = PACKET0(mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL, 0);
        amdgpu_ring_write(ring, tmp);
        amdgpu_ring_write(ring, 0xFFFFF);

        tmp = PACKET0(mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL, 0);
        amdgpu_ring_write(ring, tmp);
        amdgpu_ring_write(ring, 0xFFFFF);

        tmp = PACKET0(mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL, 0);
        amdgpu_ring_write(ring, tmp);
        amdgpu_ring_write(ring, 0xFFFFF);

        /* Clear timeout status bits */
        amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_TIMEOUT_STATUS, 0));
        amdgpu_ring_write(ring, 0x8);

        amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_CNTL, 0));
        amdgpu_ring_write(ring, 3);

        amdgpu_ring_unlock_commit(ring);

done:
        /* lower clocks again */
        amdgpu_asic_set_uvd_clocks(adev, 0, 0);

        if (!r)
                DRM_INFO("UVD initialized successfully.\n");

        return r;
}

/**
 * uvd_v5_0_hw_fini - stop the hardware block
 *
 * @handle: handle used to identify the IP block
 *
 * Stop the UVD block, mark ring as not ready any more
 */
static int uvd_v5_0_hw_fini(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        struct amdgpu_ring *ring = &adev->uvd.ring;

        uvd_v5_0_stop(adev);
        ring->ready = false;

        return 0;
}

static int uvd_v5_0_suspend(void *handle)
{
        int r;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        /* stop the hardware first, then save the firmware state */
        r = uvd_v5_0_hw_fini(adev);
        if (r)
                return r;

        r = amdgpu_uvd_suspend(adev);
        if (r)
                return r;

        return r;
}

static int uvd_v5_0_resume(void *handle)
{
        int r;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        r = amdgpu_uvd_resume(adev);
        if (r)
                return r;

        r = uvd_v5_0_hw_init(adev);
        if (r)
                return r;

        return r;
}

/**
 * uvd_v5_0_mc_resume - memory controller programming
 *
 * @adev: amdgpu_device pointer
 *
 * Let the UVD memory controller know its offsets
 */
static void uvd_v5_0_mc_resume(struct amdgpu_device *adev)
{
        uint64_t offset;
        uint32_t size;

        /* program the 64-bit base address of the VCPU cache */
        WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
                        lower_32_bits(adev->uvd.gpu_addr));
        WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
                        upper_32_bits(adev->uvd.gpu_addr));

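        /* The VCPU sees three consecutive cache windows: the firmware
         * image, the stack and the heap.  The OFFSETn registers appear
         * to take addresses in 8-byte units, hence the >> 3 below.
         */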
        offset = AMDGPU_UVD_FIRMWARE_OFFSET;
        size = AMDGPU_GPU_PAGE_ALIGN(adev->uvd.fw->size + 4);
        WREG32(mmUVD_VCPU_CACHE_OFFSET0, offset >> 3);
        WREG32(mmUVD_VCPU_CACHE_SIZE0, size);

        offset += size;
        size = AMDGPU_UVD_STACK_SIZE;
        WREG32(mmUVD_VCPU_CACHE_OFFSET1, offset >> 3);
        WREG32(mmUVD_VCPU_CACHE_SIZE1, size);

        offset += size;
        size = AMDGPU_UVD_HEAP_SIZE;
        WREG32(mmUVD_VCPU_CACHE_OFFSET2, offset >> 3);
        WREG32(mmUVD_VCPU_CACHE_SIZE2, size);
}

/**
 * uvd_v5_0_start - start UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * Setup and start the UVD block
 */
static int uvd_v5_0_start(struct amdgpu_device *adev)
{
        struct amdgpu_ring *ring = &adev->uvd.ring;
        uint32_t rb_bufsz, tmp;
        uint32_t lmi_swap_cntl;
        uint32_t mp_swap_cntl;
        int i, j, r;

        /* disable DPG (dynamic power gating) */
        WREG32_P(mmUVD_POWER_STATUS, 0, ~(1 << 2));

        /* disable byte swapping */
        lmi_swap_cntl = 0;
        mp_swap_cntl = 0;

        uvd_v5_0_mc_resume(adev);

        /* disable clock gating */
        WREG32(mmUVD_CGC_GATE, 0);

        /* disable interrupts */
        WREG32_P(mmUVD_MASTINT_EN, 0, ~(1 << 1));

        /* stall UMC and register bus before resetting VCPU */
        WREG32_P(mmUVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
        mdelay(1);

        /* put LMI, VCPU, RBC etc... into reset */
        WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
                UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK | UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK |
                UVD_SOFT_RESET__RBC_SOFT_RESET_MASK | UVD_SOFT_RESET__CSM_SOFT_RESET_MASK |
                UVD_SOFT_RESET__CXW_SOFT_RESET_MASK | UVD_SOFT_RESET__TAP_SOFT_RESET_MASK |
                UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);
        mdelay(5);

        /* take UVD block out of reset */
        WREG32_P(mmSRBM_SOFT_RESET, 0, ~SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK);
        mdelay(5);

        /* initialize UVD memory controller */
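        /* Magic bring-up value inherited from earlier UVD generations:
         * 0x40 is presumably a write-clean timer setting, and the single
         * bits enable assorted LMI features (write-clean timer enable,
         * urgent masking, data coherency, going by the names in the
         * register headers).
         */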
        WREG32(mmUVD_LMI_CTRL, 0x40 | (1 << 8) | (1 << 13) |
                             (1 << 21) | (1 << 9) | (1 << 20));

#ifdef __BIG_ENDIAN
        /* swap (8 in 32) RB and IB */
        lmi_swap_cntl = 0xa;
        mp_swap_cntl = 0;
#endif
        WREG32(mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl);
        WREG32(mmUVD_MP_SWAP_CNTL, mp_swap_cntl);

        WREG32(mmUVD_MPC_SET_MUXA0, 0x40c2040);
        WREG32(mmUVD_MPC_SET_MUXA1, 0x0);
        WREG32(mmUVD_MPC_SET_MUXB0, 0x40c2040);
        WREG32(mmUVD_MPC_SET_MUXB1, 0x0);
        WREG32(mmUVD_MPC_SET_ALU, 0);
        WREG32(mmUVD_MPC_SET_MUX, 0x88);

        /* take all subblocks out of reset, except VCPU */
        WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
        mdelay(5);

        /* enable VCPU clock */
        WREG32(mmUVD_VCPU_CNTL, 1 << 9);

        /* enable UMC */
        WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8));

        /* boot up the VCPU */
        WREG32(mmUVD_SOFT_RESET, 0);
        mdelay(10);

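        /* Poll for the VCPU to come up: wait up to 1 s (100 x 10 ms) for
         * UVD_STATUS to report ready (0x2), retrying with a VCPU soft
         * reset up to ten times before giving up.
         */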
        for (i = 0; i < 10; ++i) {
                uint32_t status;
                for (j = 0; j < 100; ++j) {
                        status = RREG32(mmUVD_STATUS);
                        if (status & 2)
                                break;
                        mdelay(10);
                }
                r = 0;
                if (status & 2)
                        break;

                DRM_ERROR("UVD not responding, trying to reset the VCPU!!!\n");
                WREG32_P(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK,
                                ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
                mdelay(10);
                WREG32_P(mmUVD_SOFT_RESET, 0, ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
                mdelay(10);
                r = -1;
        }

        if (r) {
                DRM_ERROR("UVD not responding, giving up!!!\n");
                return r;
        }
        /* enable master interrupt */
        WREG32_P(mmUVD_MASTINT_EN, 3 << 1, ~(3 << 1));

        /* clear the 0x4 status bit of UVD_STATUS */
        WREG32_P(mmUVD_STATUS, 0, ~(2 << 1));

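        /* Set up ring buffer control: RB_BUFSZ takes log2 of the ring
         * size, and RB_NO_FETCH keeps the RBC from fetching commands
         * until the base address and pointers below are programmed; it
         * is cleared again at the end of this function.
         */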
        rb_bufsz = order_base_2(ring->ring_size);
        tmp = 0;
        tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
        tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
        tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
        tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_WPTR_POLL_EN, 0);
        tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
        tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
        /* force RBC into idle state */
        WREG32(mmUVD_RBC_RB_CNTL, tmp);

        /* set the write pointer delay */
        WREG32(mmUVD_RBC_RB_WPTR_CNTL, 0);

        /* set the write-back address */
        WREG32(mmUVD_RBC_RB_RPTR_ADDR, (upper_32_bits(ring->gpu_addr) >> 2));

        /* program the ring buffer base address */
        WREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
                        lower_32_bits(ring->gpu_addr));
        WREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
                        upper_32_bits(ring->gpu_addr));

        /* Initialize the ring buffer's read and write pointers */
        WREG32(mmUVD_RBC_RB_RPTR, 0);

        ring->wptr = RREG32(mmUVD_RBC_RB_RPTR);
        WREG32(mmUVD_RBC_RB_WPTR, ring->wptr);

        /* let the RBC fetch commands again */
        WREG32_P(mmUVD_RBC_RB_CNTL, 0, ~UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK);

        return 0;
}

/**
 * uvd_v5_0_stop - stop UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * stop the UVD block
 */
static void uvd_v5_0_stop(struct amdgpu_device *adev)
{
        /* force RBC into idle state; 0x11010101 matches the field layout
         * used in uvd_v5_0_start (RB_NO_FETCH, RB_NO_UPDATE, RB_RPTR_WR_EN
         * with minimal buffer and block sizes) */
        WREG32(mmUVD_RBC_RB_CNTL, 0x11010101);

        /* Stall UMC and register bus before resetting VCPU */
        WREG32_P(mmUVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
        mdelay(1);

        /* put VCPU into reset */
        WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
        mdelay(5);

        /* disable VCPU clock */
        WREG32(mmUVD_VCPU_CNTL, 0x0);

        /* Unstall UMC and register bus */
        WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8));
}

/**
 * uvd_v5_0_ring_emit_fence - emit a fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @addr: address the fence value is written to
 * @seq: sequence number to emit
 * @flags: fence related flags
 *
 * Write a fence and a trap command to the ring.
 */
static void uvd_v5_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
                                     unsigned flags)
{
        WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

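        /* First command group latches the sequence number in
         * UVD_CONTEXT_ID and issues GPCOM command 0, which writes the
         * fence value to addr; the second group issues GPCOM command 2,
         * which raises the trap interrupt (command meanings carried over
         * from the radeon UVD code).
         */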
        amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
        amdgpu_ring_write(ring, seq);
        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
        amdgpu_ring_write(ring, addr & 0xffffffff);
        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
        amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);
        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
        amdgpu_ring_write(ring, 0);

        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
        amdgpu_ring_write(ring, 0);
        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
        amdgpu_ring_write(ring, 0);
        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
        amdgpu_ring_write(ring, 2);
}

/**
 * uvd_v5_0_ring_emit_semaphore - emit semaphore command
 *
 * @ring: amdgpu_ring pointer
 * @semaphore: semaphore to emit commands for
 * @emit_wait: true if we should emit a wait command
 *
 * Emit a semaphore command (either wait or signal) to the UVD ring.
 */
static bool uvd_v5_0_ring_emit_semaphore(struct amdgpu_ring *ring,
                                         struct amdgpu_semaphore *semaphore,
                                         bool emit_wait)
{
        uint64_t addr = semaphore->gpu_addr;

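        /* The semaphore address is programmed in 8-byte units, split
         * across two 20-bit register fields; 0x80 in UVD_SEMA_CMD appears
         * to enable the command, with bit 0 selecting wait (1) vs.
         * signal (0).
         */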
        amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_ADDR_LOW, 0));
        amdgpu_ring_write(ring, (addr >> 3) & 0x000FFFFF);

        amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_ADDR_HIGH, 0));
        amdgpu_ring_write(ring, (addr >> 23) & 0x000FFFFF);

        amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_CMD, 0));
        amdgpu_ring_write(ring, 0x80 | (emit_wait ? 1 : 0));

        return true;
}

/**
 * uvd_v5_0_ring_test_ring - register write test
 *
 * @ring: amdgpu_ring pointer
 *
 * Test if we can successfully write to the context register
 */
static int uvd_v5_0_ring_test_ring(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;
        uint32_t tmp = 0;
        unsigned i;
        int r;

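        /* Poison UVD_CONTEXT_ID with a known value, then submit a packet
         * that overwrites it; reading back 0xDEADBEEF proves the engine
         * fetched and executed our write.
         */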
        WREG32(mmUVD_CONTEXT_ID, 0xCAFEDEAD);
        r = amdgpu_ring_lock(ring, 3);
        if (r) {
                DRM_ERROR("amdgpu: failed to lock UVD ring %d (%d).\n",
                          ring->idx, r);
                return r;
        }
        amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
        amdgpu_ring_write(ring, 0xDEADBEEF);
        amdgpu_ring_unlock_commit(ring);
        for (i = 0; i < adev->usec_timeout; i++) {
                tmp = RREG32(mmUVD_CONTEXT_ID);
                if (tmp == 0xDEADBEEF)
                        break;
                DRM_UDELAY(1);
        }

        if (i < adev->usec_timeout) {
                DRM_INFO("ring test on %d succeeded in %d usecs\n",
                         ring->idx, i);
        } else {
                DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
                          ring->idx, tmp);
                r = -EINVAL;
        }
        return r;
}

/**
 * uvd_v5_0_ring_emit_ib - execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @ib: indirect buffer to execute
 *
 * Write ring commands to execute the indirect buffer
 */
static void uvd_v5_0_ring_emit_ib(struct amdgpu_ring *ring,
                                  struct amdgpu_ib *ib)
{
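        /* Hand the IB to the RBC: 64-bit GPU address split across the
         * low/high BAR registers, followed by the IB size in dwords.
         */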
        amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_LOW, 0));
        amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
        amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH, 0));
        amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
        amdgpu_ring_write(ring, PACKET0(mmUVD_RBC_IB_SIZE, 0));
        amdgpu_ring_write(ring, ib->length_dw);
}

/**
 * uvd_v5_0_ring_test_ib - test ib execution
 *
 * @ring: amdgpu_ring pointer
 *
 * Test if we can successfully execute an IB
 */
static int uvd_v5_0_ring_test_ib(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;
        struct fence *fence = NULL;
        int r;

        r = amdgpu_asic_set_uvd_clocks(adev, 53300, 40000);
        if (r) {
                DRM_ERROR("amdgpu: failed to raise UVD clocks (%d).\n", r);
                return r;
        }

        r = amdgpu_uvd_get_create_msg(ring, 1, NULL);
        if (r) {
                DRM_ERROR("amdgpu: failed to get create msg (%d).\n", r);
                goto error;
        }

        r = amdgpu_uvd_get_destroy_msg(ring, 1, &fence);
        if (r) {
                DRM_ERROR("amdgpu: failed to get destroy ib (%d).\n", r);
                goto error;
        }

        r = fence_wait(fence, false);
        if (r) {
                DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
                goto error;
        }
        DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
error:
        fence_put(fence);
        amdgpu_asic_set_uvd_clocks(adev, 0, 0);
        return r;
}

static bool uvd_v5_0_is_idle(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK);
}

static int uvd_v5_0_wait_for_idle(void *handle)
{
        unsigned i;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        for (i = 0; i < adev->usec_timeout; i++) {
                if (!(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK))
                        return 0;
        }
        return -ETIMEDOUT;
}

static int uvd_v5_0_soft_reset(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        uvd_v5_0_stop(adev);

        WREG32_P(mmSRBM_SOFT_RESET, SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK,
                        ~SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK);
        mdelay(5);

        return uvd_v5_0_start(adev);
}

static void uvd_v5_0_print_status(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        dev_info(adev->dev, "UVD 5.0 registers\n");
        dev_info(adev->dev, "  UVD_SEMA_ADDR_LOW=0x%08X\n",
                 RREG32(mmUVD_SEMA_ADDR_LOW));
        dev_info(adev->dev, "  UVD_SEMA_ADDR_HIGH=0x%08X\n",
                 RREG32(mmUVD_SEMA_ADDR_HIGH));
        dev_info(adev->dev, "  UVD_SEMA_CMD=0x%08X\n",
                 RREG32(mmUVD_SEMA_CMD));
        dev_info(adev->dev, "  UVD_GPCOM_VCPU_CMD=0x%08X\n",
                 RREG32(mmUVD_GPCOM_VCPU_CMD));
        dev_info(adev->dev, "  UVD_GPCOM_VCPU_DATA0=0x%08X\n",
                 RREG32(mmUVD_GPCOM_VCPU_DATA0));
        dev_info(adev->dev, "  UVD_GPCOM_VCPU_DATA1=0x%08X\n",
                 RREG32(mmUVD_GPCOM_VCPU_DATA1));
        dev_info(adev->dev, "  UVD_ENGINE_CNTL=0x%08X\n",
                 RREG32(mmUVD_ENGINE_CNTL));
        dev_info(adev->dev, "  UVD_UDEC_ADDR_CONFIG=0x%08X\n",
                 RREG32(mmUVD_UDEC_ADDR_CONFIG));
        dev_info(adev->dev, "  UVD_UDEC_DB_ADDR_CONFIG=0x%08X\n",
                 RREG32(mmUVD_UDEC_DB_ADDR_CONFIG));
        dev_info(adev->dev, "  UVD_UDEC_DBW_ADDR_CONFIG=0x%08X\n",
                 RREG32(mmUVD_UDEC_DBW_ADDR_CONFIG));
        dev_info(adev->dev, "  UVD_SEMA_CNTL=0x%08X\n",
                 RREG32(mmUVD_SEMA_CNTL));
        dev_info(adev->dev, "  UVD_LMI_EXT40_ADDR=0x%08X\n",
                 RREG32(mmUVD_LMI_EXT40_ADDR));
        dev_info(adev->dev, "  UVD_CTX_INDEX=0x%08X\n",
                 RREG32(mmUVD_CTX_INDEX));
        dev_info(adev->dev, "  UVD_CTX_DATA=0x%08X\n",
                 RREG32(mmUVD_CTX_DATA));
        dev_info(adev->dev, "  UVD_CGC_GATE=0x%08X\n",
                 RREG32(mmUVD_CGC_GATE));
        dev_info(adev->dev, "  UVD_CGC_CTRL=0x%08X\n",
                 RREG32(mmUVD_CGC_CTRL));
        dev_info(adev->dev, "  UVD_LMI_CTRL2=0x%08X\n",
                 RREG32(mmUVD_LMI_CTRL2));
        dev_info(adev->dev, "  UVD_MASTINT_EN=0x%08X\n",
                 RREG32(mmUVD_MASTINT_EN));
        dev_info(adev->dev, "  UVD_LMI_ADDR_EXT=0x%08X\n",
                 RREG32(mmUVD_LMI_ADDR_EXT));
        dev_info(adev->dev, "  UVD_LMI_CTRL=0x%08X\n",
                 RREG32(mmUVD_LMI_CTRL));
        dev_info(adev->dev, "  UVD_LMI_SWAP_CNTL=0x%08X\n",
                 RREG32(mmUVD_LMI_SWAP_CNTL));
        dev_info(adev->dev, "  UVD_MP_SWAP_CNTL=0x%08X\n",
                 RREG32(mmUVD_MP_SWAP_CNTL));
        dev_info(adev->dev, "  UVD_MPC_SET_MUXA0=0x%08X\n",
                 RREG32(mmUVD_MPC_SET_MUXA0));
        dev_info(adev->dev, "  UVD_MPC_SET_MUXA1=0x%08X\n",
                 RREG32(mmUVD_MPC_SET_MUXA1));
        dev_info(adev->dev, "  UVD_MPC_SET_MUXB0=0x%08X\n",
                 RREG32(mmUVD_MPC_SET_MUXB0));
        dev_info(adev->dev, "  UVD_MPC_SET_MUXB1=0x%08X\n",
                 RREG32(mmUVD_MPC_SET_MUXB1));
        dev_info(adev->dev, "  UVD_MPC_SET_MUX=0x%08X\n",
                 RREG32(mmUVD_MPC_SET_MUX));
        dev_info(adev->dev, "  UVD_MPC_SET_ALU=0x%08X\n",
                 RREG32(mmUVD_MPC_SET_ALU));
        dev_info(adev->dev, "  UVD_VCPU_CACHE_OFFSET0=0x%08X\n",
                 RREG32(mmUVD_VCPU_CACHE_OFFSET0));
        dev_info(adev->dev, "  UVD_VCPU_CACHE_SIZE0=0x%08X\n",
                 RREG32(mmUVD_VCPU_CACHE_SIZE0));
        dev_info(adev->dev, "  UVD_VCPU_CACHE_OFFSET1=0x%08X\n",
                 RREG32(mmUVD_VCPU_CACHE_OFFSET1));
        dev_info(adev->dev, "  UVD_VCPU_CACHE_SIZE1=0x%08X\n",
                 RREG32(mmUVD_VCPU_CACHE_SIZE1));
        dev_info(adev->dev, "  UVD_VCPU_CACHE_OFFSET2=0x%08X\n",
                 RREG32(mmUVD_VCPU_CACHE_OFFSET2));
        dev_info(adev->dev, "  UVD_VCPU_CACHE_SIZE2=0x%08X\n",
                 RREG32(mmUVD_VCPU_CACHE_SIZE2));
        dev_info(adev->dev, "  UVD_VCPU_CNTL=0x%08X\n",
                 RREG32(mmUVD_VCPU_CNTL));
        dev_info(adev->dev, "  UVD_SOFT_RESET=0x%08X\n",
                 RREG32(mmUVD_SOFT_RESET));
        dev_info(adev->dev, "  UVD_LMI_RBC_IB_64BIT_BAR_LOW=0x%08X\n",
                 RREG32(mmUVD_LMI_RBC_IB_64BIT_BAR_LOW));
        dev_info(adev->dev, "  UVD_LMI_RBC_IB_64BIT_BAR_HIGH=0x%08X\n",
                 RREG32(mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH));
        dev_info(adev->dev, "  UVD_RBC_IB_SIZE=0x%08X\n",
                 RREG32(mmUVD_RBC_IB_SIZE));
        dev_info(adev->dev, "  UVD_LMI_RBC_RB_64BIT_BAR_LOW=0x%08X\n",
                 RREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_LOW));
        dev_info(adev->dev, "  UVD_LMI_RBC_RB_64BIT_BAR_HIGH=0x%08X\n",
                 RREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH));
        dev_info(adev->dev, "  UVD_RBC_RB_RPTR=0x%08X\n",
                 RREG32(mmUVD_RBC_RB_RPTR));
        dev_info(adev->dev, "  UVD_RBC_RB_WPTR=0x%08X\n",
                 RREG32(mmUVD_RBC_RB_WPTR));
        dev_info(adev->dev, "  UVD_RBC_RB_WPTR_CNTL=0x%08X\n",
                 RREG32(mmUVD_RBC_RB_WPTR_CNTL));
        dev_info(adev->dev, "  UVD_RBC_RB_CNTL=0x%08X\n",
                 RREG32(mmUVD_RBC_RB_CNTL));
        dev_info(adev->dev, "  UVD_STATUS=0x%08X\n",
                 RREG32(mmUVD_STATUS));
        dev_info(adev->dev, "  UVD_SEMA_TIMEOUT_STATUS=0x%08X\n",
                 RREG32(mmUVD_SEMA_TIMEOUT_STATUS));
        dev_info(adev->dev, "  UVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL=0x%08X\n",
                 RREG32(mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL));
        dev_info(adev->dev, "  UVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL=0x%08X\n",
                 RREG32(mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL));
        dev_info(adev->dev, "  UVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL=0x%08X\n",
                 RREG32(mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL));
        dev_info(adev->dev, "  UVD_CONTEXT_ID=0x%08X\n",
                 RREG32(mmUVD_CONTEXT_ID));
}

static int uvd_v5_0_set_interrupt_state(struct amdgpu_device *adev,
                                        struct amdgpu_irq_src *source,
                                        unsigned type,
                                        enum amdgpu_interrupt_state state)
{
        /* TODO */
        return 0;
}

static int uvd_v5_0_process_interrupt(struct amdgpu_device *adev,
                                      struct amdgpu_irq_src *source,
                                      struct amdgpu_iv_entry *entry)
{
        DRM_DEBUG("IH: UVD TRAP\n");
        amdgpu_fence_process(&adev->uvd.ring);
        return 0;
}

static int uvd_v5_0_set_clockgating_state(void *handle,
                                          enum amd_clockgating_state state)
{
        return 0;
}

static int uvd_v5_0_set_powergating_state(void *handle,
                                          enum amd_powergating_state state)
{
        /* This doesn't actually powergate the UVD block.
         * That's done in the dpm code via the SMC.  This
         * just re-inits the block as necessary.  The actual
         * gating still happens in the dpm code.  We should
         * revisit this when there is a cleaner line between
         * the smc and the hw blocks
         */
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        if (state == AMD_PG_STATE_GATE) {
                uvd_v5_0_stop(adev);
                return 0;
        } else {
                return uvd_v5_0_start(adev);
        }
}

const struct amd_ip_funcs uvd_v5_0_ip_funcs = {
        .early_init = uvd_v5_0_early_init,
        .late_init = NULL,
        .sw_init = uvd_v5_0_sw_init,
        .sw_fini = uvd_v5_0_sw_fini,
        .hw_init = uvd_v5_0_hw_init,
        .hw_fini = uvd_v5_0_hw_fini,
        .suspend = uvd_v5_0_suspend,
        .resume = uvd_v5_0_resume,
        .is_idle = uvd_v5_0_is_idle,
        .wait_for_idle = uvd_v5_0_wait_for_idle,
        .soft_reset = uvd_v5_0_soft_reset,
        .print_status = uvd_v5_0_print_status,
        .set_clockgating_state = uvd_v5_0_set_clockgating_state,
        .set_powergating_state = uvd_v5_0_set_powergating_state,
};

static const struct amdgpu_ring_funcs uvd_v5_0_ring_funcs = {
        .get_rptr = uvd_v5_0_ring_get_rptr,
        .get_wptr = uvd_v5_0_ring_get_wptr,
        .set_wptr = uvd_v5_0_ring_set_wptr,
        .parse_cs = amdgpu_uvd_ring_parse_cs,
        .emit_ib = uvd_v5_0_ring_emit_ib,
        .emit_fence = uvd_v5_0_ring_emit_fence,
        .emit_semaphore = uvd_v5_0_ring_emit_semaphore,
        .test_ring = uvd_v5_0_ring_test_ring,
        .test_ib = uvd_v5_0_ring_test_ib,
        .insert_nop = amdgpu_ring_insert_nop,
};

static void uvd_v5_0_set_ring_funcs(struct amdgpu_device *adev)
{
        adev->uvd.ring.funcs = &uvd_v5_0_ring_funcs;
}

static const struct amdgpu_irq_src_funcs uvd_v5_0_irq_funcs = {
        .set = uvd_v5_0_set_interrupt_state,
        .process = uvd_v5_0_process_interrupt,
};

static void uvd_v5_0_set_irq_funcs(struct amdgpu_device *adev)
{
        adev->uvd.irq.num_types = 1;
        adev->uvd.irq.funcs = &uvd_v5_0_irq_funcs;
}