linux/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König <christian.koenig@amd.com>
 */

#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_uvd.h"
#include "vid.h"
#include "uvd/uvd_5_0_d.h"
#include "uvd/uvd_5_0_sh_mask.h"
#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"
#include "bif/bif_5_0_d.h"
#include "vi.h"

static void uvd_v5_0_set_ring_funcs(struct amdgpu_device *adev);
static void uvd_v5_0_set_irq_funcs(struct amdgpu_device *adev);
static int uvd_v5_0_start(struct amdgpu_device *adev);
static void uvd_v5_0_stop(struct amdgpu_device *adev);

/**
 * uvd_v5_0_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint32_t uvd_v5_0_ring_get_rptr(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;

        return RREG32(mmUVD_RBC_RB_RPTR);
}

/**
 * uvd_v5_0_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint32_t uvd_v5_0_ring_get_wptr(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;

        return RREG32(mmUVD_RBC_RB_WPTR);
}

/**
 * uvd_v5_0_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void uvd_v5_0_ring_set_wptr(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;

        WREG32(mmUVD_RBC_RB_WPTR, ring->wptr);
}

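/**
 * uvd_v5_0_early_init - early driver setup
 *
 * @handle: amdgpu_device pointer
 *
 * Install the UVD ring and interrupt handler function pointers.
 */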
static int uvd_v5_0_early_init(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        uvd_v5_0_set_ring_funcs(adev);
        uvd_v5_0_set_irq_funcs(adev);

        return 0;
}

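/**
 * uvd_v5_0_sw_init - software init for the UVD block
 *
 * @handle: amdgpu_device pointer
 *
 * Register the UVD trap interrupt source, load the firmware through the
 * common UVD helpers and initialize the single UVD ring.
 */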
static int uvd_v5_0_sw_init(void *handle)
{
        struct amdgpu_ring *ring;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        int r;

        /* UVD TRAP */
        r = amdgpu_irq_add_id(adev, 124, &adev->uvd.irq);
        if (r)
                return r;

        r = amdgpu_uvd_sw_init(adev);
        if (r)
                return r;

        r = amdgpu_uvd_resume(adev);
        if (r)
                return r;

        ring = &adev->uvd.ring;
        sprintf(ring->name, "uvd");
        r = amdgpu_ring_init(adev, ring, 512, PACKET0(mmUVD_NO_OP, 0), 0xf,
                             &adev->uvd.irq, 0, AMDGPU_RING_TYPE_UVD);

        return r;
}

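/**
 * uvd_v5_0_sw_fini - software teardown for the UVD block
 *
 * @handle: amdgpu_device pointer
 *
 * Suspend the UVD block and free the resources allocated in sw_init.
 */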
static int uvd_v5_0_sw_fini(void *handle)
{
        int r;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        r = amdgpu_uvd_suspend(adev);
        if (r)
                return r;

        r = amdgpu_uvd_sw_fini(adev);
        if (r)
                return r;

        return r;
}

/**
 * uvd_v5_0_hw_init - start and test UVD block
 *
 * @handle: amdgpu_device pointer
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */
static int uvd_v5_0_hw_init(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        struct amdgpu_ring *ring = &adev->uvd.ring;
        uint32_t tmp;
        int r;

        /* raise clocks while booting up the VCPU */
        amdgpu_asic_set_uvd_clocks(adev, 53300, 40000);

        r = uvd_v5_0_start(adev);
        if (r)
                goto done;

        ring->ready = true;
        r = amdgpu_ring_test_ring(ring);
        if (r) {
                ring->ready = false;
                goto done;
        }

        r = amdgpu_ring_alloc(ring, 10);
        if (r) {
                DRM_ERROR("amdgpu: ring failed to lock UVD ring (%d).\n", r);
                goto done;
        }

        tmp = PACKET0(mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL, 0);
        amdgpu_ring_write(ring, tmp);
        amdgpu_ring_write(ring, 0xFFFFF);

        tmp = PACKET0(mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL, 0);
        amdgpu_ring_write(ring, tmp);
        amdgpu_ring_write(ring, 0xFFFFF);

        tmp = PACKET0(mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL, 0);
        amdgpu_ring_write(ring, tmp);
        amdgpu_ring_write(ring, 0xFFFFF);

        /* Clear timeout status bits */
        amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_TIMEOUT_STATUS, 0));
        amdgpu_ring_write(ring, 0x8);

        amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_CNTL, 0));
        amdgpu_ring_write(ring, 3);

        amdgpu_ring_commit(ring);

done:
        /* lower clocks again */
        amdgpu_asic_set_uvd_clocks(adev, 0, 0);

        if (!r)
                DRM_INFO("UVD initialized successfully.\n");

        return r;
}

/**
 * uvd_v5_0_hw_fini - stop the hardware block
 *
 * @handle: amdgpu_device pointer
 *
 * Stop the UVD block and mark the ring as not ready any more
 */
static int uvd_v5_0_hw_fini(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        struct amdgpu_ring *ring = &adev->uvd.ring;

        uvd_v5_0_stop(adev);
        ring->ready = false;

        return 0;
}

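/**
 * uvd_v5_0_suspend - suspend the UVD block
 *
 * @handle: amdgpu_device pointer
 *
 * Stop the hardware via hw_fini and let the common UVD code save its state.
 */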
static int uvd_v5_0_suspend(void *handle)
{
        int r;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        r = uvd_v5_0_hw_fini(adev);
        if (r)
                return r;

        r = amdgpu_uvd_suspend(adev);
        if (r)
                return r;

        return r;
}

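/**
 * uvd_v5_0_resume - resume the UVD block
 *
 * @handle: amdgpu_device pointer
 *
 * Restore the UVD state saved on suspend and re-initialize the hardware.
 */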
static int uvd_v5_0_resume(void *handle)
{
        int r;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        r = amdgpu_uvd_resume(adev);
        if (r)
                return r;

        r = uvd_v5_0_hw_init(adev);
        if (r)
                return r;

        return r;
}

/**
 * uvd_v5_0_mc_resume - memory controller programming
 *
 * @adev: amdgpu_device pointer
 *
 * Let the UVD memory controller know its offsets
 */
static void uvd_v5_0_mc_resume(struct amdgpu_device *adev)
{
        uint64_t offset;
        uint32_t size;

        /* program memory controller bits 0-27 */
        WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
                        lower_32_bits(adev->uvd.gpu_addr));
        WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
                        upper_32_bits(adev->uvd.gpu_addr));

        offset = AMDGPU_UVD_FIRMWARE_OFFSET;
        size = AMDGPU_GPU_PAGE_ALIGN(adev->uvd.fw->size + 4);
        WREG32(mmUVD_VCPU_CACHE_OFFSET0, offset >> 3);
        WREG32(mmUVD_VCPU_CACHE_SIZE0, size);

        offset += size;
        size = AMDGPU_UVD_HEAP_SIZE;
        WREG32(mmUVD_VCPU_CACHE_OFFSET1, offset >> 3);
        WREG32(mmUVD_VCPU_CACHE_SIZE1, size);

        offset += size;
        size = AMDGPU_UVD_STACK_SIZE +
               (AMDGPU_UVD_SESSION_SIZE * adev->uvd.max_handles);
        WREG32(mmUVD_VCPU_CACHE_OFFSET2, offset >> 3);
        WREG32(mmUVD_VCPU_CACHE_SIZE2, size);

        WREG32(mmUVD_UDEC_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
        WREG32(mmUVD_UDEC_DB_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
        WREG32(mmUVD_UDEC_DBW_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
}

/**
 * uvd_v5_0_start - start UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * Setup and start the UVD block
 */
static int uvd_v5_0_start(struct amdgpu_device *adev)
{
        struct amdgpu_ring *ring = &adev->uvd.ring;
        uint32_t rb_bufsz, tmp;
        uint32_t lmi_swap_cntl;
        uint32_t mp_swap_cntl;
        int i, j, r;

        /* disable DPG */
        WREG32_P(mmUVD_POWER_STATUS, 0, ~(1 << 2));

        /* disable byte swapping */
        lmi_swap_cntl = 0;
        mp_swap_cntl = 0;

        uvd_v5_0_mc_resume(adev);

        /* disable clock gating */
        WREG32(mmUVD_CGC_GATE, 0);

        /* disable interrupt */
        WREG32_P(mmUVD_MASTINT_EN, 0, ~(1 << 1));

        /* stall UMC and register bus before resetting VCPU */
        WREG32_P(mmUVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
        mdelay(1);

        /* put LMI, VCPU, RBC etc... into reset */
        WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
                UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK | UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK |
                UVD_SOFT_RESET__RBC_SOFT_RESET_MASK | UVD_SOFT_RESET__CSM_SOFT_RESET_MASK |
                UVD_SOFT_RESET__CXW_SOFT_RESET_MASK | UVD_SOFT_RESET__TAP_SOFT_RESET_MASK |
                UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);
        mdelay(5);

        /* take UVD block out of reset */
        WREG32_P(mmSRBM_SOFT_RESET, 0, ~SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK);
        mdelay(5);

        /* initialize UVD memory controller */
        WREG32(mmUVD_LMI_CTRL, 0x40 | (1 << 8) | (1 << 13) |
                             (1 << 21) | (1 << 9) | (1 << 20));

#ifdef __BIG_ENDIAN
        /* swap (8 in 32) RB and IB */
        lmi_swap_cntl = 0xa;
        mp_swap_cntl = 0;
#endif
        WREG32(mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl);
        WREG32(mmUVD_MP_SWAP_CNTL, mp_swap_cntl);

        WREG32(mmUVD_MPC_SET_MUXA0, 0x40c2040);
        WREG32(mmUVD_MPC_SET_MUXA1, 0x0);
        WREG32(mmUVD_MPC_SET_MUXB0, 0x40c2040);
        WREG32(mmUVD_MPC_SET_MUXB1, 0x0);
        WREG32(mmUVD_MPC_SET_ALU, 0);
        WREG32(mmUVD_MPC_SET_MUX, 0x88);

        /* take all subblocks out of reset, except VCPU */
        WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
        mdelay(5);

        /* enable VCPU clock */
        WREG32(mmUVD_VCPU_CNTL, 1 << 9);

        /* enable UMC */
        WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8));

        /* boot up the VCPU */
        WREG32(mmUVD_SOFT_RESET, 0);
        mdelay(10);

        for (i = 0; i < 10; ++i) {
                uint32_t status;
                for (j = 0; j < 100; ++j) {
                        status = RREG32(mmUVD_STATUS);
                        if (status & 2)
                                break;
                        mdelay(10);
                }
                r = 0;
                if (status & 2)
                        break;

                DRM_ERROR("UVD not responding, trying to reset the VCPU!!!\n");
                WREG32_P(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK,
                                ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
                mdelay(10);
                WREG32_P(mmUVD_SOFT_RESET, 0, ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
                mdelay(10);
                r = -1;
        }

        if (r) {
                DRM_ERROR("UVD not responding, giving up!!!\n");
                return r;
        }
        /* enable master interrupt */
        WREG32_P(mmUVD_MASTINT_EN, 3 << 1, ~(3 << 1));

        /* clear bit 4 of UVD_STATUS */
        WREG32_P(mmUVD_STATUS, 0, ~(2 << 1));

        rb_bufsz = order_base_2(ring->ring_size);
        tmp = 0;
        tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
        tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
        tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
        tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_WPTR_POLL_EN, 0);
        tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
        tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
        /* force RBC into idle state */
        WREG32(mmUVD_RBC_RB_CNTL, tmp);

        /* set the write pointer delay */
        WREG32(mmUVD_RBC_RB_WPTR_CNTL, 0);

        /* set the wb address */
        WREG32(mmUVD_RBC_RB_RPTR_ADDR, (upper_32_bits(ring->gpu_addr) >> 2));

        /* program the RB_BASE for ring buffer */
        WREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
                        lower_32_bits(ring->gpu_addr));
        WREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
                        upper_32_bits(ring->gpu_addr));

        /* Initialize the ring buffer's read and write pointers */
        WREG32(mmUVD_RBC_RB_RPTR, 0);

        ring->wptr = RREG32(mmUVD_RBC_RB_RPTR);
        WREG32(mmUVD_RBC_RB_WPTR, ring->wptr);

        WREG32_P(mmUVD_RBC_RB_CNTL, 0, ~UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK);

        return 0;
}

/**
 * uvd_v5_0_stop - stop UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * stop the UVD block
 */
static void uvd_v5_0_stop(struct amdgpu_device *adev)
{
        /* force RBC into idle state */
        WREG32(mmUVD_RBC_RB_CNTL, 0x11010101);

        /* Stall UMC and register bus before resetting VCPU */
        WREG32_P(mmUVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
        mdelay(1);

        /* put VCPU into reset */
        WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
        mdelay(5);

        /* disable VCPU clock */
        WREG32(mmUVD_VCPU_CNTL, 0x0);

        /* Unstall UMC and register bus */
        WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8));
}

/**
 * uvd_v5_0_ring_emit_fence - emit a fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @addr: address to write the fence sequence number to
 * @seq: sequence number to emit
 * @flags: fence flags
 *
 * Write a fence and a trap command to the ring.
 */
static void uvd_v5_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
                                     unsigned flags)
{
        WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

        amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
        amdgpu_ring_write(ring, seq);
        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
        amdgpu_ring_write(ring, addr & 0xffffffff);
        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
        amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);
        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
        amdgpu_ring_write(ring, 0);

        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
        amdgpu_ring_write(ring, 0);
        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
        amdgpu_ring_write(ring, 0);
        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
        amdgpu_ring_write(ring, 2);
}

/**
 * uvd_v5_0_ring_emit_hdp_flush - emit an HDP flush
 *
 * @ring: amdgpu_ring pointer
 *
 * Emits an HDP flush.
 */
static void uvd_v5_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
        amdgpu_ring_write(ring, PACKET0(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 0));
        amdgpu_ring_write(ring, 0);
}

/**
 * uvd_v5_0_ring_emit_hdp_invalidate - emit an HDP invalidate
 *
 * @ring: amdgpu_ring pointer
 *
 * Emits an HDP invalidate.
 */
static void uvd_v5_0_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
{
        amdgpu_ring_write(ring, PACKET0(mmHDP_DEBUG0, 0));
        amdgpu_ring_write(ring, 1);
}

/**
 * uvd_v5_0_ring_test_ring - register write test
 *
 * @ring: amdgpu_ring pointer
 *
 * Test if we can successfully write to the context register
 */
static int uvd_v5_0_ring_test_ring(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;
        uint32_t tmp = 0;
        unsigned i;
        int r;

        WREG32(mmUVD_CONTEXT_ID, 0xCAFEDEAD);
        r = amdgpu_ring_alloc(ring, 3);
        if (r) {
                DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
                          ring->idx, r);
                return r;
        }
        amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
        amdgpu_ring_write(ring, 0xDEADBEEF);
        amdgpu_ring_commit(ring);
        for (i = 0; i < adev->usec_timeout; i++) {
                tmp = RREG32(mmUVD_CONTEXT_ID);
                if (tmp == 0xDEADBEEF)
                        break;
                DRM_UDELAY(1);
        }

        if (i < adev->usec_timeout) {
                DRM_INFO("ring test on %d succeeded in %d usecs\n",
                         ring->idx, i);
        } else {
                DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
                          ring->idx, tmp);
                r = -EINVAL;
        }
        return r;
}

/**
 * uvd_v5_0_ring_emit_ib - execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @ib: indirect buffer to execute
 * @vm_id: virtual memory id (not used here)
 * @ctx_switch: context switch flag (not used here)
 *
 * Write ring commands to execute the indirect buffer
 */
static void uvd_v5_0_ring_emit_ib(struct amdgpu_ring *ring,
                                  struct amdgpu_ib *ib,
                                  unsigned vm_id, bool ctx_switch)
{
        amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_LOW, 0));
        amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
        amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH, 0));
        amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
        amdgpu_ring_write(ring, PACKET0(mmUVD_RBC_IB_SIZE, 0));
        amdgpu_ring_write(ring, ib->length_dw);
}

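/* number of ring dwords written by one uvd_v5_0_ring_emit_ib() call */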
static unsigned uvd_v5_0_ring_get_emit_ib_size(struct amdgpu_ring *ring)
{
        return
                6; /* uvd_v5_0_ring_emit_ib */
}

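/* worst-case ring dwords per frame: HDP flush + HDP invalidate + fence */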
static unsigned uvd_v5_0_ring_get_dma_frame_size(struct amdgpu_ring *ring)
{
        return
                2 + /* uvd_v5_0_ring_emit_hdp_flush */
                2 + /* uvd_v5_0_ring_emit_hdp_invalidate */
                14; /* uvd_v5_0_ring_emit_fence  x1 no user fence */
}

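/* check the SRBM status register to see if the UVD block is idle */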
static bool uvd_v5_0_is_idle(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK);
}

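/* poll SRBM_STATUS until the UVD busy bit clears or the timeout count expires */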
static int uvd_v5_0_wait_for_idle(void *handle)
{
        unsigned i;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        for (i = 0; i < adev->usec_timeout; i++) {
                if (!(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK))
                        return 0;
        }
        return -ETIMEDOUT;
}

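/* stop the block, pulse the SRBM soft reset for UVD and start it again */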
static int uvd_v5_0_soft_reset(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        uvd_v5_0_stop(adev);

        WREG32_P(mmSRBM_SOFT_RESET, SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK,
                        ~SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK);
        mdelay(5);

        return uvd_v5_0_start(adev);
}

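/* enabling/disabling the UVD trap interrupt source is not implemented yet */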
static int uvd_v5_0_set_interrupt_state(struct amdgpu_device *adev,
                                        struct amdgpu_irq_src *source,
                                        unsigned type,
                                        enum amdgpu_interrupt_state state)
{
        /* TODO */
        return 0;
}

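/* UVD trap handler: process fence completions on the UVD ring */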
static int uvd_v5_0_process_interrupt(struct amdgpu_device *adev,
                                      struct amdgpu_irq_src *source,
                                      struct amdgpu_iv_entry *entry)
{
        DRM_DEBUG("IH: UVD TRAP\n");
        amdgpu_fence_process(&adev->uvd.ring);
        return 0;
}

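/*
 * Switch UVD to software-controlled clock gating: dynamic clock mode with
 * a short gate delay, all per-engine override (MODE) bits cleared, and the
 * SUVD sub-blocks gated.
 */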
static void uvd_v5_0_set_sw_clock_gating(struct amdgpu_device *adev)
{
        uint32_t data, data1, data2, suvd_flags;

        data = RREG32(mmUVD_CGC_CTRL);
        data1 = RREG32(mmUVD_SUVD_CGC_GATE);
        data2 = RREG32(mmUVD_SUVD_CGC_CTRL);

        data &= ~(UVD_CGC_CTRL__CLK_OFF_DELAY_MASK |
                  UVD_CGC_CTRL__CLK_GATE_DLY_TIMER_MASK);

        suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK |
                     UVD_SUVD_CGC_GATE__SIT_MASK |
                     UVD_SUVD_CGC_GATE__SMP_MASK |
                     UVD_SUVD_CGC_GATE__SCM_MASK |
                     UVD_SUVD_CGC_GATE__SDB_MASK;

        data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK |
                (1 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_GATE_DLY_TIMER)) |
                (4 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_OFF_DELAY));

        data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
                        UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
                        UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
                        UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
                        UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
                        UVD_CGC_CTRL__SYS_MODE_MASK |
                        UVD_CGC_CTRL__UDEC_MODE_MASK |
                        UVD_CGC_CTRL__MPEG2_MODE_MASK |
                        UVD_CGC_CTRL__REGS_MODE_MASK |
                        UVD_CGC_CTRL__RBC_MODE_MASK |
                        UVD_CGC_CTRL__LMI_MC_MODE_MASK |
                        UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
                        UVD_CGC_CTRL__IDCT_MODE_MASK |
                        UVD_CGC_CTRL__MPRD_MODE_MASK |
                        UVD_CGC_CTRL__MPC_MODE_MASK |
                        UVD_CGC_CTRL__LBSI_MODE_MASK |
                        UVD_CGC_CTRL__LRBBM_MODE_MASK |
                        UVD_CGC_CTRL__WCB_MODE_MASK |
                        UVD_CGC_CTRL__VCPU_MODE_MASK |
                        UVD_CGC_CTRL__JPEG_MODE_MASK |
                        UVD_CGC_CTRL__SCPU_MODE_MASK);
        data2 &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK |
                        UVD_SUVD_CGC_CTRL__SIT_MODE_MASK |
                        UVD_SUVD_CGC_CTRL__SMP_MODE_MASK |
                        UVD_SUVD_CGC_CTRL__SCM_MODE_MASK |
                        UVD_SUVD_CGC_CTRL__SDB_MODE_MASK);
        data1 |= suvd_flags;

        WREG32(mmUVD_CGC_CTRL, data);
        WREG32(mmUVD_CGC_GATE, 0);
        WREG32(mmUVD_SUVD_CGC_GATE, data1);
        WREG32(mmUVD_SUVD_CGC_CTRL, data2);
}

#if 0
static void uvd_v5_0_set_hw_clock_gating(struct amdgpu_device *adev)
{
        uint32_t data, data1, cgc_flags, suvd_flags;

        data = RREG32(mmUVD_CGC_GATE);
        data1 = RREG32(mmUVD_SUVD_CGC_GATE);

        cgc_flags = UVD_CGC_GATE__SYS_MASK |
                                UVD_CGC_GATE__UDEC_MASK |
                                UVD_CGC_GATE__MPEG2_MASK |
                                UVD_CGC_GATE__RBC_MASK |
                                UVD_CGC_GATE__LMI_MC_MASK |
                                UVD_CGC_GATE__IDCT_MASK |
                                UVD_CGC_GATE__MPRD_MASK |
                                UVD_CGC_GATE__MPC_MASK |
                                UVD_CGC_GATE__LBSI_MASK |
                                UVD_CGC_GATE__LRBBM_MASK |
                                UVD_CGC_GATE__UDEC_RE_MASK |
                                UVD_CGC_GATE__UDEC_CM_MASK |
                                UVD_CGC_GATE__UDEC_IT_MASK |
                                UVD_CGC_GATE__UDEC_DB_MASK |
                                UVD_CGC_GATE__UDEC_MP_MASK |
                                UVD_CGC_GATE__WCB_MASK |
                                UVD_CGC_GATE__VCPU_MASK |
                                UVD_CGC_GATE__SCPU_MASK;

        suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK |
                                UVD_SUVD_CGC_GATE__SIT_MASK |
                                UVD_SUVD_CGC_GATE__SMP_MASK |
                                UVD_SUVD_CGC_GATE__SCM_MASK |
                                UVD_SUVD_CGC_GATE__SDB_MASK;

        data |= cgc_flags;
        data1 |= suvd_flags;

        WREG32(mmUVD_CGC_GATE, data);
        WREG32(mmUVD_SUVD_CGC_GATE, data1);
}
#endif

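/*
 * Enable or disable UVD clock gating.  This is a no-op unless UVD MGCG is
 * supported.  Gating switches to the software clock-gating setup above;
 * ungating waits for the block to go idle first (the hardware gating path
 * is currently disabled).
 */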
static int uvd_v5_0_set_clockgating_state(void *handle,
                                          enum amd_clockgating_state state)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
        static int curstate = -1;

        if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG))
                return 0;

        if (curstate == state)
                return 0;

        curstate = state;
        if (enable) {
                /* disable HW gating and enable SW gating */
                uvd_v5_0_set_sw_clock_gating(adev);
        } else {
                /* wait for STATUS to clear */
                if (uvd_v5_0_wait_for_idle(handle))
                        return -EBUSY;

                /* enable HW gates because UVD is idle */
/*              uvd_v5_0_set_hw_clock_gating(adev); */
        }

        return 0;
}

static int uvd_v5_0_set_powergating_state(void *handle,
                                          enum amd_powergating_state state)
{
        /* This doesn't actually powergate the UVD block.
         * That's done in the dpm code via the SMC.  This
         * just re-inits the block as necessary.  The actual
         * gating still happens in the dpm code.  We should
         * revisit this when there is a cleaner line between
         * the smc and the hw blocks
         */
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        if (!(adev->pg_flags & AMD_PG_SUPPORT_UVD))
                return 0;

        if (state == AMD_PG_STATE_GATE) {
                uvd_v5_0_stop(adev);
                return 0;
        } else {
                return uvd_v5_0_start(adev);
        }
}

const struct amd_ip_funcs uvd_v5_0_ip_funcs = {
        .name = "uvd_v5_0",
        .early_init = uvd_v5_0_early_init,
        .late_init = NULL,
        .sw_init = uvd_v5_0_sw_init,
        .sw_fini = uvd_v5_0_sw_fini,
        .hw_init = uvd_v5_0_hw_init,
        .hw_fini = uvd_v5_0_hw_fini,
        .suspend = uvd_v5_0_suspend,
        .resume = uvd_v5_0_resume,
        .is_idle = uvd_v5_0_is_idle,
        .wait_for_idle = uvd_v5_0_wait_for_idle,
        .soft_reset = uvd_v5_0_soft_reset,
        .set_clockgating_state = uvd_v5_0_set_clockgating_state,
        .set_powergating_state = uvd_v5_0_set_powergating_state,
};

static const struct amdgpu_ring_funcs uvd_v5_0_ring_funcs = {
        .get_rptr = uvd_v5_0_ring_get_rptr,
        .get_wptr = uvd_v5_0_ring_get_wptr,
        .set_wptr = uvd_v5_0_ring_set_wptr,
        .parse_cs = amdgpu_uvd_ring_parse_cs,
        .emit_ib = uvd_v5_0_ring_emit_ib,
        .emit_fence = uvd_v5_0_ring_emit_fence,
        .emit_hdp_flush = uvd_v5_0_ring_emit_hdp_flush,
        .emit_hdp_invalidate = uvd_v5_0_ring_emit_hdp_invalidate,
        .test_ring = uvd_v5_0_ring_test_ring,
        .test_ib = amdgpu_uvd_ring_test_ib,
        .insert_nop = amdgpu_ring_insert_nop,
        .pad_ib = amdgpu_ring_generic_pad_ib,
        .begin_use = amdgpu_uvd_ring_begin_use,
        .end_use = amdgpu_uvd_ring_end_use,
        .get_emit_ib_size = uvd_v5_0_ring_get_emit_ib_size,
        .get_dma_frame_size = uvd_v5_0_ring_get_dma_frame_size,
};

static void uvd_v5_0_set_ring_funcs(struct amdgpu_device *adev)
{
        adev->uvd.ring.funcs = &uvd_v5_0_ring_funcs;
}

static const struct amdgpu_irq_src_funcs uvd_v5_0_irq_funcs = {
        .set = uvd_v5_0_set_interrupt_state,
        .process = uvd_v5_0_process_interrupt,
};

static void uvd_v5_0_set_irq_funcs(struct amdgpu_device *adev)
{
        adev->uvd.irq.num_types = 1;
        adev->uvd.irq.funcs = &uvd_v5_0_irq_funcs;
}