linux/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c
/*
 * Copyright 2020 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Sonny Jiang <sonny.jiang@amd.com>
 */

#include <linux/firmware.h>

#include "amdgpu.h"
#include "amdgpu_uvd.h"
#include "sid.h"

#include "uvd/uvd_3_1_d.h"
#include "uvd/uvd_3_1_sh_mask.h"

#include "oss/oss_1_0_d.h"
#include "oss/oss_1_0_sh_mask.h"

/**
 * uvd_v3_1_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint64_t uvd_v3_1_ring_get_rptr(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;

        return RREG32(mmUVD_RBC_RB_RPTR);
}

/**
 * uvd_v3_1_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint64_t uvd_v3_1_ring_get_wptr(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;

        return RREG32(mmUVD_RBC_RB_WPTR);
}

/**
 * uvd_v3_1_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void uvd_v3_1_ring_set_wptr(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;

        WREG32(mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
}

/**
 * uvd_v3_1_ring_emit_ib - execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @job: job associated with the indirect buffer
 * @ib: indirect buffer to execute
 * @flags: flags associated with the indirect buffer
 *
 * Write ring commands to execute the indirect buffer
 */
static void uvd_v3_1_ring_emit_ib(struct amdgpu_ring *ring,
                                  struct amdgpu_job *job,
                                  struct amdgpu_ib *ib,
                                  uint32_t flags)
{
        amdgpu_ring_write(ring, PACKET0(mmUVD_RBC_IB_BASE, 0));
        amdgpu_ring_write(ring, ib->gpu_addr);
        amdgpu_ring_write(ring, PACKET0(mmUVD_RBC_IB_SIZE, 0));
        amdgpu_ring_write(ring, ib->length_dw);
}

/**
 * uvd_v3_1_ring_emit_fence - emit a fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @addr: address
 * @seq: sequence number
 * @flags: fence related flags
 *
 * Write a fence and a trap command to the ring.
 */
static void uvd_v3_1_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
                                     unsigned flags)
{
        WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

        amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
        amdgpu_ring_write(ring, seq);
        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
        amdgpu_ring_write(ring, addr & 0xffffffff);
        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
        amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);
        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
        amdgpu_ring_write(ring, 0);

        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
        amdgpu_ring_write(ring, 0);
        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
        amdgpu_ring_write(ring, 0);
        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
        amdgpu_ring_write(ring, 2);
}

/**
 * uvd_v3_1_ring_test_ring - register write test
 *
 * @ring: amdgpu_ring pointer
 *
 * Test if we can successfully write to the context register
 */
static int uvd_v3_1_ring_test_ring(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;
        uint32_t tmp = 0;
        unsigned i;
        int r;

        WREG32(mmUVD_CONTEXT_ID, 0xCAFEDEAD);
        r = amdgpu_ring_alloc(ring, 3);
        if (r)
                return r;

        amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
        amdgpu_ring_write(ring, 0xDEADBEEF);
        amdgpu_ring_commit(ring);
        for (i = 0; i < adev->usec_timeout; i++) {
                tmp = RREG32(mmUVD_CONTEXT_ID);
                if (tmp == 0xDEADBEEF)
                        break;
                udelay(1);
        }

        if (i >= adev->usec_timeout)
                r = -ETIMEDOUT;

        return r;
}

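/**
 * uvd_v3_1_ring_insert_nop - insert NOP packets
 *
 * @ring: amdgpu_ring pointer
 * @count: number of dwords of padding to insert
 *
 * Pad the ring with UVD_NO_OP packets; each packet takes two dwords,
 * so @count (and the write pointer) must be even.
 */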
static void uvd_v3_1_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
        int i;

        WARN_ON(ring->wptr % 2 || count % 2);

        for (i = 0; i < count / 2; i++) {
                amdgpu_ring_write(ring, PACKET0(mmUVD_NO_OP, 0));
                amdgpu_ring_write(ring, 0);
        }
}

static const struct amdgpu_ring_funcs uvd_v3_1_ring_funcs = {
        .type = AMDGPU_RING_TYPE_UVD,
        .align_mask = 0xf,
        .support_64bit_ptrs = false,
        .no_user_fence = true,
        .get_rptr = uvd_v3_1_ring_get_rptr,
        .get_wptr = uvd_v3_1_ring_get_wptr,
        .set_wptr = uvd_v3_1_ring_set_wptr,
        .parse_cs = amdgpu_uvd_ring_parse_cs,
        .emit_frame_size =
                14, /* uvd_v3_1_ring_emit_fence x1 no user fence */
        .emit_ib_size = 4, /* uvd_v3_1_ring_emit_ib */
        .emit_ib = uvd_v3_1_ring_emit_ib,
        .emit_fence = uvd_v3_1_ring_emit_fence,
        .test_ring = uvd_v3_1_ring_test_ring,
        .test_ib = amdgpu_uvd_ring_test_ib,
        .insert_nop = uvd_v3_1_ring_insert_nop,
        .pad_ib = amdgpu_ring_generic_pad_ib,
        .begin_use = amdgpu_uvd_ring_begin_use,
        .end_use = amdgpu_uvd_ring_end_use,
};

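/* Install the v3.1 ring callbacks on the single UVD instance. */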
static void uvd_v3_1_set_ring_funcs(struct amdgpu_device *adev)
{
        adev->uvd.inst->ring.funcs = &uvd_v3_1_ring_funcs;
}

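/*
 * uvd_v3_1_set_dcm - program UVD dynamic clock mode
 *
 * Ungate register-bus clocks, then configure UVD_CGC_CTRL for dynamic
 * clock mode. With @sw_mode set, bits 0x7ffff800 (presumably the
 * per-block override field) are cleared and clock ramping is enabled
 * through UVD_CGC_CTRL2; otherwise those bits are set and ramping is
 * left disabled.
 */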
static void uvd_v3_1_set_dcm(struct amdgpu_device *adev,
                             bool sw_mode)
{
        u32 tmp, tmp2;

        WREG32_FIELD(UVD_CGC_GATE, REGS, 0);

        tmp = RREG32(mmUVD_CGC_CTRL);
        tmp &= ~(UVD_CGC_CTRL__CLK_OFF_DELAY_MASK | UVD_CGC_CTRL__CLK_GATE_DLY_TIMER_MASK);
        tmp |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK |
                (1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT) |
                (4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT);

        if (sw_mode) {
                tmp &= ~0x7ffff800;
                tmp2 = UVD_CGC_CTRL2__DYN_OCLK_RAMP_EN_MASK |
                        UVD_CGC_CTRL2__DYN_RCLK_RAMP_EN_MASK |
                        (7 << UVD_CGC_CTRL2__GATER_DIV_ID__SHIFT);
        } else {
                tmp |= 0x7ffff800;
                tmp2 = 0;
        }

        WREG32(mmUVD_CGC_CTRL, tmp);
        WREG32_UVD_CTX(ixUVD_CGC_CTRL2, tmp2);
}

/**
 * uvd_v3_1_mc_resume - memory controller programming
 *
 * @adev: amdgpu_device pointer
 *
 * Let the UVD memory controller know its offsets
 */
static void uvd_v3_1_mc_resume(struct amdgpu_device *adev)
{
        uint64_t addr;
        uint32_t size;

        /* program the VCPU memory controller bits 0-27 */
        addr = (adev->uvd.inst->gpu_addr + AMDGPU_UVD_FIRMWARE_OFFSET) >> 3;
        size = AMDGPU_UVD_FIRMWARE_SIZE(adev) >> 3;
        WREG32(mmUVD_VCPU_CACHE_OFFSET0, addr);
        WREG32(mmUVD_VCPU_CACHE_SIZE0, size);

        addr += size;
        size = AMDGPU_UVD_HEAP_SIZE >> 3;
        WREG32(mmUVD_VCPU_CACHE_OFFSET1, addr);
        WREG32(mmUVD_VCPU_CACHE_SIZE1, size);

        addr += size;
        size = (AMDGPU_UVD_STACK_SIZE +
                (AMDGPU_UVD_SESSION_SIZE * adev->uvd.max_handles)) >> 3;
        WREG32(mmUVD_VCPU_CACHE_OFFSET2, addr);
        WREG32(mmUVD_VCPU_CACHE_SIZE2, size);

        /* bits 28-31 */
        addr = (adev->uvd.inst->gpu_addr >> 28) & 0xF;
        WREG32(mmUVD_LMI_ADDR_EXT, (addr << 12) | (addr << 0));

        /* bits 32-39 */
        addr = (adev->uvd.inst->gpu_addr >> 32) & 0xFF;
        WREG32(mmUVD_LMI_EXT40_ADDR, addr | (0x9 << 16) | (0x1 << 31));

        WREG32(mmUVD_UDEC_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
        WREG32(mmUVD_UDEC_DB_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
        WREG32(mmUVD_UDEC_DBW_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
}


/**
 * uvd_v3_1_fw_validate - FW validation operation
 *
 * @adev: amdgpu_device pointer
 *
 * Initiate firmware validation and check the result.
 */
static int uvd_v3_1_fw_validate(struct amdgpu_device *adev)
{
        int i;
        uint32_t keysel = adev->uvd.keyselect;

        WREG32(mmUVD_FW_START, keysel);

        for (i = 0; i < 10; ++i) {
                mdelay(10);
                if (RREG32(mmUVD_FW_STATUS) & UVD_FW_STATUS__DONE_MASK)
                        break;
        }

        if (i == 10)
                return -ETIMEDOUT;

        if (!(RREG32(mmUVD_FW_STATUS) & UVD_FW_STATUS__PASS_MASK))
                return -EINVAL;

        for (i = 0; i < 10; ++i) {
                mdelay(10);
                if (!(RREG32(mmUVD_FW_STATUS) & UVD_FW_STATUS__BUSY_MASK))
                        break;
        }

        if (i == 10)
                return -ETIMEDOUT;

        return 0;
}

/**
 * uvd_v3_1_start - start UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * Set up and start the UVD block
 */
static int uvd_v3_1_start(struct amdgpu_device *adev)
{
        struct amdgpu_ring *ring = &adev->uvd.inst->ring;
        uint32_t rb_bufsz;
        int i, j, r;
        u32 tmp;
        /* disable byte swapping */
        u32 lmi_swap_cntl = 0;
        u32 mp_swap_cntl = 0;

        /* set uvd busy */
        WREG32_P(mmUVD_STATUS, 1 << 2, ~(1 << 2));

        uvd_v3_1_set_dcm(adev, true);
        WREG32(mmUVD_CGC_GATE, 0);

        /* take UVD block out of reset */
        WREG32_P(mmSRBM_SOFT_RESET, 0, ~SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK);
        mdelay(5);

        /* enable VCPU clock */
        WREG32(mmUVD_VCPU_CNTL, 1 << 9);

        /* disable interrupt */
        WREG32_P(mmUVD_MASTINT_EN, 0, ~(1 << 1));

#ifdef __BIG_ENDIAN
        /* swap (8 in 32) RB and IB */
        lmi_swap_cntl = 0xa;
        mp_swap_cntl = 0;
#endif
        WREG32(mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl);
        WREG32(mmUVD_MP_SWAP_CNTL, mp_swap_cntl);

        /* initialize UVD memory controller */
        WREG32(mmUVD_LMI_CTRL, 0x40 | (1 << 8) | (1 << 13) |
                (1 << 21) | (1 << 9) | (1 << 20));

        tmp = RREG32(mmUVD_MPC_CNTL);
        WREG32(mmUVD_MPC_CNTL, tmp | 0x10);

        WREG32(mmUVD_MPC_SET_MUXA0, 0x40c2040);
        WREG32(mmUVD_MPC_SET_MUXA1, 0x0);
        WREG32(mmUVD_MPC_SET_MUXB0, 0x40c2040);
        WREG32(mmUVD_MPC_SET_MUXB1, 0x0);
        WREG32(mmUVD_MPC_SET_ALU, 0);
        WREG32(mmUVD_MPC_SET_MUX, 0x88);

        tmp = RREG32_UVD_CTX(ixUVD_LMI_CACHE_CTRL);
        WREG32_UVD_CTX(ixUVD_LMI_CACHE_CTRL, tmp & (~0x10));

        /* enable UMC */
        WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8));

        WREG32_P(mmUVD_SOFT_RESET, 0, ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK);

        WREG32_P(mmUVD_SOFT_RESET, 0, ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);

        WREG32_P(mmUVD_SOFT_RESET, 0, ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);

        mdelay(10);

        for (i = 0; i < 10; ++i) {
                uint32_t status;

                for (j = 0; j < 100; ++j) {
                        status = RREG32(mmUVD_STATUS);
                        if (status & 2)
                                break;
                        mdelay(10);
                }
                r = 0;
                if (status & 2)
                        break;

                DRM_ERROR("UVD not responding, trying to reset the VCPU!!!\n");
                WREG32_P(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK,
                         ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
                mdelay(10);
                WREG32_P(mmUVD_SOFT_RESET, 0, ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
                mdelay(10);
                r = -1;
        }

        if (r) {
                DRM_ERROR("UVD not responding, giving up!!!\n");
                return r;
        }

        /* enable interrupt */
        WREG32_P(mmUVD_MASTINT_EN, 3 << 1, ~(3 << 1));

        WREG32_P(mmUVD_STATUS, 0, ~(1 << 2));

        /* force RBC into idle state */
        WREG32(mmUVD_RBC_RB_CNTL, 0x11010101);

        /* Set the write pointer delay */
        WREG32(mmUVD_RBC_RB_WPTR_CNTL, 0);

        /* program the 4GB memory segment for rptr and ring buffer */
        WREG32(mmUVD_LMI_EXT40_ADDR, upper_32_bits(ring->gpu_addr) |
               (0x7 << 16) | (0x1 << 31));

        /* Initialize the ring buffer's read and write pointers */
        WREG32(mmUVD_RBC_RB_RPTR, 0x0);

        ring->wptr = RREG32(mmUVD_RBC_RB_RPTR);
        WREG32(mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));

        /* set the ring address */
        WREG32(mmUVD_RBC_RB_BASE, ring->gpu_addr);

        /* Set ring buffer size */
        rb_bufsz = order_base_2(ring->ring_size);
        rb_bufsz = (0x1 << 8) | rb_bufsz;
        WREG32_P(mmUVD_RBC_RB_CNTL, rb_bufsz, ~0x11f1f);

        return 0;
}

/**
 * uvd_v3_1_stop - stop UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * Stop the UVD block
 */
static void uvd_v3_1_stop(struct amdgpu_device *adev)
{
        uint32_t i, j;
        uint32_t status;

        WREG32(mmUVD_RBC_RB_CNTL, 0x11010101);

        for (i = 0; i < 10; ++i) {
                for (j = 0; j < 100; ++j) {
                        status = RREG32(mmUVD_STATUS);
                        if (status & 2)
                                break;
                        mdelay(1);
                }
                if (status & 2)
                        break;
        }

        for (i = 0; i < 10; ++i) {
                for (j = 0; j < 100; ++j) {
                        status = RREG32(mmUVD_LMI_STATUS);
                        if (status & 0xf)
                                break;
                        mdelay(1);
                }
                if (status & 0xf)
                        break;
        }

        /* Stall UMC and register bus before resetting VCPU */
        WREG32_P(mmUVD_LMI_CTRL2, 1 << 8, ~(1 << 8));

        for (i = 0; i < 10; ++i) {
                for (j = 0; j < 100; ++j) {
                        status = RREG32(mmUVD_LMI_STATUS);
                        if (status & 0x240)
                                break;
                        mdelay(1);
                }
                if (status & 0x240)
                        break;
        }

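        /* 0x3D49 is presumably mmUVD_STATUS: clear the busy bit set in uvd_v3_1_start() */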
        WREG32_P(0x3D49, 0, ~(1 << 2));

        WREG32_P(mmUVD_VCPU_CNTL, 0, ~(1 << 9));

        /* put LMI, VCPU, RBC etc... into reset */
        WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
                UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK |
                UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);

        WREG32(mmUVD_STATUS, 0);

        uvd_v3_1_set_dcm(adev, false);
}

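/* Per-source interrupt state changes are not supported; accept and ignore them. */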
static int uvd_v3_1_set_interrupt_state(struct amdgpu_device *adev,
                                        struct amdgpu_irq_src *source,
                                        unsigned type,
                                        enum amdgpu_interrupt_state state)
{
        return 0;
}

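/* Handle a UVD trap interrupt by running fence processing on the ring. */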
static int uvd_v3_1_process_interrupt(struct amdgpu_device *adev,
                                      struct amdgpu_irq_src *source,
                                      struct amdgpu_iv_entry *entry)
{
        DRM_DEBUG("IH: UVD TRAP\n");
        amdgpu_fence_process(&adev->uvd.inst->ring);
        return 0;
}

static const struct amdgpu_irq_src_funcs uvd_v3_1_irq_funcs = {
        .set = uvd_v3_1_set_interrupt_state,
        .process = uvd_v3_1_process_interrupt,
};

static void uvd_v3_1_set_irq_funcs(struct amdgpu_device *adev)
{
        adev->uvd.inst->irq.num_types = 1;
        adev->uvd.inst->irq.funcs = &uvd_v3_1_irq_funcs;
}

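/* Advertise the single UVD instance and install the ring and IRQ callbacks. */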
static int uvd_v3_1_early_init(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        adev->uvd.num_uvd_inst = 1;

        uvd_v3_1_set_ring_funcs(adev);
        uvd_v3_1_set_irq_funcs(adev);

        return 0;
}

static int uvd_v3_1_sw_init(void *handle)
{
        struct amdgpu_ring *ring;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        int r;
        void *ptr;
        uint32_t ucode_len;

        /* UVD TRAP */
        r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 124, &adev->uvd.inst->irq);
        if (r)
                return r;

        r = amdgpu_uvd_sw_init(adev);
        if (r)
                return r;

        ring = &adev->uvd.inst->ring;
        sprintf(ring->name, "uvd");
        r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0,
                             AMDGPU_RING_PRIO_DEFAULT, NULL);
        if (r)
                return r;

        r = amdgpu_uvd_resume(adev);
        if (r)
                return r;

        /* Retrieve the firmware validation key */
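        /*
         * Offsets below are assumed from the UVD firmware image layout:
         * the 32-bit ucode length sits 208 (192 + 16) bytes into the
         * image, and the key select word follows the ucode itself.
         */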
        ptr = adev->uvd.inst[0].cpu_addr;
        ptr += 192 + 16;
        memcpy(&ucode_len, ptr, 4);
        ptr += ucode_len;
        memcpy(&adev->uvd.keyselect, ptr, 4);

        r = amdgpu_uvd_entity_init(adev);

        return r;
}

static int uvd_v3_1_sw_fini(void *handle)
{
        int r;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        r = amdgpu_uvd_suspend(adev);
        if (r)
                return r;

        return amdgpu_uvd_sw_fini(adev);
}

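/*
 * uvd_v3_1_enable_mgcg - toggle UVD medium-grain clock gating
 *
 * When MGCG is supported and requested, enable the memory clock gates
 * (bits 0x3fff in ixUVD_CGC_MEM_CTRL) and dynamic clock mode; otherwise
 * force both off.
 */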
static void uvd_v3_1_enable_mgcg(struct amdgpu_device *adev,
                                 bool enable)
{
        u32 orig, data;

        if (enable && (adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG)) {
                data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL);
                data |= 0x3fff;
                WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data);

                orig = data = RREG32(mmUVD_CGC_CTRL);
                data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
                if (orig != data)
                        WREG32(mmUVD_CGC_CTRL, data);
        } else {
                data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL);
                data &= ~0x3fff;
                WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data);

                orig = data = RREG32(mmUVD_CGC_CTRL);
                data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
                if (orig != data)
                        WREG32(mmUVD_CGC_CTRL, data);
        }
}


/**
 * uvd_v3_1_hw_init - start and test UVD block
 *
 * @handle: handle used to pass amdgpu_device pointer
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */
static int uvd_v3_1_hw_init(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        struct amdgpu_ring *ring = &adev->uvd.inst->ring;
        uint32_t tmp;
        int r;

        uvd_v3_1_mc_resume(adev);

        r = uvd_v3_1_fw_validate(adev);
        if (r) {
                DRM_ERROR("amdgpu: UVD firmware validation failed (%d).\n", r);
                return r;
        }

        uvd_v3_1_enable_mgcg(adev, true);
        amdgpu_asic_set_uvd_clocks(adev, 53300, 40000);

        uvd_v3_1_start(adev);

        r = amdgpu_ring_test_helper(ring);
        if (r) {
                DRM_ERROR("amdgpu: UVD ring test fail (%d).\n", r);
                goto done;
        }

        r = amdgpu_ring_alloc(ring, 10);
        if (r) {
                DRM_ERROR("amdgpu: failed to lock UVD ring (%d).\n", r);
                goto done;
        }

        tmp = PACKET0(mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL, 0);
        amdgpu_ring_write(ring, tmp);
        amdgpu_ring_write(ring, 0xFFFFF);

        tmp = PACKET0(mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL, 0);
        amdgpu_ring_write(ring, tmp);
        amdgpu_ring_write(ring, 0xFFFFF);

        tmp = PACKET0(mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL, 0);
        amdgpu_ring_write(ring, tmp);
        amdgpu_ring_write(ring, 0xFFFFF);

        /* Clear timeout status bits */
        amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_TIMEOUT_STATUS, 0));
        amdgpu_ring_write(ring, 0x8);

        amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_CNTL, 0));
        amdgpu_ring_write(ring, 3);

        amdgpu_ring_commit(ring);

done:
        if (!r)
                DRM_INFO("UVD initialized successfully.\n");

        return r;
}

/**
 * uvd_v3_1_hw_fini - stop the hardware block
 *
 * @handle: handle used to pass amdgpu_device pointer
 *
 * Stop the UVD block and mark the ring as no longer ready
 */
static int uvd_v3_1_hw_fini(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        /*
         * Proper cleanups before halting the HW engine:
         *   - cancel the delayed idle work
         *   - enable powergating
         *   - enable clockgating
         *   - disable dpm
         *
         * TODO: to align with the VCN implementation, move the
         * jobs for clockgating/powergating/dpm setting to
         * ->set_powergating_state().
         */
        cancel_delayed_work_sync(&adev->uvd.idle_work);

        if (adev->pm.dpm_enabled) {
                amdgpu_dpm_enable_uvd(adev, false);
        } else {
                amdgpu_asic_set_uvd_clocks(adev, 0, 0);
                /* shutdown the UVD block */
                amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
                                                       AMD_PG_STATE_GATE);
                amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
                                                       AMD_CG_STATE_GATE);
        }

        if (RREG32(mmUVD_STATUS) != 0)
                uvd_v3_1_stop(adev);

        return 0;
}

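/* Tear down the hardware and save the firmware/ring state for suspend. */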
static int uvd_v3_1_suspend(void *handle)
{
        int r;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        r = uvd_v3_1_hw_fini(adev);
        if (r)
                return r;

        return amdgpu_uvd_suspend(adev);
}

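/* Restore the firmware image and bring the block back up. */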
static int uvd_v3_1_resume(void *handle)
{
        int r;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        r = amdgpu_uvd_resume(adev);
        if (r)
                return r;

        return uvd_v3_1_hw_init(adev);
}

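/* Report idle based on the UVD busy bit in SRBM_STATUS. */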
static bool uvd_v3_1_is_idle(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK);
}

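/* Poll SRBM_STATUS until the UVD busy bit clears or the timeout expires. */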
static int uvd_v3_1_wait_for_idle(void *handle)
{
        unsigned i;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        for (i = 0; i < adev->usec_timeout; i++) {
                if (!(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK))
                        return 0;
        }
        return -ETIMEDOUT;
}

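/* Stop the block, pulse the SRBM soft reset line, then restart it. */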
static int uvd_v3_1_soft_reset(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        uvd_v3_1_stop(adev);

        WREG32_P(mmSRBM_SOFT_RESET, SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK,
                 ~SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK);
        mdelay(5);

        return uvd_v3_1_start(adev);
}

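/*
 * Clock- and powergating are currently driven directly from
 * uvd_v3_1_start()/uvd_v3_1_stop(), so these two handlers are stubs;
 * see the TODO in uvd_v3_1_hw_fini().
 */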
static int uvd_v3_1_set_clockgating_state(void *handle,
                                          enum amd_clockgating_state state)
{
        return 0;
}

static int uvd_v3_1_set_powergating_state(void *handle,
                                          enum amd_powergating_state state)
{
        return 0;
}

static const struct amd_ip_funcs uvd_v3_1_ip_funcs = {
        .name = "uvd_v3_1",
        .early_init = uvd_v3_1_early_init,
        .late_init = NULL,
        .sw_init = uvd_v3_1_sw_init,
        .sw_fini = uvd_v3_1_sw_fini,
        .hw_init = uvd_v3_1_hw_init,
        .hw_fini = uvd_v3_1_hw_fini,
        .suspend = uvd_v3_1_suspend,
        .resume = uvd_v3_1_resume,
        .is_idle = uvd_v3_1_is_idle,
        .wait_for_idle = uvd_v3_1_wait_for_idle,
        .soft_reset = uvd_v3_1_soft_reset,
        .set_clockgating_state = uvd_v3_1_set_clockgating_state,
        .set_powergating_state = uvd_v3_1_set_powergating_state,
};

const struct amdgpu_ip_block_version uvd_v3_1_ip_block = {
        .type = AMD_IP_BLOCK_TYPE_UVD,
        .major = 3,
        .minor = 1,
        .rev = 0,
        .funcs = &uvd_v3_1_ip_funcs,
};