linux/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
   1/*
   2 * Copyright 2016 Advanced Micro Devices, Inc.
   3 *
   4 * Permission is hereby granted, free of charge, to any person obtaining a
   5 * copy of this software and associated documentation files (the "Software"),
   6 * to deal in the Software without restriction, including without limitation
   7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   8 * and/or sell copies of the Software, and to permit persons to whom the
   9 * Software is furnished to do so, subject to the following conditions:
  10 *
  11 * The above copyright notice and this permission notice shall be included in
  12 * all copies or substantial portions of the Software.
  13 *
  14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  20 * OTHER DEALINGS IN THE SOFTWARE.
  21 *
  22 */
  23
  24#include <linux/firmware.h>
  25
  26#include "amdgpu.h"
  27#include "amdgpu_uvd.h"
  28#include "soc15.h"
  29#include "soc15d.h"
  30#include "soc15_common.h"
  31#include "mmsch_v1_0.h"
  32
  33#include "uvd/uvd_7_0_offset.h"
  34#include "uvd/uvd_7_0_sh_mask.h"
  35#include "vce/vce_4_0_offset.h"
  36#include "vce/vce_4_0_default.h"
  37#include "vce/vce_4_0_sh_mask.h"
  38#include "nbif/nbif_6_1_offset.h"
  39#include "mmhub/mmhub_1_0_offset.h"
  40#include "mmhub/mmhub_1_0_sh_mask.h"
  41#include "ivsrcid/uvd/irqsrcs_uvd_7_0.h"
  42
  43#define mmUVD_PG0_CC_UVD_HARVESTING                                                                    0x00c7
  44#define mmUVD_PG0_CC_UVD_HARVESTING_BASE_IDX                                                           1
  45//UVD_PG0_CC_UVD_HARVESTING
  46#define UVD_PG0_CC_UVD_HARVESTING__UVD_DISABLE__SHIFT                                                         0x1
  47#define UVD_PG0_CC_UVD_HARVESTING__UVD_DISABLE_MASK                                                           0x00000002L
  48
  49#define UVD7_MAX_HW_INSTANCES_VEGA20                    2
  50
  51static void uvd_v7_0_set_ring_funcs(struct amdgpu_device *adev);
  52static void uvd_v7_0_set_enc_ring_funcs(struct amdgpu_device *adev);
  53static void uvd_v7_0_set_irq_funcs(struct amdgpu_device *adev);
  54static int uvd_v7_0_start(struct amdgpu_device *adev);
  55static void uvd_v7_0_stop(struct amdgpu_device *adev);
  56static int uvd_v7_0_sriov_start(struct amdgpu_device *adev);
  57
  58static int amdgpu_ih_clientid_uvds[] = {
  59        SOC15_IH_CLIENTID_UVD,
  60        SOC15_IH_CLIENTID_UVD1
  61};
  62
  63/**
  64 * uvd_v7_0_ring_get_rptr - get read pointer
  65 *
  66 * @ring: amdgpu_ring pointer
  67 *
  68 * Returns the current hardware read pointer
  69 */
  70static uint64_t uvd_v7_0_ring_get_rptr(struct amdgpu_ring *ring)
  71{
  72        struct amdgpu_device *adev = ring->adev;
  73
  74        return RREG32_SOC15(UVD, ring->me, mmUVD_RBC_RB_RPTR);
  75}
  76
  77/**
  78 * uvd_v7_0_enc_ring_get_rptr - get enc read pointer
  79 *
  80 * @ring: amdgpu_ring pointer
  81 *
  82 * Returns the current hardware enc read pointer
  83 */
  84static uint64_t uvd_v7_0_enc_ring_get_rptr(struct amdgpu_ring *ring)
  85{
  86        struct amdgpu_device *adev = ring->adev;
  87
  88        if (ring == &adev->uvd.inst[ring->me].ring_enc[0])
  89                return RREG32_SOC15(UVD, ring->me, mmUVD_RB_RPTR);
  90        else
  91                return RREG32_SOC15(UVD, ring->me, mmUVD_RB_RPTR2);
  92}
  93
  94/**
  95 * uvd_v7_0_ring_get_wptr - get write pointer
  96 *
  97 * @ring: amdgpu_ring pointer
  98 *
  99 * Returns the current hardware write pointer
 100 */
 101static uint64_t uvd_v7_0_ring_get_wptr(struct amdgpu_ring *ring)
 102{
 103        struct amdgpu_device *adev = ring->adev;
 104
 105        return RREG32_SOC15(UVD, ring->me, mmUVD_RBC_RB_WPTR);
 106}
 107
 108/**
 109 * uvd_v7_0_enc_ring_get_wptr - get enc write pointer
 110 *
 111 * @ring: amdgpu_ring pointer
 112 *
 113 * Returns the current hardware enc write pointer
 114 */
 115static uint64_t uvd_v7_0_enc_ring_get_wptr(struct amdgpu_ring *ring)
 116{
 117        struct amdgpu_device *adev = ring->adev;
 118
 119        if (ring->use_doorbell)
 120                return adev->wb.wb[ring->wptr_offs];
 121
 122        if (ring == &adev->uvd.inst[ring->me].ring_enc[0])
 123                return RREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR);
 124        else
 125                return RREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR2);
 126}
 127
 128/**
 129 * uvd_v7_0_ring_set_wptr - set write pointer
 130 *
 131 * @ring: amdgpu_ring pointer
 132 *
 133 * Commits the write pointer to the hardware
 134 */
 135static void uvd_v7_0_ring_set_wptr(struct amdgpu_ring *ring)
 136{
 137        struct amdgpu_device *adev = ring->adev;
 138
 139        WREG32_SOC15(UVD, ring->me, mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
 140}
 141
 142/**
 143 * uvd_v7_0_enc_ring_set_wptr - set enc write pointer
 144 *
 145 * @ring: amdgpu_ring pointer
 146 *
 147 * Commits the enc write pointer to the hardware
 148 */
 149static void uvd_v7_0_enc_ring_set_wptr(struct amdgpu_ring *ring)
 150{
 151        struct amdgpu_device *adev = ring->adev;
 152
 153        if (ring->use_doorbell) {
 154                /* XXX check if swapping is necessary on BE */
 155                adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
 156                WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
 157                return;
 158        }
 159
 160        if (ring == &adev->uvd.inst[ring->me].ring_enc[0])
 161                WREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR,
 162                        lower_32_bits(ring->wptr));
 163        else
 164                WREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR2,
 165                        lower_32_bits(ring->wptr));
 166}
 167
 168/**
 169 * uvd_v7_0_enc_ring_test_ring - test if UVD ENC ring is working
 170 *
 171 * @ring: the engine to test on
 172 *
 173 */
 174static int uvd_v7_0_enc_ring_test_ring(struct amdgpu_ring *ring)
 175{
 176        struct amdgpu_device *adev = ring->adev;
 177        uint32_t rptr;
 178        unsigned i;
 179        int r;
 180
 181        if (amdgpu_sriov_vf(adev))
 182                return 0;
 183
 184        r = amdgpu_ring_alloc(ring, 16);
 185        if (r)
 186                return r;
 187
 188        rptr = amdgpu_ring_get_rptr(ring);
 189
 190        amdgpu_ring_write(ring, HEVC_ENC_CMD_END);
 191        amdgpu_ring_commit(ring);
 192
 193        for (i = 0; i < adev->usec_timeout; i++) {
 194                if (amdgpu_ring_get_rptr(ring) != rptr)
 195                        break;
 196                udelay(1);
 197        }
 198
 199        if (i >= adev->usec_timeout)
 200                r = -ETIMEDOUT;
 201
 202        return r;
 203}
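/*
 * Unlike the decode ring test further down, which round-trips a scratch
 * value through UVD_CONTEXT_ID, the ENC test above only submits
 * HEVC_ENC_CMD_END and waits for the hardware read pointer to move.  A
 * minimal usage sketch, assuming a fully initialized adev (callers
 * normally go through amdgpu_ring_test_helper(), which also updates the
 * ring's ready state):
 *
 *     int r = amdgpu_ring_test_helper(&adev->uvd.inst[0].ring_enc[0]);
 *     if (r)
 *             dev_err(adev->dev, "UVD ENC ring test failed (%d)\n", r);
 */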
 204
 205/**
 206 * uvd_v7_0_enc_get_create_msg - generate a UVD ENC create msg
 207 *
 208 * @ring: ring we should submit the msg to
 209 * @handle: session handle to use
 210 * @bo: amdgpu object for which we query the offset
 211 * @fence: optional fence to return
 212 *
 213 * Open up a stream for HW test
 214 */
 215static int uvd_v7_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
 216                                       struct amdgpu_bo *bo,
 217                                       struct dma_fence **fence)
 218{
 219        const unsigned ib_size_dw = 16;
 220        struct amdgpu_job *job;
 221        struct amdgpu_ib *ib;
 222        struct dma_fence *f = NULL;
 223        uint64_t addr;
 224        int i, r;
 225
 226        r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
 227                                        AMDGPU_IB_POOL_DIRECT, &job);
 228        if (r)
 229                return r;
 230
 231        ib = &job->ibs[0];
 232        addr = amdgpu_bo_gpu_offset(bo);
 233
 234        ib->length_dw = 0;
 235        ib->ptr[ib->length_dw++] = 0x00000018;
 236        ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
 237        ib->ptr[ib->length_dw++] = handle;
 238        ib->ptr[ib->length_dw++] = 0x00000000;
 239        ib->ptr[ib->length_dw++] = upper_32_bits(addr);
 240        ib->ptr[ib->length_dw++] = addr;
 241
 242        ib->ptr[ib->length_dw++] = 0x00000014;
 243        ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
 244        ib->ptr[ib->length_dw++] = 0x0000001c;
 245        ib->ptr[ib->length_dw++] = 0x00000000;
 246        ib->ptr[ib->length_dw++] = 0x00000000;
 247
 248        ib->ptr[ib->length_dw++] = 0x00000008;
 249        ib->ptr[ib->length_dw++] = 0x08000001; /* op initialize */
 250
 251        for (i = ib->length_dw; i < ib_size_dw; ++i)
 252                ib->ptr[i] = 0x0;
 253
 254        r = amdgpu_job_submit_direct(job, ring, &f);
 255        if (r)
 256                goto err;
 257
 258        if (fence)
 259                *fence = dma_fence_get(f);
 260        dma_fence_put(f);
 261        return 0;
 262
 263err:
 264        amdgpu_job_free(job);
 265        return r;
 266}
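/*
 * The create message is three packets, each prefixed with its byte size:
 * a 0x18-byte session-info packet (type 0x00000001, the session handle and
 * the GPU address of @bo as high/low dwords), a 0x14-byte task-info packet
 * (type 0x00000002) and an 8-byte "op initialize" packet (0x08000001); the
 * rest of the 16-dword IB is zero padding.
 */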
 267
 268/**
 269 * uvd_v7_0_enc_get_destroy_msg - generate a UVD ENC destroy msg
 270 *
 271 * @ring: ring we should submit the msg to
 272 * @handle: session handle to use
 273 * @bo: amdgpu object for which we query the offset
 274 * @fence: optional fence to return
 275 *
 276 * Close up a stream for HW test or if userspace failed to do so
 277 */
 278static int uvd_v7_0_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
 279                                        struct amdgpu_bo *bo,
 280                                        struct dma_fence **fence)
 281{
 282        const unsigned ib_size_dw = 16;
 283        struct amdgpu_job *job;
 284        struct amdgpu_ib *ib;
 285        struct dma_fence *f = NULL;
 286        uint64_t addr;
 287        int i, r;
 288
 289        r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
 290                                        AMDGPU_IB_POOL_DIRECT, &job);
 291        if (r)
 292                return r;
 293
 294        ib = &job->ibs[0];
 295        addr = amdgpu_bo_gpu_offset(bo);
 296
 297        ib->length_dw = 0;
 298        ib->ptr[ib->length_dw++] = 0x00000018;
 299        ib->ptr[ib->length_dw++] = 0x00000001;
 300        ib->ptr[ib->length_dw++] = handle;
 301        ib->ptr[ib->length_dw++] = 0x00000000;
 302        ib->ptr[ib->length_dw++] = upper_32_bits(addr);
 303        ib->ptr[ib->length_dw++] = addr;
 304
 305        ib->ptr[ib->length_dw++] = 0x00000014;
 306        ib->ptr[ib->length_dw++] = 0x00000002;
 307        ib->ptr[ib->length_dw++] = 0x0000001c;
 308        ib->ptr[ib->length_dw++] = 0x00000000;
 309        ib->ptr[ib->length_dw++] = 0x00000000;
 310
 311        ib->ptr[ib->length_dw++] = 0x00000008;
 312        ib->ptr[ib->length_dw++] = 0x08000002; /* op close session */
 313
 314        for (i = ib->length_dw; i < ib_size_dw; ++i)
 315                ib->ptr[i] = 0x0;
 316
 317        r = amdgpu_job_submit_direct(job, ring, &f);
 318        if (r)
 319                goto err;
 320
 321        if (fence)
 322                *fence = dma_fence_get(f);
 323        dma_fence_put(f);
 324        return 0;
 325
 326err:
 327        amdgpu_job_free(job);
 328        return r;
 329}
 330
 331/**
 332 * uvd_v7_0_enc_ring_test_ib - test if UVD ENC IBs are working
 333 *
 334 * @ring: the engine to test on
 335 * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
 336 *
 337 */
 338static int uvd_v7_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 339{
 340        struct dma_fence *fence = NULL;
 341        struct amdgpu_bo *bo = NULL;
 342        long r;
 343
 344        r = amdgpu_bo_create_reserved(ring->adev, 128 * 1024, PAGE_SIZE,
 345                                      AMDGPU_GEM_DOMAIN_VRAM,
 346                                      &bo, NULL, NULL);
 347        if (r)
 348                return r;
 349
 350        r = uvd_v7_0_enc_get_create_msg(ring, 1, bo, NULL);
 351        if (r)
 352                goto error;
 353
 354        r = uvd_v7_0_enc_get_destroy_msg(ring, 1, bo, &fence);
 355        if (r)
 356                goto error;
 357
 358        r = dma_fence_wait_timeout(fence, false, timeout);
 359        if (r == 0)
 360                r = -ETIMEDOUT;
 361        else if (r > 0)
 362                r = 0;
 363
 364error:
 365        dma_fence_put(fence);
 366        amdgpu_bo_unpin(bo);
 367        amdgpu_bo_unreserve(bo);
 368        amdgpu_bo_unref(&bo);
 369        return r;
 370}
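/*
 * The IB test opens a throw-away session (handle 1) with a create message,
 * immediately closes it with a destroy message and waits on the destroy
 * fence.  A minimal sketch of calling it directly, assuming the ring has
 * already passed its ring test:
 *
 *     long r = uvd_v7_0_enc_ring_test_ib(ring, msecs_to_jiffies(1000));
 *     if (r)
 *             dev_err(ring->adev->dev, "UVD ENC IB test failed (%ld)\n", r);
 */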
 371
 372static int uvd_v7_0_early_init(void *handle)
 373{
 374        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 375
 376        if (adev->asic_type == CHIP_VEGA20) {
 377                u32 harvest;
 378                int i;
 379
 380                adev->uvd.num_uvd_inst = UVD7_MAX_HW_INSTANCES_VEGA20;
 381                for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
 382                        harvest = RREG32_SOC15(UVD, i, mmUVD_PG0_CC_UVD_HARVESTING);
 383                        if (harvest & UVD_PG0_CC_UVD_HARVESTING__UVD_DISABLE_MASK) {
 384                                adev->uvd.harvest_config |= 1 << i;
 385                        }
 386                }
 387                if (adev->uvd.harvest_config == (AMDGPU_UVD_HARVEST_UVD0 |
 388                                                 AMDGPU_UVD_HARVEST_UVD1))
 389                        /* both instances are harvested, disable the block */
 390                        return -ENOENT;
 391        } else {
 392                adev->uvd.num_uvd_inst = 1;
 393        }
 394
 395        if (amdgpu_sriov_vf(adev))
 396                adev->uvd.num_enc_rings = 1;
 397        else
 398                adev->uvd.num_enc_rings = 2;
 399        uvd_v7_0_set_ring_funcs(adev);
 400        uvd_v7_0_set_enc_ring_funcs(adev);
 401        uvd_v7_0_set_irq_funcs(adev);
 402
 403        return 0;
 404}
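/*
 * On Vega20 each instance's UVD_PG0_CC_UVD_HARVESTING register is read and
 * bit i of adev->uvd.harvest_config is set when instance i is fused off.
 * For example, harvest_config == AMDGPU_UVD_HARVEST_UVD1 leaves only
 * instance 0 usable; when both instances are harvested the block returns
 * -ENOENT and is skipped entirely.
 */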
 405
 406static int uvd_v7_0_sw_init(void *handle)
 407{
 408        struct amdgpu_ring *ring;
 409
 410        int i, j, r;
 411        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 412
 413        for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
 414                if (adev->uvd.harvest_config & (1 << j))
 415                        continue;
 416                /* UVD TRAP */
 417                r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_uvds[j], UVD_7_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT, &adev->uvd.inst[j].irq);
 418                if (r)
 419                        return r;
 420
 421                /* UVD ENC TRAP */
 422                for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
 423                        r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_uvds[j], i + UVD_7_0__SRCID__UVD_ENC_GEN_PURP, &adev->uvd.inst[j].irq);
 424                        if (r)
 425                                return r;
 426                }
 427        }
 428
 429        r = amdgpu_uvd_sw_init(adev);
 430        if (r)
 431                return r;
 432
 433        if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
 434                const struct common_firmware_header *hdr;
 435                hdr = (const struct common_firmware_header *)adev->uvd.fw->data;
 436                adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].ucode_id = AMDGPU_UCODE_ID_UVD;
 437                adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].fw = adev->uvd.fw;
 438                adev->firmware.fw_size +=
 439                        ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
 440
 441                if (adev->uvd.num_uvd_inst == UVD7_MAX_HW_INSTANCES_VEGA20) {
 442                        adev->firmware.ucode[AMDGPU_UCODE_ID_UVD1].ucode_id = AMDGPU_UCODE_ID_UVD1;
 443                        adev->firmware.ucode[AMDGPU_UCODE_ID_UVD1].fw = adev->uvd.fw;
 444                        adev->firmware.fw_size +=
 445                                ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
 446                }
 447                DRM_INFO("PSP loading UVD firmware\n");
 448        }
 449
 450        for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
 451                if (adev->uvd.harvest_config & (1 << j))
 452                        continue;
 453                if (!amdgpu_sriov_vf(adev)) {
 454                        ring = &adev->uvd.inst[j].ring;
 455                        sprintf(ring->name, "uvd_%d", ring->me);
 456                        r = amdgpu_ring_init(adev, ring, 512,
 457                                             &adev->uvd.inst[j].irq, 0,
 458                                             AMDGPU_RING_PRIO_DEFAULT, NULL);
 459                        if (r)
 460                                return r;
 461                }
 462
 463                for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
 464                        ring = &adev->uvd.inst[j].ring_enc[i];
 465                        sprintf(ring->name, "uvd_enc_%d.%d", ring->me, i);
 466                        if (amdgpu_sriov_vf(adev)) {
 467                                ring->use_doorbell = true;
 468
  469                                /* currently only the first encoding ring is used for
  470                                 * sriov, so set an unused location for the other rings.
  471                                 */
 472                                if (i == 0)
 473                                        ring->doorbell_index = adev->doorbell_index.uvd_vce.uvd_ring0_1 * 2;
 474                                else
 475                                        ring->doorbell_index = adev->doorbell_index.uvd_vce.uvd_ring2_3 * 2 + 1;
 476                        }
 477                        r = amdgpu_ring_init(adev, ring, 512,
 478                                             &adev->uvd.inst[j].irq, 0,
 479                                             AMDGPU_RING_PRIO_DEFAULT, NULL);
 480                        if (r)
 481                                return r;
 482                }
 483        }
 484
 485        r = amdgpu_uvd_resume(adev);
 486        if (r)
 487                return r;
 488
 489        r = amdgpu_uvd_entity_init(adev);
 490        if (r)
 491                return r;
 492
 493        r = amdgpu_virt_alloc_mm_table(adev);
 494        if (r)
 495                return r;
 496
 497        return r;
 498}
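/*
 * Under SR-IOV the decode ring is never initialized (see the
 * !amdgpu_sriov_vf() check above) and only the first encode ring is
 * actually used: it gets the uvd_ring0_1 doorbell, while the remaining
 * encode rings are parked on a placeholder doorbell index.  On bare metal
 * all rings use register-based write pointers instead of doorbells.
 */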
 499
 500static int uvd_v7_0_sw_fini(void *handle)
 501{
 502        int i, j, r;
 503        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 504
 505        amdgpu_virt_free_mm_table(adev);
 506
 507        r = amdgpu_uvd_suspend(adev);
 508        if (r)
 509                return r;
 510
 511        for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
 512                if (adev->uvd.harvest_config & (1 << j))
 513                        continue;
 514                for (i = 0; i < adev->uvd.num_enc_rings; ++i)
 515                        amdgpu_ring_fini(&adev->uvd.inst[j].ring_enc[i]);
 516        }
 517        return amdgpu_uvd_sw_fini(adev);
 518}
 519
 520/**
 521 * uvd_v7_0_hw_init - start and test UVD block
 522 *
 523 * @handle: handle used to pass amdgpu_device pointer
 524 *
 525 * Initialize the hardware, boot up the VCPU and do some testing
 526 */
 527static int uvd_v7_0_hw_init(void *handle)
 528{
 529        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 530        struct amdgpu_ring *ring;
 531        uint32_t tmp;
 532        int i, j, r;
 533
 534        if (amdgpu_sriov_vf(adev))
 535                r = uvd_v7_0_sriov_start(adev);
 536        else
 537                r = uvd_v7_0_start(adev);
 538        if (r)
 539                goto done;
 540
 541        for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
 542                if (adev->uvd.harvest_config & (1 << j))
 543                        continue;
 544                ring = &adev->uvd.inst[j].ring;
 545
 546                if (!amdgpu_sriov_vf(adev)) {
 547                        r = amdgpu_ring_test_helper(ring);
 548                        if (r)
 549                                goto done;
 550
 551                        r = amdgpu_ring_alloc(ring, 10);
 552                        if (r) {
 553                                DRM_ERROR("amdgpu: (%d)ring failed to lock UVD ring (%d).\n", j, r);
 554                                goto done;
 555                        }
 556
 557                        tmp = PACKET0(SOC15_REG_OFFSET(UVD, j,
 558                                mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL), 0);
 559                        amdgpu_ring_write(ring, tmp);
 560                        amdgpu_ring_write(ring, 0xFFFFF);
 561
 562                        tmp = PACKET0(SOC15_REG_OFFSET(UVD, j,
 563                                mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL), 0);
 564                        amdgpu_ring_write(ring, tmp);
 565                        amdgpu_ring_write(ring, 0xFFFFF);
 566
 567                        tmp = PACKET0(SOC15_REG_OFFSET(UVD, j,
 568                                mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL), 0);
 569                        amdgpu_ring_write(ring, tmp);
 570                        amdgpu_ring_write(ring, 0xFFFFF);
 571
 572                        /* Clear timeout status bits */
 573                        amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, j,
 574                                mmUVD_SEMA_TIMEOUT_STATUS), 0));
 575                        amdgpu_ring_write(ring, 0x8);
 576
 577                        amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, j,
 578                                mmUVD_SEMA_CNTL), 0));
 579                        amdgpu_ring_write(ring, 3);
 580
 581                        amdgpu_ring_commit(ring);
 582                }
 583
 584                for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
 585                        ring = &adev->uvd.inst[j].ring_enc[i];
 586                        r = amdgpu_ring_test_helper(ring);
 587                        if (r)
 588                                goto done;
 589                }
 590        }
 591done:
 592        if (!r)
 593                DRM_INFO("UVD and UVD ENC initialized successfully.\n");
 594
 595        return r;
 596}
 597
 598/**
 599 * uvd_v7_0_hw_fini - stop the hardware block
 600 *
 601 * @handle: handle used to pass amdgpu_device pointer
 602 *
 603 * Stop the UVD block, mark ring as not ready any more
 604 */
 605static int uvd_v7_0_hw_fini(void *handle)
 606{
 607        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 608
 609        /*
 610         * Proper cleanups before halting the HW engine:
 611         *   - cancel the delayed idle work
 612         *   - enable powergating
 613         *   - enable clockgating
 614         *   - disable dpm
 615         *
 616         * TODO: to align with the VCN implementation, move the
 617         * jobs for clockgating/powergating/dpm setting to
 618         * ->set_powergating_state().
 619         */
 620        cancel_delayed_work_sync(&adev->uvd.idle_work);
 621
 622        if (adev->pm.dpm_enabled) {
 623                amdgpu_dpm_enable_uvd(adev, false);
 624        } else {
 625                amdgpu_asic_set_uvd_clocks(adev, 0, 0);
 626                /* shutdown the UVD block */
 627                amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
 628                                                       AMD_PG_STATE_GATE);
 629                amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
 630                                                       AMD_CG_STATE_GATE);
 631        }
 632
 633        if (!amdgpu_sriov_vf(adev))
 634                uvd_v7_0_stop(adev);
 635        else {
 636                /* full access mode, so don't touch any UVD register */
 637                DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
 638        }
 639
 640        return 0;
 641}
 642
 643static int uvd_v7_0_suspend(void *handle)
 644{
 645        int r;
 646        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 647
 648        r = uvd_v7_0_hw_fini(adev);
 649        if (r)
 650                return r;
 651
 652        return amdgpu_uvd_suspend(adev);
 653}
 654
 655static int uvd_v7_0_resume(void *handle)
 656{
 657        int r;
 658        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 659
 660        r = amdgpu_uvd_resume(adev);
 661        if (r)
 662                return r;
 663
 664        return uvd_v7_0_hw_init(adev);
 665}
 666
 667/**
 668 * uvd_v7_0_mc_resume - memory controller programming
 669 *
 670 * @adev: amdgpu_device pointer
 671 *
  672 * Let the UVD memory controller know its offsets
 673 */
 674static void uvd_v7_0_mc_resume(struct amdgpu_device *adev)
 675{
 676        uint32_t size = AMDGPU_UVD_FIRMWARE_SIZE(adev);
 677        uint32_t offset;
 678        int i;
 679
 680        for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
 681                if (adev->uvd.harvest_config & (1 << i))
 682                        continue;
 683                if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
 684                        WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
 685                                i == 0 ?
 686                                adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].tmr_mc_addr_lo:
 687                                adev->firmware.ucode[AMDGPU_UCODE_ID_UVD1].tmr_mc_addr_lo);
 688                        WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
 689                                i == 0 ?
 690                                adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].tmr_mc_addr_hi:
 691                                adev->firmware.ucode[AMDGPU_UCODE_ID_UVD1].tmr_mc_addr_hi);
 692                        WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET0, 0);
 693                        offset = 0;
 694                } else {
 695                        WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
 696                                lower_32_bits(adev->uvd.inst[i].gpu_addr));
 697                        WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
 698                                upper_32_bits(adev->uvd.inst[i].gpu_addr));
 699                        offset = size;
 700                        WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET0,
 701                                        AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
 702                }
 703
 704                WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_SIZE0, size);
 705
 706                WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
 707                                lower_32_bits(adev->uvd.inst[i].gpu_addr + offset));
 708                WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
 709                                upper_32_bits(adev->uvd.inst[i].gpu_addr + offset));
 710                WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET1, (1 << 21));
 711                WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_UVD_HEAP_SIZE);
 712
 713                WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
 714                                lower_32_bits(adev->uvd.inst[i].gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE));
 715                WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
 716                                upper_32_bits(adev->uvd.inst[i].gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE));
 717                WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET2, (2 << 21));
 718                WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_SIZE2,
 719                                AMDGPU_UVD_STACK_SIZE + (AMDGPU_UVD_SESSION_SIZE * 40));
 720
 721                WREG32_SOC15(UVD, i, mmUVD_UDEC_ADDR_CONFIG,
 722                                adev->gfx.config.gb_addr_config);
 723                WREG32_SOC15(UVD, i, mmUVD_UDEC_DB_ADDR_CONFIG,
 724                                adev->gfx.config.gb_addr_config);
 725                WREG32_SOC15(UVD, i, mmUVD_UDEC_DBW_ADDR_CONFIG,
 726                                adev->gfx.config.gb_addr_config);
 727
 728                WREG32_SOC15(UVD, i, mmUVD_GP_SCRATCH4, adev->uvd.max_handles);
 729        }
 730}
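/*
 * The resulting VCPU address map: cache window 0 covers the firmware image
 * (taken from the PSP TMR when PSP-loaded, otherwise from the driver's UVD
 * BO at AMDGPU_UVD_FIRMWARE_OFFSET), window 1 covers the
 * AMDGPU_UVD_HEAP_SIZE heap that follows it, and window 2 covers the stack
 * plus 40 session contexts (AMDGPU_UVD_STACK_SIZE + 40 *
 * AMDGPU_UVD_SESSION_SIZE).
 */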
 731
 732static int uvd_v7_0_mmsch_start(struct amdgpu_device *adev,
 733                                struct amdgpu_mm_table *table)
 734{
 735        uint32_t data = 0, loop;
 736        uint64_t addr = table->gpu_addr;
 737        struct mmsch_v1_0_init_header *header = (struct mmsch_v1_0_init_header *)table->cpu_addr;
 738        uint32_t size;
 739        int i;
 740
 741        size = header->header_size + header->vce_table_size + header->uvd_table_size;
 742
 743        /* 1, write to vce_mmsch_vf_ctx_addr_lo/hi register with GPU mc addr of memory descriptor location */
 744        WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_CTX_ADDR_LO, lower_32_bits(addr));
 745        WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_CTX_ADDR_HI, upper_32_bits(addr));
 746
 747        /* 2, update vmid of descriptor */
 748        data = RREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_VMID);
 749        data &= ~VCE_MMSCH_VF_VMID__VF_CTX_VMID_MASK;
 750        data |= (0 << VCE_MMSCH_VF_VMID__VF_CTX_VMID__SHIFT); /* use domain0 for MM scheduler */
 751        WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_VMID, data);
 752
 753        /* 3, notify mmsch about the size of this descriptor */
 754        WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_CTX_SIZE, size);
 755
 756        /* 4, set resp to zero */
 757        WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_RESP, 0);
 758
 759        for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
 760                if (adev->uvd.harvest_config & (1 << i))
 761                        continue;
 762                WDOORBELL32(adev->uvd.inst[i].ring_enc[0].doorbell_index, 0);
 763                adev->wb.wb[adev->uvd.inst[i].ring_enc[0].wptr_offs] = 0;
 764                adev->uvd.inst[i].ring_enc[0].wptr = 0;
 765                adev->uvd.inst[i].ring_enc[0].wptr_old = 0;
 766        }
 767        /* 5, kick off the initialization and wait until VCE_MMSCH_VF_MAILBOX_RESP becomes non-zero */
 768        WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_HOST, 0x10000001);
 769
 770        data = RREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_RESP);
 771        loop = 1000;
 772        while ((data & 0x10000002) != 0x10000002) {
 773                udelay(10);
 774                data = RREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_RESP);
 775                loop--;
 776                if (!loop)
 777                        break;
 778        }
 779
 780        if (!loop) {
 781                dev_err(adev->dev, "failed to init MMSCH, mmVCE_MMSCH_VF_MAILBOX_RESP = %x\n", data);
 782                return -EBUSY;
 783        }
 784
 785        return 0;
 786}
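/*
 * The MMSCH handshake, in order: (1) the GPU address of the init table is
 * written to VCE_MMSCH_VF_CTX_ADDR_LO/HI, (2) the descriptor is tagged
 * with VMID 0, (3) the combined header/VCE/UVD table size is programmed,
 * (4) the response mailbox is cleared and the enc ring write pointers and
 * doorbells are zeroed, then (5) 0x10000001 is written to the host mailbox
 * and the response mailbox is polled (up to 1000 * 10us) for 0x10000002.
 */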
 787
 788static int uvd_v7_0_sriov_start(struct amdgpu_device *adev)
 789{
 790        struct amdgpu_ring *ring;
 791        uint32_t offset, size, tmp;
 792        uint32_t table_size = 0;
 793        struct mmsch_v1_0_cmd_direct_write direct_wt = { {0} };
 794        struct mmsch_v1_0_cmd_direct_read_modify_write direct_rd_mod_wt = { {0} };
 795        struct mmsch_v1_0_cmd_direct_polling direct_poll = { {0} };
 796        struct mmsch_v1_0_cmd_end end = { {0} };
 797        uint32_t *init_table = adev->virt.mm_table.cpu_addr;
 798        struct mmsch_v1_0_init_header *header = (struct mmsch_v1_0_init_header *)init_table;
 799        uint8_t i = 0;
 800
 801        direct_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_WRITE;
 802        direct_rd_mod_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_READ_MODIFY_WRITE;
 803        direct_poll.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_POLLING;
 804        end.cmd_header.command_type = MMSCH_COMMAND__END;
 805
 806        if (header->uvd_table_offset == 0 && header->uvd_table_size == 0) {
 807                header->version = MMSCH_VERSION;
 808                header->header_size = sizeof(struct mmsch_v1_0_init_header) >> 2;
 809
 810                if (header->vce_table_offset == 0 && header->vce_table_size == 0)
 811                        header->uvd_table_offset = header->header_size;
 812                else
 813                        header->uvd_table_offset = header->vce_table_size + header->vce_table_offset;
 814
 815                init_table += header->uvd_table_offset;
 816
 817                for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
 818                        if (adev->uvd.harvest_config & (1 << i))
 819                                continue;
 820                        ring = &adev->uvd.inst[i].ring;
 821                        ring->wptr = 0;
 822                        size = AMDGPU_GPU_PAGE_ALIGN(adev->uvd.fw->size + 4);
 823
 824                        MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_STATUS),
 825                                                           0xFFFFFFFF, 0x00000004);
  826                        /* mc resume */
 827                        if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
 828                                MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i,
 829                                                        mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
 830                                                        adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].tmr_mc_addr_lo);
 831                                MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i,
 832                                                        mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
 833                                                        adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].tmr_mc_addr_hi);
 834                                MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0), 0);
 835                                offset = 0;
 836                        } else {
 837                                MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
 838                                                            lower_32_bits(adev->uvd.inst[i].gpu_addr));
 839                                MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
 840                                                            upper_32_bits(adev->uvd.inst[i].gpu_addr));
 841                                offset = size;
 842                                MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0),
 843                                                        AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
 844
 845                        }
 846
 847                        MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE0), size);
 848
 849                        MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
 850                                                    lower_32_bits(adev->uvd.inst[i].gpu_addr + offset));
 851                        MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
 852                                                    upper_32_bits(adev->uvd.inst[i].gpu_addr + offset));
 853                        MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET1), (1 << 21));
 854                        MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE1), AMDGPU_UVD_HEAP_SIZE);
 855
 856                        MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
 857                                                    lower_32_bits(adev->uvd.inst[i].gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE));
 858                        MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
 859                                                    upper_32_bits(adev->uvd.inst[i].gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE));
 860                        MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET2), (2 << 21));
 861                        MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE2),
 862                                                    AMDGPU_UVD_STACK_SIZE + (AMDGPU_UVD_SESSION_SIZE * 40));
 863
 864                        MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_GP_SCRATCH4), adev->uvd.max_handles);
  865                        /* mc resume end */
 866
 867                        /* disable clock gating */
 868                        MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_CGC_CTRL),
 869                                                           ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK, 0);
 870
  871                        /* disable interrupt */
 872                        MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_MASTINT_EN),
 873                                                           ~UVD_MASTINT_EN__VCPU_EN_MASK, 0);
 874
 875                        /* stall UMC and register bus before resetting VCPU */
 876                        MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_CTRL2),
 877                                                           ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK,
 878                                                           UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
 879
 880                        /* put LMI, VCPU, RBC etc... into reset */
 881                        MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_SOFT_RESET),
 882                                                    (uint32_t)(UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
 883                                                               UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK |
 884                                                               UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK |
 885                                                               UVD_SOFT_RESET__RBC_SOFT_RESET_MASK |
 886                                                               UVD_SOFT_RESET__CSM_SOFT_RESET_MASK |
 887                                                               UVD_SOFT_RESET__CXW_SOFT_RESET_MASK |
 888                                                               UVD_SOFT_RESET__TAP_SOFT_RESET_MASK |
 889                                                               UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK));
 890
 891                        /* initialize UVD memory controller */
 892                        MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_CTRL),
 893                                                    (uint32_t)((0x40 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
 894                                                               UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
 895                                                               UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
 896                                                               UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
 897                                                               UVD_LMI_CTRL__REQ_MODE_MASK |
 898                                                               0x00100000L));
 899
 900                        /* take all subblocks out of reset, except VCPU */
 901                        MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_SOFT_RESET),
 902                                                    UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
 903
 904                        /* enable VCPU clock */
 905                        MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CNTL),
 906                                                    UVD_VCPU_CNTL__CLK_EN_MASK);
 907
 908                        /* enable master interrupt */
 909                        MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_MASTINT_EN),
 910                                                           ~(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK),
 911                                                           (UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK));
 912
 913                        /* clear the bit 4 of UVD_STATUS */
 914                        MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_STATUS),
 915                                                           ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT), 0);
 916
 917                        /* force RBC into idle state */
 918                        size = order_base_2(ring->ring_size);
 919                        tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, size);
 920                        tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
 921                        MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_RBC_RB_CNTL), tmp);
 922
 923                        ring = &adev->uvd.inst[i].ring_enc[0];
 924                        ring->wptr = 0;
 925                        MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_RB_BASE_LO), ring->gpu_addr);
 926                        MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_RB_BASE_HI), upper_32_bits(ring->gpu_addr));
 927                        MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_RB_SIZE), ring->ring_size / 4);
 928
 929                        /* boot up the VCPU */
 930                        MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_SOFT_RESET), 0);
 931
 932                        /* enable UMC */
 933                        MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_CTRL2),
 934                                                                                           ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK, 0);
 935
 936                        MMSCH_V1_0_INSERT_DIRECT_POLL(SOC15_REG_OFFSET(UVD, i, mmUVD_STATUS), 0x02, 0x02);
 937                }
 938                /* add end packet */
 939                memcpy((void *)init_table, &end, sizeof(struct mmsch_v1_0_cmd_end));
 940                table_size += sizeof(struct mmsch_v1_0_cmd_end) / 4;
 941                header->uvd_table_size = table_size;
 942
 943        }
 944        return uvd_v7_0_mmsch_start(adev, &adev->virt.mm_table);
 945}
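/*
 * Rather than touching registers directly, the SR-IOV start path queues
 * MMSCH direct-write, read-modify-write and poll commands into the shared
 * mm table with the MMSCH_V1_0_INSERT_* macros, mirroring the bare-metal
 * sequence in uvd_v7_0_start(), terminates the table with an end packet
 * and hands it to the MMSCH firmware via uvd_v7_0_mmsch_start().
 */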
 946
 947/**
 948 * uvd_v7_0_start - start UVD block
 949 *
 950 * @adev: amdgpu_device pointer
 951 *
 952 * Setup and start the UVD block
 953 */
 954static int uvd_v7_0_start(struct amdgpu_device *adev)
 955{
 956        struct amdgpu_ring *ring;
 957        uint32_t rb_bufsz, tmp;
 958        uint32_t lmi_swap_cntl;
 959        uint32_t mp_swap_cntl;
 960        int i, j, k, r;
 961
 962        for (k = 0; k < adev->uvd.num_uvd_inst; ++k) {
 963                if (adev->uvd.harvest_config & (1 << k))
 964                        continue;
 965                /* disable DPG */
 966                WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_POWER_STATUS), 0,
 967                                ~UVD_POWER_STATUS__UVD_PG_MODE_MASK);
 968        }
 969
 970        /* disable byte swapping */
 971        lmi_swap_cntl = 0;
 972        mp_swap_cntl = 0;
 973
 974        uvd_v7_0_mc_resume(adev);
 975
 976        for (k = 0; k < adev->uvd.num_uvd_inst; ++k) {
 977                if (adev->uvd.harvest_config & (1 << k))
 978                        continue;
 979                ring = &adev->uvd.inst[k].ring;
 980                /* disable clock gating */
 981                WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_CGC_CTRL), 0,
 982                                ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK);
 983
  984                /* disable interrupt */
 985                WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_MASTINT_EN), 0,
 986                                ~UVD_MASTINT_EN__VCPU_EN_MASK);
 987
 988                /* stall UMC and register bus before resetting VCPU */
 989                WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_LMI_CTRL2),
 990                                UVD_LMI_CTRL2__STALL_ARB_UMC_MASK,
 991                                ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
 992                mdelay(1);
 993
 994                /* put LMI, VCPU, RBC etc... into reset */
 995                WREG32_SOC15(UVD, k, mmUVD_SOFT_RESET,
 996                        UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
 997                        UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK |
 998                        UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK |
 999                        UVD_SOFT_RESET__RBC_SOFT_RESET_MASK |
1000                        UVD_SOFT_RESET__CSM_SOFT_RESET_MASK |
1001                        UVD_SOFT_RESET__CXW_SOFT_RESET_MASK |
1002                        UVD_SOFT_RESET__TAP_SOFT_RESET_MASK |
1003                        UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);
1004                mdelay(5);
1005
1006                /* initialize UVD memory controller */
1007                WREG32_SOC15(UVD, k, mmUVD_LMI_CTRL,
1008                        (0x40 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
1009                        UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
1010                        UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
1011                        UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
1012                        UVD_LMI_CTRL__REQ_MODE_MASK |
1013                        0x00100000L);
1014
1015#ifdef __BIG_ENDIAN
1016                /* swap (8 in 32) RB and IB */
1017                lmi_swap_cntl = 0xa;
1018                mp_swap_cntl = 0;
1019#endif
1020                WREG32_SOC15(UVD, k, mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl);
1021                WREG32_SOC15(UVD, k, mmUVD_MP_SWAP_CNTL, mp_swap_cntl);
1022
1023                WREG32_SOC15(UVD, k, mmUVD_MPC_SET_MUXA0, 0x40c2040);
1024                WREG32_SOC15(UVD, k, mmUVD_MPC_SET_MUXA1, 0x0);
1025                WREG32_SOC15(UVD, k, mmUVD_MPC_SET_MUXB0, 0x40c2040);
1026                WREG32_SOC15(UVD, k, mmUVD_MPC_SET_MUXB1, 0x0);
1027                WREG32_SOC15(UVD, k, mmUVD_MPC_SET_ALU, 0);
1028                WREG32_SOC15(UVD, k, mmUVD_MPC_SET_MUX, 0x88);
1029
1030                /* take all subblocks out of reset, except VCPU */
1031                WREG32_SOC15(UVD, k, mmUVD_SOFT_RESET,
1032                                UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
1033                mdelay(5);
1034
1035                /* enable VCPU clock */
1036                WREG32_SOC15(UVD, k, mmUVD_VCPU_CNTL,
1037                                UVD_VCPU_CNTL__CLK_EN_MASK);
1038
1039                /* enable UMC */
1040                WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_LMI_CTRL2), 0,
1041                                ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
1042
1043                /* boot up the VCPU */
1044                WREG32_SOC15(UVD, k, mmUVD_SOFT_RESET, 0);
1045                mdelay(10);
1046
1047                for (i = 0; i < 10; ++i) {
1048                        uint32_t status;
1049
1050                        for (j = 0; j < 100; ++j) {
1051                                status = RREG32_SOC15(UVD, k, mmUVD_STATUS);
1052                                if (status & 2)
1053                                        break;
1054                                mdelay(10);
1055                        }
1056                        r = 0;
1057                        if (status & 2)
1058                                break;
1059
1060                        DRM_ERROR("UVD(%d) not responding, trying to reset the VCPU!!!\n", k);
1061                        WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_SOFT_RESET),
1062                                        UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK,
1063                                        ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
1064                        mdelay(10);
1065                        WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_SOFT_RESET), 0,
1066                                        ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
1067                        mdelay(10);
1068                        r = -1;
1069                }
1070
1071                if (r) {
1072                        DRM_ERROR("UVD(%d) not responding, giving up!!!\n", k);
1073                        return r;
1074                }
1075                /* enable master interrupt */
1076                WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_MASTINT_EN),
1077                        (UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK),
1078                        ~(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK));
1079
1080                /* clear the bit 4 of UVD_STATUS */
1081                WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_STATUS), 0,
1082                                ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));
1083
1084                /* force RBC into idle state */
1085                rb_bufsz = order_base_2(ring->ring_size);
1086                tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
1087                tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
1088                tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
1089                tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_WPTR_POLL_EN, 0);
1090                tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
1091                tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
1092                WREG32_SOC15(UVD, k, mmUVD_RBC_RB_CNTL, tmp);
1093
1094                /* set the write pointer delay */
1095                WREG32_SOC15(UVD, k, mmUVD_RBC_RB_WPTR_CNTL, 0);
1096
1097                /* set the wb address */
1098                WREG32_SOC15(UVD, k, mmUVD_RBC_RB_RPTR_ADDR,
1099                                (upper_32_bits(ring->gpu_addr) >> 2));
1100
1101                /* program the RB_BASE for ring buffer */
1102                WREG32_SOC15(UVD, k, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
1103                                lower_32_bits(ring->gpu_addr));
1104                WREG32_SOC15(UVD, k, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
1105                                upper_32_bits(ring->gpu_addr));
1106
1107                /* Initialize the ring buffer's read and write pointers */
1108                WREG32_SOC15(UVD, k, mmUVD_RBC_RB_RPTR, 0);
1109
1110                ring->wptr = RREG32_SOC15(UVD, k, mmUVD_RBC_RB_RPTR);
1111                WREG32_SOC15(UVD, k, mmUVD_RBC_RB_WPTR,
1112                                lower_32_bits(ring->wptr));
1113
1114                WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_RBC_RB_CNTL), 0,
1115                                ~UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK);
1116
1117                ring = &adev->uvd.inst[k].ring_enc[0];
1118                WREG32_SOC15(UVD, k, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
1119                WREG32_SOC15(UVD, k, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
1120                WREG32_SOC15(UVD, k, mmUVD_RB_BASE_LO, ring->gpu_addr);
1121                WREG32_SOC15(UVD, k, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
1122                WREG32_SOC15(UVD, k, mmUVD_RB_SIZE, ring->ring_size / 4);
1123
1124                ring = &adev->uvd.inst[k].ring_enc[1];
1125                WREG32_SOC15(UVD, k, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
1126                WREG32_SOC15(UVD, k, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
1127                WREG32_SOC15(UVD, k, mmUVD_RB_BASE_LO2, ring->gpu_addr);
1128                WREG32_SOC15(UVD, k, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
1129                WREG32_SOC15(UVD, k, mmUVD_RB_SIZE2, ring->ring_size / 4);
1130        }
1131        return 0;
1132}
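/*
 * Boot sequence above in short: after release from soft reset the VCPU
 * gets up to 10 attempts to report ready, each attempt polling bit 1 of
 * UVD_STATUS for up to 100 * 10ms and pulsing the VCPU soft reset on
 * failure.  Only once the VCPU responds are the master interrupts
 * unmasked and the decode ring and both encode rings programmed.
 */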
1133
1134/**
1135 * uvd_v7_0_stop - stop UVD block
1136 *
1137 * @adev: amdgpu_device pointer
1138 *
1139 * stop the UVD block
1140 */
1141static void uvd_v7_0_stop(struct amdgpu_device *adev)
1142{
1143        uint8_t i = 0;
1144
1145        for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
1146                if (adev->uvd.harvest_config & (1 << i))
1147                        continue;
1148                /* force RBC into idle state */
1149                WREG32_SOC15(UVD, i, mmUVD_RBC_RB_CNTL, 0x11010101);
1150
1151                /* Stall UMC and register bus before resetting VCPU */
1152                WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_CTRL2),
1153                                UVD_LMI_CTRL2__STALL_ARB_UMC_MASK,
1154                                ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
1155                mdelay(1);
1156
1157                /* put VCPU into reset */
1158                WREG32_SOC15(UVD, i, mmUVD_SOFT_RESET,
1159                                UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
1160                mdelay(5);
1161
1162                /* disable VCPU clock */
1163                WREG32_SOC15(UVD, i, mmUVD_VCPU_CNTL, 0x0);
1164
1165                /* Unstall UMC and register bus */
1166                WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_CTRL2), 0,
1167                                ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
1168        }
1169}
1170
1171/**
 1172 * uvd_v7_0_ring_emit_fence - emit a fence & trap command
1173 *
1174 * @ring: amdgpu_ring pointer
1175 * @addr: address
1176 * @seq: sequence number
1177 * @flags: fence related flags
1178 *
1179 * Write a fence and a trap command to the ring.
1180 */
1181static void uvd_v7_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
1182                                     unsigned flags)
1183{
1184        struct amdgpu_device *adev = ring->adev;
1185
1186        WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
1187
1188        amdgpu_ring_write(ring,
1189                PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_CONTEXT_ID), 0));
1190        amdgpu_ring_write(ring, seq);
1191        amdgpu_ring_write(ring,
1192                PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA0), 0));
1193        amdgpu_ring_write(ring, addr & 0xffffffff);
1194        amdgpu_ring_write(ring,
1195                PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA1), 0));
1196        amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);
1197        amdgpu_ring_write(ring,
1198                PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_CMD), 0));
1199        amdgpu_ring_write(ring, 0);
1200
1201        amdgpu_ring_write(ring,
1202                PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA0), 0));
1203        amdgpu_ring_write(ring, 0);
1204        amdgpu_ring_write(ring,
1205                PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA1), 0));
1206        amdgpu_ring_write(ring, 0);
1207        amdgpu_ring_write(ring,
1208                PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_CMD), 0));
1209        amdgpu_ring_write(ring, 2);
1210}
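/*
 * On the wire this fence is six register/value pairs: UVD_CONTEXT_ID = seq,
 * GPCOM_VCPU_DATA0/DATA1 = fence address (low dword, then the low byte of
 * the upper dword), GPCOM_VCPU_CMD = 0 to emit the fence write, followed
 * by DATA0 = DATA1 = 0 and GPCOM_VCPU_CMD = 2 to raise the trap interrupt.
 */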
1211
1212/**
1213 * uvd_v7_0_enc_ring_emit_fence - emit an enc fence & trap command
1214 *
1215 * @ring: amdgpu_ring pointer
1216 * @addr: address
1217 * @seq: sequence number
1218 * @flags: fence related flags
1219 *
 1220 * Write an enc fence and a trap command to the ring.
1221 */
1222static void uvd_v7_0_enc_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
1223                        u64 seq, unsigned flags)
1224{
1225
1226        WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
1227
1228        amdgpu_ring_write(ring, HEVC_ENC_CMD_FENCE);
1229        amdgpu_ring_write(ring, addr);
1230        amdgpu_ring_write(ring, upper_32_bits(addr));
1231        amdgpu_ring_write(ring, seq);
1232        amdgpu_ring_write(ring, HEVC_ENC_CMD_TRAP);
1233}
1234
1235/**
1236 * uvd_v7_0_ring_emit_hdp_flush - skip HDP flushing
1237 *
1238 * @ring: amdgpu_ring pointer
1239 */
1240static void uvd_v7_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
1241{
1242        /* The firmware doesn't seem to like touching registers at this point. */
1243}
1244
1245/**
1246 * uvd_v7_0_ring_test_ring - register write test
1247 *
1248 * @ring: amdgpu_ring pointer
1249 *
1250 * Test if we can successfully write to the context register
1251 */
1252static int uvd_v7_0_ring_test_ring(struct amdgpu_ring *ring)
1253{
1254        struct amdgpu_device *adev = ring->adev;
1255        uint32_t tmp = 0;
1256        unsigned i;
1257        int r;
1258
1259        WREG32_SOC15(UVD, ring->me, mmUVD_CONTEXT_ID, 0xCAFEDEAD);
1260        r = amdgpu_ring_alloc(ring, 3);
1261        if (r)
1262                return r;
1263
1264        amdgpu_ring_write(ring,
1265                PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_CONTEXT_ID), 0));
1266        amdgpu_ring_write(ring, 0xDEADBEEF);
1267        amdgpu_ring_commit(ring);
1268        for (i = 0; i < adev->usec_timeout; i++) {
1269                tmp = RREG32_SOC15(UVD, ring->me, mmUVD_CONTEXT_ID);
1270                if (tmp == 0xDEADBEEF)
1271                        break;
1272                udelay(1);
1273        }
1274
1275        if (i >= adev->usec_timeout)
1276                r = -ETIMEDOUT;
1277
1278        return r;
1279}
1280
1281/**
1282 * uvd_v7_0_ring_patch_cs_in_place - Patch the IB for command submission.
1283 *
1284 * @p: the CS parser with the IBs
1285 * @ib_idx: which IB to patch
1286 *
1287 */
1288static int uvd_v7_0_ring_patch_cs_in_place(struct amdgpu_cs_parser *p,
1289                                           uint32_t ib_idx)
1290{
1291        struct amdgpu_ring *ring = to_amdgpu_ring(p->entity->rq->sched);
1292        struct amdgpu_ib *ib = &p->job->ibs[ib_idx];
1293        unsigned i;
1294
1295        /* No patching necessary for the first instance */
1296        if (!ring->me)
1297                return 0;
1298
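        /* Rewrite every register offset in the IB from the first UVD
         * instance to this ring's instance, since the IB was generated
         * against instance 0.
         */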
1299        for (i = 0; i < ib->length_dw; i += 2) {
1300                uint32_t reg = amdgpu_get_ib_value(p, ib_idx, i);
1301
1302                reg -= p->adev->reg_offset[UVD_HWIP][0][1];
1303                reg += p->adev->reg_offset[UVD_HWIP][1][1];
1304
1305                amdgpu_set_ib_value(p, ib_idx, i, reg);
1306        }
1307        return 0;
1308}
1309
1310/**
1311 * uvd_v7_0_ring_emit_ib - execute indirect buffer
1312 *
1313 * @ring: amdgpu_ring pointer
1314 * @job: job to retrieve vmid from
1315 * @ib: indirect buffer to execute
1316 * @flags: unused
1317 *
1318 * Write ring commands to execute the indirect buffer
1319 */
1320static void uvd_v7_0_ring_emit_ib(struct amdgpu_ring *ring,
1321                                  struct amdgpu_job *job,
1322                                  struct amdgpu_ib *ib,
1323                                  uint32_t flags)
1324{
1325        struct amdgpu_device *adev = ring->adev;
1326        unsigned vmid = AMDGPU_JOB_GET_VMID(job);
1327
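        /* Point the RBC at the IB: VMID first, then the 64-bit GPU address
         * split across the LMI BAR registers, then the length in dwords.
         */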
1328        amdgpu_ring_write(ring,
1329                PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_LMI_RBC_IB_VMID), 0));
1330        amdgpu_ring_write(ring, vmid);
1331
1332        amdgpu_ring_write(ring,
1333                PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_LMI_RBC_IB_64BIT_BAR_LOW), 0));
1334        amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
1335        amdgpu_ring_write(ring,
1336                PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH), 0));
1337        amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
1338        amdgpu_ring_write(ring,
1339                PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_RBC_IB_SIZE), 0));
1340        amdgpu_ring_write(ring, ib->length_dw);
1341}
1342
1343/**
1344 * uvd_v7_0_enc_ring_emit_ib - enc execute indirect buffer
1345 *
1346 * @ring: amdgpu_ring pointer
1347 * @job: job to retrieve vmid from
1348 * @ib: indirect buffer to execute
1349 * @flags: unused
1350 *
1351 * Write enc ring commands to execute the indirect buffer
1352 */
1353static void uvd_v7_0_enc_ring_emit_ib(struct amdgpu_ring *ring,
1354                                        struct amdgpu_job *job,
1355                                        struct amdgpu_ib *ib,
1356                                        uint32_t flags)
1357{
1358        unsigned vmid = AMDGPU_JOB_GET_VMID(job);
1359
1360        amdgpu_ring_write(ring, HEVC_ENC_CMD_IB_VM);
1361        amdgpu_ring_write(ring, vmid);
1362        amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
1363        amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
1364        amdgpu_ring_write(ring, ib->length_dw);
1365}
1366
1367static void uvd_v7_0_ring_emit_wreg(struct amdgpu_ring *ring,
1368                                    uint32_t reg, uint32_t val)
1369{
1370        struct amdgpu_device *adev = ring->adev;
1371
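        /* Register writes go through the VCPU mailbox: byte offset in
         * DATA0, value in DATA1, followed by command 8.
         */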
1372        amdgpu_ring_write(ring,
1373                PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA0), 0));
1374        amdgpu_ring_write(ring, reg << 2);
1375        amdgpu_ring_write(ring,
1376                PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA1), 0));
1377        amdgpu_ring_write(ring, val);
1378        amdgpu_ring_write(ring,
1379                PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_CMD), 0));
1380        amdgpu_ring_write(ring, 8);
1381}
1382
1383static void uvd_v7_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
1384                                        uint32_t val, uint32_t mask)
1385{
1386        struct amdgpu_device *adev = ring->adev;
1387
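        /* Register waits use the same mailbox: byte offset in DATA0,
         * expected value in DATA1, mask in GP_SCRATCH8, then command 12.
         */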
1388        amdgpu_ring_write(ring,
1389                PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA0), 0));
1390        amdgpu_ring_write(ring, reg << 2);
1391        amdgpu_ring_write(ring,
1392                PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA1), 0));
1393        amdgpu_ring_write(ring, val);
1394        amdgpu_ring_write(ring,
1395                PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GP_SCRATCH8), 0));
1396        amdgpu_ring_write(ring, mask);
1397        amdgpu_ring_write(ring,
1398                PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_CMD), 0));
1399        amdgpu_ring_write(ring, 12);
1400}
1401
1402static void uvd_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
1403                                        unsigned vmid, uint64_t pd_addr)
1404{
1405        struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
1406        uint32_t data0, data1, mask;
1407
1408        pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
1409
1410        /* wait for reg writes */
1411        data0 = hub->ctx0_ptb_addr_lo32 + vmid * hub->ctx_addr_distance;
1412        data1 = lower_32_bits(pd_addr);
1413        mask = 0xffffffff;
1414        uvd_v7_0_ring_emit_reg_wait(ring, data0, data1, mask);
1415}
1416
1417static void uvd_v7_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
1418{
1419        struct amdgpu_device *adev = ring->adev;
1420        int i;
1421
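        /* Each NOP is a two dword PACKET0 write to UVD_NO_OP, so both the
         * current write pointer and the pad count must stay even.
         */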
1422        WARN_ON(ring->wptr % 2 || count % 2);
1423
1424        for (i = 0; i < count / 2; i++) {
1425                amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_NO_OP), 0));
1426                amdgpu_ring_write(ring, 0);
1427        }
1428}
1429
1430static void uvd_v7_0_enc_ring_insert_end(struct amdgpu_ring *ring)
1431{
1432        amdgpu_ring_write(ring, HEVC_ENC_CMD_END);
1433}
1434
1435static void uvd_v7_0_enc_ring_emit_reg_wait(struct amdgpu_ring *ring,
1436                                            uint32_t reg, uint32_t val,
1437                                            uint32_t mask)
1438{
1439        amdgpu_ring_write(ring, HEVC_ENC_CMD_REG_WAIT);
1440        amdgpu_ring_write(ring, reg << 2);
1441        amdgpu_ring_write(ring, mask);
1442        amdgpu_ring_write(ring, val);
1443}
1444
1445static void uvd_v7_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
1446                                            unsigned int vmid, uint64_t pd_addr)
1447{
1448        struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
1449
1450        pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
1451
1452        /* wait for reg writes */
1453        uvd_v7_0_enc_ring_emit_reg_wait(ring, hub->ctx0_ptb_addr_lo32 +
1454                                        vmid * hub->ctx_addr_distance,
1455                                        lower_32_bits(pd_addr), 0xffffffff);
1456}
1457
1458static void uvd_v7_0_enc_ring_emit_wreg(struct amdgpu_ring *ring,
1459                                        uint32_t reg, uint32_t val)
1460{
1461        amdgpu_ring_write(ring, HEVC_ENC_CMD_REG_WRITE);
1462        amdgpu_ring_write(ring, reg << 2);
1463        amdgpu_ring_write(ring, val);
1464}
1465
1466#if 0
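/* The idle/soft-reset helpers below are compiled out: several of them still
 * reference a local "ring" that is never declared here, so they would need
 * a per-instance rework before they could be re-enabled.
 */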
1467static bool uvd_v7_0_is_idle(void *handle)
1468{
1469        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1470
1471        return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK);
1472}
1473
1474static int uvd_v7_0_wait_for_idle(void *handle)
1475{
1476        unsigned i;
1477        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1478
1479        for (i = 0; i < adev->usec_timeout; i++) {
1480                if (uvd_v7_0_is_idle(handle))
1481                        return 0;
1482        }
1483        return -ETIMEDOUT;
1484}
1485
1486#define AMDGPU_UVD_STATUS_BUSY_MASK    0xfd
1487static bool uvd_v7_0_check_soft_reset(void *handle)
1488{
1489        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1490        u32 srbm_soft_reset = 0;
1491        u32 tmp = RREG32(mmSRBM_STATUS);
1492
1493        if (REG_GET_FIELD(tmp, SRBM_STATUS, UVD_RQ_PENDING) ||
1494            REG_GET_FIELD(tmp, SRBM_STATUS, UVD_BUSY) ||
1495            (RREG32_SOC15(UVD, ring->me, mmUVD_STATUS) &
1496                    AMDGPU_UVD_STATUS_BUSY_MASK))
1497                srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
1498                                SRBM_SOFT_RESET, SOFT_RESET_UVD, 1);
1499
1500        if (srbm_soft_reset) {
1501                adev->uvd.inst[ring->me].srbm_soft_reset = srbm_soft_reset;
1502                return true;
1503        } else {
1504                adev->uvd.inst[ring->me].srbm_soft_reset = 0;
1505                return false;
1506        }
1507}
1508
1509static int uvd_v7_0_pre_soft_reset(void *handle)
1510{
1511        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1512
1513        if (!adev->uvd.inst[ring->me].srbm_soft_reset)
1514                return 0;
1515
1516        uvd_v7_0_stop(adev);
1517        return 0;
1518}
1519
1520static int uvd_v7_0_soft_reset(void *handle)
1521{
1522        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1523        u32 srbm_soft_reset;
1524
1525        if (!adev->uvd.inst[ring->me].srbm_soft_reset)
1526                return 0;
1527        srbm_soft_reset = adev->uvd.inst[ring->me].srbm_soft_reset;
1528
1529        if (srbm_soft_reset) {
1530                u32 tmp;
1531
1532                tmp = RREG32(mmSRBM_SOFT_RESET);
1533                tmp |= srbm_soft_reset;
1534                dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
1535                WREG32(mmSRBM_SOFT_RESET, tmp);
1536                tmp = RREG32(mmSRBM_SOFT_RESET);
1537
1538                udelay(50);
1539
1540                tmp &= ~srbm_soft_reset;
1541                WREG32(mmSRBM_SOFT_RESET, tmp);
1542                tmp = RREG32(mmSRBM_SOFT_RESET);
1543
1544                /* Wait a little for things to settle down */
1545                udelay(50);
1546        }
1547
1548        return 0;
1549}
1550
1551static int uvd_v7_0_post_soft_reset(void *handle)
1552{
1553        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1554
1555        if (!adev->uvd.inst[ring->me].srbm_soft_reset)
1556                return 0;
1557
1558        mdelay(5);
1559
1560        return uvd_v7_0_start(adev);
1561}
1562#endif
1563
1564static int uvd_v7_0_set_interrupt_state(struct amdgpu_device *adev,
1565                                        struct amdgpu_irq_src *source,
1566                                        unsigned type,
1567                                        enum amdgpu_interrupt_state state)
1568{
1569        // TODO
1570        return 0;
1571}
1572
1573static int uvd_v7_0_process_interrupt(struct amdgpu_device *adev,
1574                                      struct amdgpu_irq_src *source,
1575                                      struct amdgpu_iv_entry *entry)
1576{
1577        uint32_t ip_instance;
1578
1579        switch (entry->client_id) {
1580        case SOC15_IH_CLIENTID_UVD:
1581                ip_instance = 0;
1582                break;
1583        case SOC15_IH_CLIENTID_UVD1:
1584                ip_instance = 1;
1585                break;
1586        default:
1587                DRM_ERROR("Unhandled client id: %d\n", entry->client_id);
1588                return 0;
1589        }
1590
1591        DRM_DEBUG("IH: UVD TRAP\n");
1592
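        /* src_id 124 is the decode ring trap, 119 and 120 the first and
         * second encode rings; the second encode ring is not handled
         * under SR-IOV.
         */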
1593        switch (entry->src_id) {
1594        case 124:
1595                amdgpu_fence_process(&adev->uvd.inst[ip_instance].ring);
1596                break;
1597        case 119:
1598                amdgpu_fence_process(&adev->uvd.inst[ip_instance].ring_enc[0]);
1599                break;
1600        case 120:
1601                if (!amdgpu_sriov_vf(adev))
1602                        amdgpu_fence_process(&adev->uvd.inst[ip_instance].ring_enc[1]);
1603                break;
1604        default:
1605                DRM_ERROR("Unhandled interrupt: %d %d\n",
1606                          entry->src_id, entry->src_data[0]);
1607                break;
1608        }
1609
1610        return 0;
1611}
1612
1613#if 0
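/* The legacy clock/power gating helpers below are likewise compiled out;
 * they also use "ring->me" without a ring in scope and would need the
 * same per-instance rework before being re-enabled.
 */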
1614static void uvd_v7_0_set_sw_clock_gating(struct amdgpu_device *adev)
1615{
1616        uint32_t data, data1, data2, suvd_flags;
1617
1618        data = RREG32_SOC15(UVD, ring->me, mmUVD_CGC_CTRL);
1619        data1 = RREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_GATE);
1620        data2 = RREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_CTRL);
1621
1622        data &= ~(UVD_CGC_CTRL__CLK_OFF_DELAY_MASK |
1623                  UVD_CGC_CTRL__CLK_GATE_DLY_TIMER_MASK);
1624
1625        suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK |
1626                     UVD_SUVD_CGC_GATE__SIT_MASK |
1627                     UVD_SUVD_CGC_GATE__SMP_MASK |
1628                     UVD_SUVD_CGC_GATE__SCM_MASK |
1629                     UVD_SUVD_CGC_GATE__SDB_MASK;
1630
1631        data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK |
1632                (1 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_GATE_DLY_TIMER)) |
1633                (4 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_OFF_DELAY));
1634
1635        data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
1636                        UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
1637                        UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
1638                        UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
1639                        UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
1640                        UVD_CGC_CTRL__SYS_MODE_MASK |
1641                        UVD_CGC_CTRL__UDEC_MODE_MASK |
1642                        UVD_CGC_CTRL__MPEG2_MODE_MASK |
1643                        UVD_CGC_CTRL__REGS_MODE_MASK |
1644                        UVD_CGC_CTRL__RBC_MODE_MASK |
1645                        UVD_CGC_CTRL__LMI_MC_MODE_MASK |
1646                        UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
1647                        UVD_CGC_CTRL__IDCT_MODE_MASK |
1648                        UVD_CGC_CTRL__MPRD_MODE_MASK |
1649                        UVD_CGC_CTRL__MPC_MODE_MASK |
1650                        UVD_CGC_CTRL__LBSI_MODE_MASK |
1651                        UVD_CGC_CTRL__LRBBM_MODE_MASK |
1652                        UVD_CGC_CTRL__WCB_MODE_MASK |
1653                        UVD_CGC_CTRL__VCPU_MODE_MASK |
1654                        UVD_CGC_CTRL__JPEG_MODE_MASK |
1655                        UVD_CGC_CTRL__JPEG2_MODE_MASK |
1656                        UVD_CGC_CTRL__SCPU_MODE_MASK);
1657        data2 &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK |
1658                        UVD_SUVD_CGC_CTRL__SIT_MODE_MASK |
1659                        UVD_SUVD_CGC_CTRL__SMP_MODE_MASK |
1660                        UVD_SUVD_CGC_CTRL__SCM_MODE_MASK |
1661                        UVD_SUVD_CGC_CTRL__SDB_MODE_MASK);
1662        data1 |= suvd_flags;
1663
1664        WREG32_SOC15(UVD, ring->me, mmUVD_CGC_CTRL, data);
1665        WREG32_SOC15(UVD, ring->me, mmUVD_CGC_GATE, 0);
1666        WREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_GATE, data1);
1667        WREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_CTRL, data2);
1668}
1669
1670static void uvd_v7_0_set_hw_clock_gating(struct amdgpu_device *adev)
1671{
1672        uint32_t data, data1, cgc_flags, suvd_flags;
1673
1674        data = RREG32_SOC15(UVD, ring->me, mmUVD_CGC_GATE);
1675        data1 = RREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_GATE);
1676
1677        cgc_flags = UVD_CGC_GATE__SYS_MASK |
1678                UVD_CGC_GATE__UDEC_MASK |
1679                UVD_CGC_GATE__MPEG2_MASK |
1680                UVD_CGC_GATE__RBC_MASK |
1681                UVD_CGC_GATE__LMI_MC_MASK |
1682                UVD_CGC_GATE__IDCT_MASK |
1683                UVD_CGC_GATE__MPRD_MASK |
1684                UVD_CGC_GATE__MPC_MASK |
1685                UVD_CGC_GATE__LBSI_MASK |
1686                UVD_CGC_GATE__LRBBM_MASK |
1687                UVD_CGC_GATE__UDEC_RE_MASK |
1688                UVD_CGC_GATE__UDEC_CM_MASK |
1689                UVD_CGC_GATE__UDEC_IT_MASK |
1690                UVD_CGC_GATE__UDEC_DB_MASK |
1691                UVD_CGC_GATE__UDEC_MP_MASK |
1692                UVD_CGC_GATE__WCB_MASK |
1693                UVD_CGC_GATE__VCPU_MASK |
1694                UVD_CGC_GATE__SCPU_MASK |
1695                UVD_CGC_GATE__JPEG_MASK |
1696                UVD_CGC_GATE__JPEG2_MASK;
1697
1698        suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK |
1699                                UVD_SUVD_CGC_GATE__SIT_MASK |
1700                                UVD_SUVD_CGC_GATE__SMP_MASK |
1701                                UVD_SUVD_CGC_GATE__SCM_MASK |
1702                                UVD_SUVD_CGC_GATE__SDB_MASK;
1703
1704        data |= cgc_flags;
1705        data1 |= suvd_flags;
1706
1707        WREG32_SOC15(UVD, ring->me, mmUVD_CGC_GATE, data);
1708        WREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_GATE, data1);
1709}
1710
1711static void uvd_v7_0_set_bypass_mode(struct amdgpu_device *adev, bool enable)
1712{
1713        u32 tmp = RREG32_SMC(ixGCK_DFS_BYPASS_CNTL);
1714
1715        if (enable)
1716                tmp |= (GCK_DFS_BYPASS_CNTL__BYPASSDCLK_MASK |
1717                        GCK_DFS_BYPASS_CNTL__BYPASSVCLK_MASK);
1718        else
1719                tmp &= ~(GCK_DFS_BYPASS_CNTL__BYPASSDCLK_MASK |
1720                         GCK_DFS_BYPASS_CNTL__BYPASSVCLK_MASK);
1721
1722        WREG32_SMC(ixGCK_DFS_BYPASS_CNTL, tmp);
1723}
1724
1725
1726static int uvd_v7_0_set_clockgating_state(void *handle,
1727                                          enum amd_clockgating_state state)
1728{
1729        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1730        bool enable = (state == AMD_CG_STATE_GATE);
1731
1732        uvd_v7_0_set_bypass_mode(adev, enable);
1733
1734        if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG))
1735                return 0;
1736
1737        if (enable) {
1738                /* disable HW gating and enable SW gating */
1739                uvd_v7_0_set_sw_clock_gating(adev);
1740        } else {
1741                /* wait for STATUS to clear */
1742                if (uvd_v7_0_wait_for_idle(handle))
1743                        return -EBUSY;
1744
1745                /* enable HW gates because UVD is idle */
1746                /* uvd_v7_0_set_hw_clock_gating(adev); */
1747        }
1748
1749        return 0;
1750}
1751
1752static int uvd_v7_0_set_powergating_state(void *handle,
1753                                          enum amd_powergating_state state)
1754{
1755        /* This doesn't actually powergate the UVD block.
1756         * That's done in the dpm code via the SMC.  This
1757         * just re-inits the block as necessary.  The actual
1758         * gating still happens in the dpm code.  We should
1759         * revisit this when there is a cleaner line between
1760         * the SMC and the hw blocks.
1761         */
1762        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1763
1764        if (!(adev->pg_flags & AMD_PG_SUPPORT_UVD))
1765                return 0;
1766
1767        WREG32_SOC15(UVD, ring->me, mmUVD_POWER_STATUS, UVD_POWER_STATUS__UVD_PG_EN_MASK);
1768
1769        if (state == AMD_PG_STATE_GATE) {
1770                uvd_v7_0_stop(adev);
1771                return 0;
1772        } else {
1773                return uvd_v7_0_start(adev);
1774        }
1775}
1776#endif
1777
1778static int uvd_v7_0_set_clockgating_state(void *handle,
1779                                          enum amd_clockgating_state state)
1780{
1781        /* needed for driver unload */
1782        return 0;
1783}
1784
1785const struct amd_ip_funcs uvd_v7_0_ip_funcs = {
1786        .name = "uvd_v7_0",
1787        .early_init = uvd_v7_0_early_init,
1788        .late_init = NULL,
1789        .sw_init = uvd_v7_0_sw_init,
1790        .sw_fini = uvd_v7_0_sw_fini,
1791        .hw_init = uvd_v7_0_hw_init,
1792        .hw_fini = uvd_v7_0_hw_fini,
1793        .suspend = uvd_v7_0_suspend,
1794        .resume = uvd_v7_0_resume,
1795        .is_idle = NULL /* uvd_v7_0_is_idle */,
1796        .wait_for_idle = NULL /* uvd_v7_0_wait_for_idle */,
1797        .check_soft_reset = NULL /* uvd_v7_0_check_soft_reset */,
1798        .pre_soft_reset = NULL /* uvd_v7_0_pre_soft_reset */,
1799        .soft_reset = NULL /* uvd_v7_0_soft_reset */,
1800        .post_soft_reset = NULL /* uvd_v7_0_post_soft_reset */,
1801        .set_clockgating_state = uvd_v7_0_set_clockgating_state,
1802        .set_powergating_state = NULL /* uvd_v7_0_set_powergating_state */,
1803};
1804
1805static const struct amdgpu_ring_funcs uvd_v7_0_ring_vm_funcs = {
1806        .type = AMDGPU_RING_TYPE_UVD,
1807        .align_mask = 0xf,
1808        .support_64bit_ptrs = false,
1809        .no_user_fence = true,
1810        .vmhub = AMDGPU_MMHUB_0,
1811        .get_rptr = uvd_v7_0_ring_get_rptr,
1812        .get_wptr = uvd_v7_0_ring_get_wptr,
1813        .set_wptr = uvd_v7_0_ring_set_wptr,
1814        .patch_cs_in_place = uvd_v7_0_ring_patch_cs_in_place,
1815        .emit_frame_size =
1816                6 + /* hdp invalidate */
1817                SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
1818                SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
1819                8 + /* uvd_v7_0_ring_emit_vm_flush */
1820                14 + 14, /* uvd_v7_0_ring_emit_fence x2 vm fence */
1821        .emit_ib_size = 8, /* uvd_v7_0_ring_emit_ib */
1822        .emit_ib = uvd_v7_0_ring_emit_ib,
1823        .emit_fence = uvd_v7_0_ring_emit_fence,
1824        .emit_vm_flush = uvd_v7_0_ring_emit_vm_flush,
1825        .emit_hdp_flush = uvd_v7_0_ring_emit_hdp_flush,
1826        .test_ring = uvd_v7_0_ring_test_ring,
1827        .test_ib = amdgpu_uvd_ring_test_ib,
1828        .insert_nop = uvd_v7_0_ring_insert_nop,
1829        .pad_ib = amdgpu_ring_generic_pad_ib,
1830        .begin_use = amdgpu_uvd_ring_begin_use,
1831        .end_use = amdgpu_uvd_ring_end_use,
1832        .emit_wreg = uvd_v7_0_ring_emit_wreg,
1833        .emit_reg_wait = uvd_v7_0_ring_emit_reg_wait,
1834        .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
1835};
1836
1837static const struct amdgpu_ring_funcs uvd_v7_0_enc_ring_vm_funcs = {
1838        .type = AMDGPU_RING_TYPE_UVD_ENC,
1839        .align_mask = 0x3f,
1840        .nop = HEVC_ENC_CMD_NO_OP,
1841        .support_64bit_ptrs = false,
1842        .no_user_fence = true,
1843        .vmhub = AMDGPU_MMHUB_0,
1844        .get_rptr = uvd_v7_0_enc_ring_get_rptr,
1845        .get_wptr = uvd_v7_0_enc_ring_get_wptr,
1846        .set_wptr = uvd_v7_0_enc_ring_set_wptr,
1847        .emit_frame_size =
1848                3 + 3 + /* hdp flush / invalidate */
1849                SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
1850                SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
1851                4 + /* uvd_v7_0_enc_ring_emit_vm_flush */
1852                5 + 5 + /* uvd_v7_0_enc_ring_emit_fence x2 vm fence */
1853                1, /* uvd_v7_0_enc_ring_insert_end */
1854        .emit_ib_size = 5, /* uvd_v7_0_enc_ring_emit_ib */
1855        .emit_ib = uvd_v7_0_enc_ring_emit_ib,
1856        .emit_fence = uvd_v7_0_enc_ring_emit_fence,
1857        .emit_vm_flush = uvd_v7_0_enc_ring_emit_vm_flush,
1858        .test_ring = uvd_v7_0_enc_ring_test_ring,
1859        .test_ib = uvd_v7_0_enc_ring_test_ib,
1860        .insert_nop = amdgpu_ring_insert_nop,
1861        .insert_end = uvd_v7_0_enc_ring_insert_end,
1862        .pad_ib = amdgpu_ring_generic_pad_ib,
1863        .begin_use = amdgpu_uvd_ring_begin_use,
1864        .end_use = amdgpu_uvd_ring_end_use,
1865        .emit_wreg = uvd_v7_0_enc_ring_emit_wreg,
1866        .emit_reg_wait = uvd_v7_0_enc_ring_emit_reg_wait,
1867        .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
1868};
1869
1870static void uvd_v7_0_set_ring_funcs(struct amdgpu_device *adev)
1871{
1872        int i;
1873
1874        for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
1875                if (adev->uvd.harvest_config & (1 << i))
1876                        continue;
1877                adev->uvd.inst[i].ring.funcs = &uvd_v7_0_ring_vm_funcs;
1878                adev->uvd.inst[i].ring.me = i;
1879                DRM_INFO("UVD(%d) is enabled in VM mode\n", i);
1880        }
1881}
1882
1883static void uvd_v7_0_set_enc_ring_funcs(struct amdgpu_device *adev)
1884{
1885        int i, j;
1886
1887        for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
1888                if (adev->uvd.harvest_config & (1 << j))
1889                        continue;
1890                for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
1891                        adev->uvd.inst[j].ring_enc[i].funcs = &uvd_v7_0_enc_ring_vm_funcs;
1892                        adev->uvd.inst[j].ring_enc[i].me = j;
1893                }
1894
1895                DRM_INFO("UVD(%d) ENC is enabled in VM mode\n", j);
1896        }
1897}
1898
1899static const struct amdgpu_irq_src_funcs uvd_v7_0_irq_funcs = {
1900        .set = uvd_v7_0_set_interrupt_state,
1901        .process = uvd_v7_0_process_interrupt,
1902};
1903
1904static void uvd_v7_0_set_irq_funcs(struct amdgpu_device *adev)
1905{
1906        int i;
1907
1908        for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
1909                if (adev->uvd.harvest_config & (1 << i))
1910                        continue;
1911                adev->uvd.inst[i].irq.num_types = adev->uvd.num_enc_rings + 1;
1912                adev->uvd.inst[i].irq.funcs = &uvd_v7_0_irq_funcs;
1913        }
1914}
1915
1916const struct amdgpu_ip_block_version uvd_v7_0_ip_block =
1917{
1918                .type = AMD_IP_BLOCK_TYPE_UVD,
1919                .major = 7,
1920                .minor = 0,
1921                .rev = 0,
1922                .funcs = &uvd_v7_0_ip_funcs,
1923};
1924