linux/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */

#include <linux/firmware.h>
#include <linux/module.h>
#include <drm/drmP.h>
#include <drm/drm.h>

#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_vcn.h"
#include "soc15d.h"
#include "soc15_common.h"

#include "vega10/soc15ip.h"
#include "raven1/VCN/vcn_1_0_offset.h"

/* 1 second timeout */
#define VCN_IDLE_TIMEOUT        msecs_to_jiffies(1000)

/* Firmware Names */
#define FIRMWARE_RAVEN          "amdgpu/raven_vcn.bin"

MODULE_FIRMWARE(FIRMWARE_RAVEN);

static void amdgpu_vcn_idle_work_handler(struct work_struct *work);

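/*
 * amdgpu_vcn_sw_init - load the VCN firmware and set up decode/encode state
 *
 * Requests and validates the Raven VCN microcode, allocates the VCPU
 * buffer object that backs the firmware, stack, heap and session data,
 * and initializes the scheduler entities for the decode and encode rings.
 */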
int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
{
        struct amdgpu_ring *ring;
        struct amd_sched_rq *rq;
        unsigned long bo_size;
        const char *fw_name;
        const struct common_firmware_header *hdr;
        unsigned version_major, version_minor, family_id;
        int r;

        INIT_DELAYED_WORK(&adev->vcn.idle_work, amdgpu_vcn_idle_work_handler);

        switch (adev->asic_type) {
        case CHIP_RAVEN:
                fw_name = FIRMWARE_RAVEN;
                break;
        default:
                return -EINVAL;
        }

        r = request_firmware(&adev->vcn.fw, fw_name, adev->dev);
        if (r) {
                dev_err(adev->dev, "amdgpu_vcn: Can't load firmware \"%s\"\n",
                        fw_name);
                return r;
        }

        r = amdgpu_ucode_validate(adev->vcn.fw);
        if (r) {
                dev_err(adev->dev, "amdgpu_vcn: Can't validate firmware \"%s\"\n",
                        fw_name);
                release_firmware(adev->vcn.fw);
                adev->vcn.fw = NULL;
                return r;
        }

        hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
        family_id = le32_to_cpu(hdr->ucode_version) & 0xff;
        version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff;
        version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
        DRM_INFO("Found VCN firmware Version: %u.%u Family ID: %u\n",
                version_major, version_minor, family_id);

        bo_size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8)
                  +  AMDGPU_VCN_STACK_SIZE + AMDGPU_VCN_HEAP_SIZE
                  +  AMDGPU_VCN_SESSION_SIZE * 40;
        r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE,
                                    AMDGPU_GEM_DOMAIN_VRAM, &adev->vcn.vcpu_bo,
                                    &adev->vcn.gpu_addr, &adev->vcn.cpu_addr);
        if (r) {
                dev_err(adev->dev, "(%d) failed to allocate vcn bo\n", r);
                return r;
        }

        ring = &adev->vcn.ring_dec;
        rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL];
        r = amd_sched_entity_init(&ring->sched, &adev->vcn.entity_dec,
                                  rq, amdgpu_sched_jobs);
        if (r != 0) {
                DRM_ERROR("Failed setting up VCN dec run queue.\n");
                return r;
        }

        ring = &adev->vcn.ring_enc[0];
        rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL];
        r = amd_sched_entity_init(&ring->sched, &adev->vcn.entity_enc,
                                  rq, amdgpu_sched_jobs);
        if (r != 0) {
                DRM_ERROR("Failed setting up VCN enc run queue.\n");
                return r;
        }

        return 0;
}

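/*
 * amdgpu_vcn_sw_fini - tear down VCN software state
 *
 * Releases the saved firmware image copy, the scheduler entities, the
 * VCPU buffer object, all decode/encode rings and the firmware itself.
 */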
int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)
{
        int i;

        kfree(adev->vcn.saved_bo);

        amd_sched_entity_fini(&adev->vcn.ring_dec.sched, &adev->vcn.entity_dec);

        amd_sched_entity_fini(&adev->vcn.ring_enc[0].sched, &adev->vcn.entity_enc);

        amdgpu_bo_free_kernel(&adev->vcn.vcpu_bo,
                              &adev->vcn.gpu_addr,
                              (void **)&adev->vcn.cpu_addr);

        amdgpu_ring_fini(&adev->vcn.ring_dec);

        for (i = 0; i < adev->vcn.num_enc_rings; ++i)
                amdgpu_ring_fini(&adev->vcn.ring_enc[i]);

        release_firmware(adev->vcn.fw);

        return 0;
}

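/*
 * amdgpu_vcn_suspend - save the VCPU buffer contents before suspend
 *
 * Cancels the idle work and copies the whole VCPU buffer object to a
 * kernel allocation so it can be restored on resume.
 */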
int amdgpu_vcn_suspend(struct amdgpu_device *adev)
{
        unsigned size;
        void *ptr;

        if (adev->vcn.vcpu_bo == NULL)
                return 0;

        cancel_delayed_work_sync(&adev->vcn.idle_work);

        size = amdgpu_bo_size(adev->vcn.vcpu_bo);
        ptr = adev->vcn.cpu_addr;

        adev->vcn.saved_bo = kmalloc(size, GFP_KERNEL);
        if (!adev->vcn.saved_bo)
                return -ENOMEM;

        memcpy_fromio(adev->vcn.saved_bo, ptr, size);

        return 0;
}

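/*
 * amdgpu_vcn_resume - restore the VCPU buffer contents
 *
 * Copies the image saved at suspend time back into the VCPU buffer
 * object, or, if no saved copy exists, reloads the firmware from the
 * ucode image and clears the remainder of the buffer.
 */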
int amdgpu_vcn_resume(struct amdgpu_device *adev)
{
        unsigned size;
        void *ptr;

        if (adev->vcn.vcpu_bo == NULL)
                return -EINVAL;

        size = amdgpu_bo_size(adev->vcn.vcpu_bo);
        ptr = adev->vcn.cpu_addr;

        if (adev->vcn.saved_bo != NULL) {
                memcpy_toio(ptr, adev->vcn.saved_bo, size);
                kfree(adev->vcn.saved_bo);
                adev->vcn.saved_bo = NULL;
        } else {
                const struct common_firmware_header *hdr;
                unsigned offset;

                hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
                offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
                memcpy_toio(adev->vcn.cpu_addr, adev->vcn.fw->data + offset,
                            le32_to_cpu(hdr->ucode_size_bytes));
                size -= le32_to_cpu(hdr->ucode_size_bytes);
                ptr += le32_to_cpu(hdr->ucode_size_bytes);
                memset_io(ptr, 0, size);
        }

        return 0;
}

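/*
 * amdgpu_vcn_idle_work_handler - delayed work run when the decode ring has
 * been idle for VCN_IDLE_TIMEOUT
 *
 * If fences are still outstanding the work re-arms itself; power gating of
 * the block is left as a placeholder until PG/CG support is wired up.
 */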
static void amdgpu_vcn_idle_work_handler(struct work_struct *work)
{
        struct amdgpu_device *adev =
                container_of(work, struct amdgpu_device, vcn.idle_work.work);
        unsigned fences = amdgpu_fence_count_emitted(&adev->vcn.ring_dec);

        if (fences == 0) {
                if (adev->pm.dpm_enabled) {
                        /* might be used once pg/cg is supported
                        amdgpu_dpm_enable_uvd(adev, false);
                        */
                }
        } else {
                schedule_delayed_work(&adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
        }
}

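/*
 * amdgpu_vcn_ring_begin_use - called before commands are submitted to a ring
 *
 * Cancels the pending idle work; if it was not pending the block was idle,
 * and clocks/power would be raised here once PG/CG support is in place.
 */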
void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;
        bool set_clocks = !cancel_delayed_work_sync(&adev->vcn.idle_work);

        if (set_clocks && adev->pm.dpm_enabled) {
                /* might be used once pg/cg is supported
                amdgpu_dpm_enable_uvd(adev, true);
                */
        }
}

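/*
 * amdgpu_vcn_ring_end_use - called after command submission completes
 *
 * Re-arms the idle work so the block can be idled after VCN_IDLE_TIMEOUT.
 */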
void amdgpu_vcn_ring_end_use(struct amdgpu_ring *ring)
{
        schedule_delayed_work(&ring->adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
}

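/*
 * amdgpu_vcn_dec_ring_test_ring - basic register write/readback ring test
 *
 * Writes a known value to UVD_CONTEXT_ID through the decode ring and polls
 * the register until the value appears or the timeout expires.
 */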
int amdgpu_vcn_dec_ring_test_ring(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;
        uint32_t tmp = 0;
        unsigned i;
        int r;

        WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_CONTEXT_ID), 0xCAFEDEAD);
        r = amdgpu_ring_alloc(ring, 3);
        if (r) {
                DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
                          ring->idx, r);
                return r;
        }
        amdgpu_ring_write(ring,
                PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_CONTEXT_ID), 0));
        amdgpu_ring_write(ring, 0xDEADBEEF);
        amdgpu_ring_commit(ring);
        for (i = 0; i < adev->usec_timeout; i++) {
                tmp = RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_CONTEXT_ID));
                if (tmp == 0xDEADBEEF)
                        break;
                DRM_UDELAY(1);
        }

        if (i < adev->usec_timeout) {
                DRM_INFO("ring test on %d succeeded in %d usecs\n",
                         ring->idx, i);
        } else {
                DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
                          ring->idx, tmp);
                r = -EINVAL;
        }
        return r;
}

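/*
 * amdgpu_vcn_dec_send_msg - submit a decoder message buffer to the VCPU
 *
 * Reserves and validates the message BO, builds a small IB that points the
 * VCPU at the message, and submits it either directly on the ring or through
 * the decode scheduler entity. Returns the resulting fence via @fence if
 * requested and drops the BO reference.
 */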
static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
                               bool direct, struct dma_fence **fence)
{
        struct ttm_validate_buffer tv;
        struct ww_acquire_ctx ticket;
        struct list_head head;
        struct amdgpu_job *job;
        struct amdgpu_ib *ib;
        struct dma_fence *f = NULL;
        struct amdgpu_device *adev = ring->adev;
        uint64_t addr;
        int i, r;

        memset(&tv, 0, sizeof(tv));
        tv.bo = &bo->tbo;

        INIT_LIST_HEAD(&head);
        list_add(&tv.head, &head);

        r = ttm_eu_reserve_buffers(&ticket, &head, true, NULL);
        if (r)
                return r;

        r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
        if (r)
                goto err;

        r = amdgpu_job_alloc_with_ib(adev, 64, &job);
        if (r)
                goto err;

        ib = &job->ibs[0];
        addr = amdgpu_bo_gpu_offset(bo);
        ib->ptr[0] = PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0);
        ib->ptr[1] = addr;
        ib->ptr[2] = PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0);
        ib->ptr[3] = addr >> 32;
        ib->ptr[4] = PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0);
        ib->ptr[5] = 0;
        for (i = 6; i < 16; i += 2) {
                ib->ptr[i] = PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_NO_OP), 0);
                ib->ptr[i+1] = 0;
        }
        ib->length_dw = 16;

        if (direct) {
                r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
                job->fence = dma_fence_get(f);
                if (r)
                        goto err_free;

                amdgpu_job_free(job);
        } else {
                r = amdgpu_job_submit(job, ring, &adev->vcn.entity_dec,
                                      AMDGPU_FENCE_OWNER_UNDEFINED, &f);
                if (r)
                        goto err_free;
        }

        ttm_eu_fence_buffer_objects(&ticket, &head, f);

        if (fence)
                *fence = dma_fence_get(f);
        amdgpu_bo_unref(&bo);
        dma_fence_put(f);

        return 0;

err_free:
        amdgpu_job_free(job);

err:
        ttm_eu_backoff_reservation(&ticket, &head);
        return r;
}

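/*
 * amdgpu_vcn_dec_get_create_msg - build and send a decoder "create" message
 *
 * Allocates a 1KB message BO in CPU-visible VRAM, fills in a minimal
 * create-session message for @handle and submits it directly on the ring.
 */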
static int amdgpu_vcn_dec_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
                              struct dma_fence **fence)
{
        struct amdgpu_device *adev = ring->adev;
        struct amdgpu_bo *bo;
        uint32_t *msg;
        int r, i;

        r = amdgpu_bo_create(adev, 1024, PAGE_SIZE, true,
                             AMDGPU_GEM_DOMAIN_VRAM,
                             AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
                             AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
                             NULL, NULL, 0, &bo);
        if (r)
                return r;

        r = amdgpu_bo_reserve(bo, false);
        if (r) {
                amdgpu_bo_unref(&bo);
                return r;
        }

        r = amdgpu_bo_kmap(bo, (void **)&msg);
        if (r) {
                amdgpu_bo_unreserve(bo);
                amdgpu_bo_unref(&bo);
                return r;
        }

        msg[0] = cpu_to_le32(0x00000028);
        msg[1] = cpu_to_le32(0x00000038);
        msg[2] = cpu_to_le32(0x00000001);
        msg[3] = cpu_to_le32(0x00000000);
        msg[4] = cpu_to_le32(handle);
        msg[5] = cpu_to_le32(0x00000000);
        msg[6] = cpu_to_le32(0x00000001);
        msg[7] = cpu_to_le32(0x00000028);
        msg[8] = cpu_to_le32(0x00000010);
        msg[9] = cpu_to_le32(0x00000000);
        msg[10] = cpu_to_le32(0x00000007);
        msg[11] = cpu_to_le32(0x00000000);
        msg[12] = cpu_to_le32(0x00000780);
        msg[13] = cpu_to_le32(0x00000440);
        for (i = 14; i < 1024; ++i)
                msg[i] = cpu_to_le32(0x0);

        amdgpu_bo_kunmap(bo);
        amdgpu_bo_unreserve(bo);

        return amdgpu_vcn_dec_send_msg(ring, bo, true, fence);
}

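/*
 * amdgpu_vcn_dec_get_destroy_msg - build and send a decoder "destroy" message
 *
 * Allocates a 1KB message BO, fills in a destroy-session message for @handle
 * and submits it either directly or via the scheduler entity.
 */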
static int amdgpu_vcn_dec_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
                               bool direct, struct dma_fence **fence)
{
        struct amdgpu_device *adev = ring->adev;
        struct amdgpu_bo *bo;
        uint32_t *msg;
        int r, i;

        r = amdgpu_bo_create(adev, 1024, PAGE_SIZE, true,
                             AMDGPU_GEM_DOMAIN_VRAM,
                             AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
                             AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
                             NULL, NULL, 0, &bo);
        if (r)
                return r;

        r = amdgpu_bo_reserve(bo, false);
        if (r) {
                amdgpu_bo_unref(&bo);
                return r;
        }

        r = amdgpu_bo_kmap(bo, (void **)&msg);
        if (r) {
                amdgpu_bo_unreserve(bo);
                amdgpu_bo_unref(&bo);
                return r;
        }

        msg[0] = cpu_to_le32(0x00000028);
        msg[1] = cpu_to_le32(0x00000018);
        msg[2] = cpu_to_le32(0x00000000);
        msg[3] = cpu_to_le32(0x00000002);
        msg[4] = cpu_to_le32(handle);
        msg[5] = cpu_to_le32(0x00000000);
        for (i = 6; i < 1024; ++i)
                msg[i] = cpu_to_le32(0x0);

        amdgpu_bo_kunmap(bo);
        amdgpu_bo_unreserve(bo);

        return amdgpu_vcn_dec_send_msg(ring, bo, direct, fence);
}

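/*
 * amdgpu_vcn_dec_ring_test_ib - end-to-end decoder IB test
 *
 * Sends a create message followed by a destroy message and waits for the
 * destroy fence to signal within @timeout.
 */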
int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
        struct dma_fence *fence;
        long r;

        r = amdgpu_vcn_dec_get_create_msg(ring, 1, NULL);
        if (r) {
                DRM_ERROR("amdgpu: failed to get create msg (%ld).\n", r);
                goto error;
        }

        r = amdgpu_vcn_dec_get_destroy_msg(ring, 1, true, &fence);
        if (r) {
                DRM_ERROR("amdgpu: failed to get destroy ib (%ld).\n", r);
                goto error;
        }

        r = dma_fence_wait_timeout(fence, false, timeout);
        if (r == 0) {
                DRM_ERROR("amdgpu: IB test timed out.\n");
                r = -ETIMEDOUT;
        } else if (r < 0) {
                DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
        } else {
                DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
                r = 0;
        }

        dma_fence_put(fence);

error:
        return r;
}

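/*
 * amdgpu_vcn_enc_ring_test_ring - basic encode ring test
 *
 * Emits a single END command and waits for the ring read pointer to advance
 * past the recorded value.
 */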
int amdgpu_vcn_enc_ring_test_ring(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;
        uint32_t rptr = amdgpu_ring_get_rptr(ring);
        unsigned i;
        int r;

        r = amdgpu_ring_alloc(ring, 16);
        if (r) {
                DRM_ERROR("amdgpu: vcn enc failed to lock ring %d (%d).\n",
                          ring->idx, r);
                return r;
        }
        amdgpu_ring_write(ring, VCN_ENC_CMD_END);
        amdgpu_ring_commit(ring);

        for (i = 0; i < adev->usec_timeout; i++) {
                if (amdgpu_ring_get_rptr(ring) != rptr)
                        break;
                DRM_UDELAY(1);
        }

        if (i < adev->usec_timeout) {
                DRM_INFO("ring test on %d succeeded in %d usecs\n",
                         ring->idx, i);
        } else {
                DRM_ERROR("amdgpu: ring %d test failed\n",
                          ring->idx);
                r = -ETIMEDOUT;
        }

        return r;
}

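/*
 * amdgpu_vcn_enc_get_create_msg - build a minimal encoder "open session" IB
 *
 * Fills a 16-dword IB with session info, task info and an initialize command
 * for @handle and schedules it directly on the encode ring.
 */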
static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
                              struct dma_fence **fence)
{
        const unsigned ib_size_dw = 16;
        struct amdgpu_job *job;
        struct amdgpu_ib *ib;
        struct dma_fence *f = NULL;
        uint64_t dummy;
        int i, r;

        r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
        if (r)
                return r;

        ib = &job->ibs[0];
        dummy = ib->gpu_addr + 1024;

        ib->length_dw = 0;
        ib->ptr[ib->length_dw++] = 0x00000018;
        ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
        ib->ptr[ib->length_dw++] = handle;
        ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
        ib->ptr[ib->length_dw++] = dummy;
        ib->ptr[ib->length_dw++] = 0x0000000b;

        ib->ptr[ib->length_dw++] = 0x00000014;
        ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
        ib->ptr[ib->length_dw++] = 0x0000001c;
        ib->ptr[ib->length_dw++] = 0x00000000;
        ib->ptr[ib->length_dw++] = 0x00000000;

        ib->ptr[ib->length_dw++] = 0x00000008;
        ib->ptr[ib->length_dw++] = 0x08000001; /* op initialize */

        for (i = ib->length_dw; i < ib_size_dw; ++i)
                ib->ptr[i] = 0x0;

        r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
        job->fence = dma_fence_get(f);
        if (r)
                goto err;

        amdgpu_job_free(job);
        if (fence)
                *fence = dma_fence_get(f);
        dma_fence_put(f);

        return 0;

err:
        amdgpu_job_free(job);
        return r;
}

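/*
 * amdgpu_vcn_enc_get_destroy_msg - build a minimal encoder "close session" IB
 *
 * Mirrors the create message but issues the close-session opcode for @handle,
 * again scheduled directly on the encode ring.
 */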
static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
                                struct dma_fence **fence)
{
        const unsigned ib_size_dw = 16;
        struct amdgpu_job *job;
        struct amdgpu_ib *ib;
        struct dma_fence *f = NULL;
        uint64_t dummy;
        int i, r;

        r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
        if (r)
                return r;

        ib = &job->ibs[0];
        dummy = ib->gpu_addr + 1024;

        ib->length_dw = 0;
        ib->ptr[ib->length_dw++] = 0x00000018;
        ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
        ib->ptr[ib->length_dw++] = handle;
        ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
        ib->ptr[ib->length_dw++] = dummy;
        ib->ptr[ib->length_dw++] = 0x0000000b;

        ib->ptr[ib->length_dw++] = 0x00000014;
        ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
        ib->ptr[ib->length_dw++] = 0x0000001c;
        ib->ptr[ib->length_dw++] = 0x00000000;
        ib->ptr[ib->length_dw++] = 0x00000000;

        ib->ptr[ib->length_dw++] = 0x00000008;
        ib->ptr[ib->length_dw++] = 0x08000002; /* op close session */

        for (i = ib->length_dw; i < ib_size_dw; ++i)
                ib->ptr[i] = 0x0;

        r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
        job->fence = dma_fence_get(f);
        if (r)
                goto err;

        amdgpu_job_free(job);
        if (fence)
                *fence = dma_fence_get(f);
        dma_fence_put(f);

        return 0;

err:
        amdgpu_job_free(job);
        return r;
}

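/*
 * amdgpu_vcn_enc_ring_test_ib - end-to-end encoder IB test
 *
 * Opens and closes an encode session and waits for the close fence to signal
 * within @timeout.
 */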
int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
        struct dma_fence *fence = NULL;
        long r;

        r = amdgpu_vcn_enc_get_create_msg(ring, 1, NULL);
        if (r) {
                DRM_ERROR("amdgpu: failed to get create msg (%ld).\n", r);
                goto error;
        }

        r = amdgpu_vcn_enc_get_destroy_msg(ring, 1, &fence);
        if (r) {
                DRM_ERROR("amdgpu: failed to get destroy ib (%ld).\n", r);
                goto error;
        }

        r = dma_fence_wait_timeout(fence, false, timeout);
        if (r == 0) {
                DRM_ERROR("amdgpu: IB test timed out.\n");
                r = -ETIMEDOUT;
        } else if (r < 0) {
                DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
        } else {
                DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
                r = 0;
        }
error:
        dma_fence_put(fence);
        return r;
}