linux/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */

#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/pci.h>

#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_vcn.h"
#include "soc15d.h"

/* Firmware Names */
#define FIRMWARE_RAVEN          "amdgpu/raven_vcn.bin"
#define FIRMWARE_PICASSO        "amdgpu/picasso_vcn.bin"
#define FIRMWARE_RAVEN2         "amdgpu/raven2_vcn.bin"
#define FIRMWARE_ARCTURUS       "amdgpu/arcturus_vcn.bin"
#define FIRMWARE_RENOIR         "amdgpu/renoir_vcn.bin"
#define FIRMWARE_GREEN_SARDINE  "amdgpu/green_sardine_vcn.bin"
#define FIRMWARE_NAVI10         "amdgpu/navi10_vcn.bin"
#define FIRMWARE_NAVI14         "amdgpu/navi14_vcn.bin"
#define FIRMWARE_NAVI12         "amdgpu/navi12_vcn.bin"
#define FIRMWARE_SIENNA_CICHLID "amdgpu/sienna_cichlid_vcn.bin"
#define FIRMWARE_NAVY_FLOUNDER  "amdgpu/navy_flounder_vcn.bin"
#define FIRMWARE_VANGOGH        "amdgpu/vangogh_vcn.bin"
#define FIRMWARE_DIMGREY_CAVEFISH       "amdgpu/dimgrey_cavefish_vcn.bin"

MODULE_FIRMWARE(FIRMWARE_RAVEN);
MODULE_FIRMWARE(FIRMWARE_PICASSO);
MODULE_FIRMWARE(FIRMWARE_RAVEN2);
MODULE_FIRMWARE(FIRMWARE_ARCTURUS);
MODULE_FIRMWARE(FIRMWARE_RENOIR);
MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE);
MODULE_FIRMWARE(FIRMWARE_NAVI10);
MODULE_FIRMWARE(FIRMWARE_NAVI14);
MODULE_FIRMWARE(FIRMWARE_NAVI12);
MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID);
MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER);
MODULE_FIRMWARE(FIRMWARE_VANGOGH);
MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH);

static void amdgpu_vcn_idle_work_handler(struct work_struct *work);

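/**
 * amdgpu_vcn_sw_init - init the VCN block at the software level
 *
 * @adev: amdgpu_device pointer
 *
 * Select and load the VCN firmware for the current ASIC, log its
 * version, and allocate each instance's VCPU buffer object, with the
 * firmware shared memory area carved out of its tail (plus the DPG
 * indirect SRAM buffer when indirect register programming is used).
 */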
int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
{
        unsigned long bo_size;
        const char *fw_name;
        const struct common_firmware_header *hdr;
        unsigned char fw_check;
        int i, r;

        INIT_DELAYED_WORK(&adev->vcn.idle_work, amdgpu_vcn_idle_work_handler);
        mutex_init(&adev->vcn.vcn_pg_lock);
        mutex_init(&adev->vcn.vcn1_jpeg1_workaround);
        atomic_set(&adev->vcn.total_submission_cnt, 0);
        for (i = 0; i < adev->vcn.num_vcn_inst; i++)
                atomic_set(&adev->vcn.inst[i].dpg_enc_submission_cnt, 0);

        switch (adev->asic_type) {
        case CHIP_RAVEN:
                if (adev->apu_flags & AMD_APU_IS_RAVEN2)
                        fw_name = FIRMWARE_RAVEN2;
                else if (adev->apu_flags & AMD_APU_IS_PICASSO)
                        fw_name = FIRMWARE_PICASSO;
                else
                        fw_name = FIRMWARE_RAVEN;
                break;
        case CHIP_ARCTURUS:
                fw_name = FIRMWARE_ARCTURUS;
                if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
                    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
                        adev->vcn.indirect_sram = true;
                break;
        case CHIP_RENOIR:
                if (adev->apu_flags & AMD_APU_IS_RENOIR)
                        fw_name = FIRMWARE_RENOIR;
                else
                        fw_name = FIRMWARE_GREEN_SARDINE;

                if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
                    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
                        adev->vcn.indirect_sram = true;
                break;
        case CHIP_NAVI10:
                fw_name = FIRMWARE_NAVI10;
                if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
                    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
                        adev->vcn.indirect_sram = true;
                break;
        case CHIP_NAVI14:
                fw_name = FIRMWARE_NAVI14;
                if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
                    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
                        adev->vcn.indirect_sram = true;
                break;
        case CHIP_NAVI12:
                fw_name = FIRMWARE_NAVI12;
                if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
                    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
                        adev->vcn.indirect_sram = true;
                break;
        case CHIP_SIENNA_CICHLID:
                fw_name = FIRMWARE_SIENNA_CICHLID;
                if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
                    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
                        adev->vcn.indirect_sram = true;
                break;
        case CHIP_NAVY_FLOUNDER:
                fw_name = FIRMWARE_NAVY_FLOUNDER;
                if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
                    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
                        adev->vcn.indirect_sram = true;
                break;
        case CHIP_VANGOGH:
                fw_name = FIRMWARE_VANGOGH;
                break;
        case CHIP_DIMGREY_CAVEFISH:
                fw_name = FIRMWARE_DIMGREY_CAVEFISH;
                if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
                    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
                        adev->vcn.indirect_sram = true;
                break;
        default:
                return -EINVAL;
        }

        r = request_firmware(&adev->vcn.fw, fw_name, adev->dev);
        if (r) {
                dev_err(adev->dev, "amdgpu_vcn: Can't load firmware \"%s\"\n",
                        fw_name);
                return r;
        }

        r = amdgpu_ucode_validate(adev->vcn.fw);
        if (r) {
                dev_err(adev->dev, "amdgpu_vcn: Can't validate firmware \"%s\"\n",
                        fw_name);
                release_firmware(adev->vcn.fw);
                adev->vcn.fw = NULL;
                return r;
        }

        hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
        adev->vcn.fw_version = le32_to_cpu(hdr->ucode_version);

        /* Bit 20-23, it is encode major and non-zero for new naming convention.
         * This field is part of version minor and DRM_DISABLED_FLAG in old naming
         * convention. Since the latest version minor is 0x5B and DRM_DISABLED_FLAG
         * is zero in old naming convention, this field is always zero so far.
         * These four bits are used to tell which naming convention is present.
         */
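        /* Worked example (hypothetical value): ucode_version = 0x0211005A
         * has (0x0211005A >> 20) & 0xf = 1, so the new naming convention
         * applies and it decodes as ENC 1.16, DEC 2, VEP 0, Revision 90.
         */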
        fw_check = (le32_to_cpu(hdr->ucode_version) >> 20) & 0xf;
        if (fw_check) {
                unsigned int dec_ver, enc_major, enc_minor, vep, fw_rev;

                fw_rev = le32_to_cpu(hdr->ucode_version) & 0xfff;
                enc_minor = (le32_to_cpu(hdr->ucode_version) >> 12) & 0xff;
                enc_major = fw_check;
                dec_ver = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xf;
                vep = (le32_to_cpu(hdr->ucode_version) >> 28) & 0xf;
                DRM_INFO("Found VCN firmware Version ENC: %u.%u DEC: %u VEP: %u Revision: %u\n",
                        enc_major, enc_minor, dec_ver, vep, fw_rev);
        } else {
                unsigned int version_major, version_minor, family_id;

                family_id = le32_to_cpu(hdr->ucode_version) & 0xff;
                version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff;
                version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
                DRM_INFO("Found VCN firmware Version: %u.%u Family ID: %u\n",
                        version_major, version_minor, family_id);
        }

        bo_size = AMDGPU_VCN_STACK_SIZE + AMDGPU_VCN_CONTEXT_SIZE;
        if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
                bo_size += AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
        bo_size += AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared));

        for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
                if (adev->vcn.harvest_config & (1 << i))
                        continue;

                r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE,
                                                AMDGPU_GEM_DOMAIN_VRAM, &adev->vcn.inst[i].vcpu_bo,
                                                &adev->vcn.inst[i].gpu_addr, &adev->vcn.inst[i].cpu_addr);
                if (r) {
                        dev_err(adev->dev, "(%d) failed to allocate vcn bo\n", r);
                        return r;
                }

                adev->vcn.inst[i].fw_shared_cpu_addr = adev->vcn.inst[i].cpu_addr +
                                bo_size - AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared));
                adev->vcn.inst[i].fw_shared_gpu_addr = adev->vcn.inst[i].gpu_addr +
                                bo_size - AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared));

                if (adev->vcn.indirect_sram) {
                        r = amdgpu_bo_create_kernel(adev, 64 * 2 * 4, PAGE_SIZE,
                                        AMDGPU_GEM_DOMAIN_VRAM, &adev->vcn.inst[i].dpg_sram_bo,
                                        &adev->vcn.inst[i].dpg_sram_gpu_addr, &adev->vcn.inst[i].dpg_sram_cpu_addr);
                        if (r) {
                                dev_err(adev->dev, "VCN %d (%d) failed to allocate DPG bo\n", i, r);
                                return r;
                        }
                }
        }

        return 0;
}

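/**
 * amdgpu_vcn_sw_fini - tear down the VCN block at the software level
 *
 * @adev: amdgpu_device pointer
 *
 * Free the DPG SRAM and VCPU buffer objects, any saved firmware
 * snapshot, the rings, the firmware image and the locks set up by
 * amdgpu_vcn_sw_init().
 */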
int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)
{
        int i, j;

        cancel_delayed_work_sync(&adev->vcn.idle_work);

        for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
                if (adev->vcn.harvest_config & (1 << j))
                        continue;

                if (adev->vcn.indirect_sram) {
                        amdgpu_bo_free_kernel(&adev->vcn.inst[j].dpg_sram_bo,
                                                  &adev->vcn.inst[j].dpg_sram_gpu_addr,
                                                  (void **)&adev->vcn.inst[j].dpg_sram_cpu_addr);
                }
                kvfree(adev->vcn.inst[j].saved_bo);

                amdgpu_bo_free_kernel(&adev->vcn.inst[j].vcpu_bo,
                                          &adev->vcn.inst[j].gpu_addr,
                                          (void **)&adev->vcn.inst[j].cpu_addr);

                amdgpu_ring_fini(&adev->vcn.inst[j].ring_dec);

                for (i = 0; i < adev->vcn.num_enc_rings; ++i)
                        amdgpu_ring_fini(&adev->vcn.inst[j].ring_enc[i]);
        }

        release_firmware(adev->vcn.fw);
        mutex_destroy(&adev->vcn.vcn1_jpeg1_workaround);
        mutex_destroy(&adev->vcn.vcn_pg_lock);

        return 0;
}

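/**
 * amdgpu_vcn_suspend - save VCN state across suspend
 *
 * @adev: amdgpu_device pointer
 *
 * Cancel the idle work and snapshot each instance's VCPU buffer
 * object into kernel memory (saved_bo) so amdgpu_vcn_resume() can
 * restore it.
 */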
int amdgpu_vcn_suspend(struct amdgpu_device *adev)
{
        unsigned size;
        void *ptr;
        int i;

        cancel_delayed_work_sync(&adev->vcn.idle_work);

        for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
                if (adev->vcn.harvest_config & (1 << i))
                        continue;
                if (adev->vcn.inst[i].vcpu_bo == NULL)
                        return 0;

                size = amdgpu_bo_size(adev->vcn.inst[i].vcpu_bo);
                ptr = adev->vcn.inst[i].cpu_addr;

                adev->vcn.inst[i].saved_bo = kvmalloc(size, GFP_KERNEL);
                if (!adev->vcn.inst[i].saved_bo)
                        return -ENOMEM;

                memcpy_fromio(adev->vcn.inst[i].saved_bo, ptr, size);
        }
        return 0;
}

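/**
 * amdgpu_vcn_resume - restore VCN state after resume
 *
 * @adev: amdgpu_device pointer
 *
 * Restore each VCPU buffer object from its saved_bo snapshot when one
 * exists; otherwise re-copy the firmware ucode (for non-PSP loading)
 * and zero the rest of the buffer.
 */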
int amdgpu_vcn_resume(struct amdgpu_device *adev)
{
        unsigned size;
        void *ptr;
        int i;

        for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
                if (adev->vcn.harvest_config & (1 << i))
                        continue;
                if (adev->vcn.inst[i].vcpu_bo == NULL)
                        return -EINVAL;

                size = amdgpu_bo_size(adev->vcn.inst[i].vcpu_bo);
                ptr = adev->vcn.inst[i].cpu_addr;

                if (adev->vcn.inst[i].saved_bo != NULL) {
                        memcpy_toio(ptr, adev->vcn.inst[i].saved_bo, size);
                        kvfree(adev->vcn.inst[i].saved_bo);
                        adev->vcn.inst[i].saved_bo = NULL;
                } else {
                        const struct common_firmware_header *hdr;
                        unsigned offset;

                        hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
                        if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
                                offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
                                memcpy_toio(adev->vcn.inst[i].cpu_addr, adev->vcn.fw->data + offset,
                                            le32_to_cpu(hdr->ucode_size_bytes));
                                size -= le32_to_cpu(hdr->ucode_size_bytes);
                                ptr += le32_to_cpu(hdr->ucode_size_bytes);
                        }
                        memset_io(ptr, 0, size);
                }
        }
        return 0;
}

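/**
 * amdgpu_vcn_idle_work_handler - delayed work for VCN power management
 *
 * @work: work struct embedded in adev->vcn.idle_work
 *
 * Count the fences still outstanding on every VCN ring and update the
 * DPG pause state per instance. When nothing is outstanding and no
 * submission is in flight, re-enable GFXOFF, gate VCN power and drop
 * the video power profile; otherwise reschedule the work.
 */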
static void amdgpu_vcn_idle_work_handler(struct work_struct *work)
{
        struct amdgpu_device *adev =
                container_of(work, struct amdgpu_device, vcn.idle_work.work);
        unsigned int fences = 0, fence[AMDGPU_MAX_VCN_INSTANCES] = {0};
        unsigned int i, j;
        int r = 0;

        for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
                if (adev->vcn.harvest_config & (1 << j))
                        continue;

                for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
                        fence[j] += amdgpu_fence_count_emitted(&adev->vcn.inst[j].ring_enc[i]);
                }

                if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
                        struct dpg_pause_state new_state;

                        if (fence[j] ||
                                unlikely(atomic_read(&adev->vcn.inst[j].dpg_enc_submission_cnt)))
                                new_state.fw_based = VCN_DPG_STATE__PAUSE;
                        else
                                new_state.fw_based = VCN_DPG_STATE__UNPAUSE;

                        adev->vcn.pause_dpg_mode(adev, j, &new_state);
                }

                fence[j] += amdgpu_fence_count_emitted(&adev->vcn.inst[j].ring_dec);
                fences += fence[j];
        }

        if (!fences && !atomic_read(&adev->vcn.total_submission_cnt)) {
                amdgpu_gfx_off_ctrl(adev, true);
                amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
                       AMD_PG_STATE_GATE);
                r = amdgpu_dpm_switch_power_profile(adev, PP_SMC_POWER_PROFILE_VIDEO,
                                false);
                if (r)
                        dev_warn(adev->dev, "(%d) failed to disable video power profile mode\n", r);
        } else {
                schedule_delayed_work(&adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
        }
}

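/**
 * amdgpu_vcn_ring_begin_use - power up VCN before a submission
 *
 * @ring: ring buffer about to be used
 *
 * Bump the submission counter and, when the block was idle, disable
 * GFXOFF and switch to the video power profile. Then ungate VCN power
 * and refresh the DPG pause state for the ring's instance.
 */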
void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;
        int r = 0;

        atomic_inc(&adev->vcn.total_submission_cnt);

        if (!cancel_delayed_work_sync(&adev->vcn.idle_work)) {
                amdgpu_gfx_off_ctrl(adev, false);
                r = amdgpu_dpm_switch_power_profile(adev, PP_SMC_POWER_PROFILE_VIDEO,
                                true);
                if (r)
                        dev_warn(adev->dev, "(%d) failed to switch to video power profile mode\n", r);
        }

        mutex_lock(&adev->vcn.vcn_pg_lock);
        amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
               AMD_PG_STATE_UNGATE);

        if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
                struct dpg_pause_state new_state;

                if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC) {
                        atomic_inc(&adev->vcn.inst[ring->me].dpg_enc_submission_cnt);
                        new_state.fw_based = VCN_DPG_STATE__PAUSE;
                } else {
                        unsigned int fences = 0;
                        unsigned int i;

                        for (i = 0; i < adev->vcn.num_enc_rings; ++i)
                                fences += amdgpu_fence_count_emitted(&adev->vcn.inst[ring->me].ring_enc[i]);

                        if (fences || atomic_read(&adev->vcn.inst[ring->me].dpg_enc_submission_cnt))
                                new_state.fw_based = VCN_DPG_STATE__PAUSE;
                        else
                                new_state.fw_based = VCN_DPG_STATE__UNPAUSE;
                }

                adev->vcn.pause_dpg_mode(adev, ring->me, &new_state);
        }
        mutex_unlock(&adev->vcn.vcn_pg_lock);
}

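/**
 * amdgpu_vcn_ring_end_use - mark the end of a submission
 *
 * @ring: ring buffer that was used
 *
 * Drop the submission counters and (re)arm the delayed idle work.
 */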
void amdgpu_vcn_ring_end_use(struct amdgpu_ring *ring)
{
        if (ring->adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG &&
                ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC)
                atomic_dec(&ring->adev->vcn.inst[ring->me].dpg_enc_submission_cnt);

        atomic_dec(&ring->adev->vcn.total_submission_cnt);

        schedule_delayed_work(&ring->adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
}

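/**
 * amdgpu_vcn_dec_ring_test_ring - register write/read test on the decode ring
 *
 * @ring: decode ring to test
 *
 * Seed SCRATCH9 with 0xCAFEDEAD, write 0xDEADBEEF to it through a
 * ring packet, and poll until the new value reads back. Skipped under
 * SR-IOV, where the guest cannot access the registers directly.
 */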
int amdgpu_vcn_dec_ring_test_ring(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;
        uint32_t tmp = 0;
        unsigned i;
        int r;

        /* VCN in SRIOV does not support direct register read/write */
        if (amdgpu_sriov_vf(adev))
                return 0;

        WREG32(adev->vcn.inst[ring->me].external.scratch9, 0xCAFEDEAD);
        r = amdgpu_ring_alloc(ring, 3);
        if (r)
                return r;
        amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.scratch9, 0));
        amdgpu_ring_write(ring, 0xDEADBEEF);
        amdgpu_ring_commit(ring);
        for (i = 0; i < adev->usec_timeout; i++) {
                tmp = RREG32(adev->vcn.inst[ring->me].external.scratch9);
                if (tmp == 0xDEADBEEF)
                        break;
                udelay(1);
        }

        if (i >= adev->usec_timeout)
                r = -ETIMEDOUT;

        return r;
}

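/**
 * amdgpu_vcn_dec_sw_ring_test_ring - software decode ring test
 *
 * @ring: software decode ring to test
 *
 * Submit an END command and wait for the read pointer to move past
 * the commands just written. Skipped under SR-IOV.
 */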
int amdgpu_vcn_dec_sw_ring_test_ring(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;
        uint32_t rptr;
        unsigned int i;
        int r;

        if (amdgpu_sriov_vf(adev))
                return 0;

        r = amdgpu_ring_alloc(ring, 16);
        if (r)
                return r;

        rptr = amdgpu_ring_get_rptr(ring);

        amdgpu_ring_write(ring, VCN_DEC_SW_CMD_END);
        amdgpu_ring_commit(ring);

        for (i = 0; i < adev->usec_timeout; i++) {
                if (amdgpu_ring_get_rptr(ring) != rptr)
                        break;
                udelay(1);
        }

        if (i >= adev->usec_timeout)
                r = -ETIMEDOUT;

        return r;
}

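/**
 * amdgpu_vcn_dec_send_msg - submit a decoder message buffer
 *
 * @ring: decode ring to submit on
 * @bo: reserved buffer object holding the message
 * @fence: optional fence returned for the submission
 *
 * Build a 16-dword IB that passes the GPU address of @bo through the
 * data0/data1/cmd register packets and submit it directly. @bo is
 * fenced, unreserved and unreferenced on both success and failure.
 */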
static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
                                   struct amdgpu_bo *bo,
                                   struct dma_fence **fence)
{
        struct amdgpu_device *adev = ring->adev;
        struct dma_fence *f = NULL;
        struct amdgpu_job *job;
        struct amdgpu_ib *ib;
        uint64_t addr;
        int i, r;

        r = amdgpu_job_alloc_with_ib(adev, 64,
                                        AMDGPU_IB_POOL_DIRECT, &job);
        if (r)
                goto err;

        ib = &job->ibs[0];
        addr = amdgpu_bo_gpu_offset(bo);
        ib->ptr[0] = PACKET0(adev->vcn.internal.data0, 0);
        ib->ptr[1] = addr;
        ib->ptr[2] = PACKET0(adev->vcn.internal.data1, 0);
        ib->ptr[3] = addr >> 32;
        ib->ptr[4] = PACKET0(adev->vcn.internal.cmd, 0);
        ib->ptr[5] = 0;
        for (i = 6; i < 16; i += 2) {
                ib->ptr[i] = PACKET0(adev->vcn.internal.nop, 0);
                ib->ptr[i+1] = 0;
        }
        ib->length_dw = 16;

        r = amdgpu_job_submit_direct(job, ring, &f);
        if (r)
                goto err_free;

        amdgpu_bo_fence(bo, f, false);
        amdgpu_bo_unreserve(bo);
        amdgpu_bo_unref(&bo);

        if (fence)
                *fence = dma_fence_get(f);
        dma_fence_put(f);

        return 0;

err_free:
        amdgpu_job_free(job);

err:
        amdgpu_bo_unreserve(bo);
        amdgpu_bo_unref(&bo);
        return r;
}

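/**
 * amdgpu_vcn_dec_get_create_msg - construct a decoder create message
 *
 * @ring: decode ring the message is intended for
 * @handle: session handle embedded in the message
 * @bo: returned pinned and CPU-mapped buffer holding the message
 *
 * Allocate a message buffer and fill it with the words the firmware
 * expects for creating decode session @handle.
 */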
static int amdgpu_vcn_dec_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
                                         struct amdgpu_bo **bo)
{
        struct amdgpu_device *adev = ring->adev;
        uint32_t *msg;
        int r, i;

        *bo = NULL;
        r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
                                      AMDGPU_GEM_DOMAIN_VRAM,
                                      bo, NULL, (void **)&msg);
        if (r)
                return r;

        msg[0] = cpu_to_le32(0x00000028);
        msg[1] = cpu_to_le32(0x00000038);
        msg[2] = cpu_to_le32(0x00000001);
        msg[3] = cpu_to_le32(0x00000000);
        msg[4] = cpu_to_le32(handle);
        msg[5] = cpu_to_le32(0x00000000);
        msg[6] = cpu_to_le32(0x00000001);
        msg[7] = cpu_to_le32(0x00000028);
        msg[8] = cpu_to_le32(0x00000010);
        msg[9] = cpu_to_le32(0x00000000);
        msg[10] = cpu_to_le32(0x00000007);
        msg[11] = cpu_to_le32(0x00000000);
        msg[12] = cpu_to_le32(0x00000780);
        msg[13] = cpu_to_le32(0x00000440);
        for (i = 14; i < 1024; ++i)
                msg[i] = cpu_to_le32(0x0);

        return 0;
}

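/**
 * amdgpu_vcn_dec_get_destroy_msg - construct a decoder destroy message
 *
 * @ring: decode ring the message is intended for
 * @handle: session handle embedded in the message
 * @bo: returned pinned and CPU-mapped buffer holding the message
 *
 * Allocate a message buffer and fill it with the words the firmware
 * expects for destroying decode session @handle.
 */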
static int amdgpu_vcn_dec_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
                                          struct amdgpu_bo **bo)
{
        struct amdgpu_device *adev = ring->adev;
        uint32_t *msg;
        int r, i;

        *bo = NULL;
        r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
                                      AMDGPU_GEM_DOMAIN_VRAM,
                                      bo, NULL, (void **)&msg);
        if (r)
                return r;

        msg[0] = cpu_to_le32(0x00000028);
        msg[1] = cpu_to_le32(0x00000018);
        msg[2] = cpu_to_le32(0x00000000);
        msg[3] = cpu_to_le32(0x00000002);
        msg[4] = cpu_to_le32(handle);
        msg[5] = cpu_to_le32(0x00000000);
        for (i = 6; i < 1024; ++i)
                msg[i] = cpu_to_le32(0x0);

        return 0;
}

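/**
 * amdgpu_vcn_dec_ring_test_ib - indirect buffer test on the decode ring
 *
 * @ring: decode ring to test
 * @timeout: how long to wait for the final fence, in jiffies
 *
 * Send a create message followed by a destroy message and wait for
 * the fence of the destroy submission.
 */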
int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
        struct dma_fence *fence = NULL;
        struct amdgpu_bo *bo;
        long r;

        r = amdgpu_vcn_dec_get_create_msg(ring, 1, &bo);
        if (r)
                goto error;

        r = amdgpu_vcn_dec_send_msg(ring, bo, NULL);
        if (r)
                goto error;
        r = amdgpu_vcn_dec_get_destroy_msg(ring, 1, &bo);
        if (r)
                goto error;

        r = amdgpu_vcn_dec_send_msg(ring, bo, &fence);
        if (r)
                goto error;

        r = dma_fence_wait_timeout(fence, false, timeout);
        if (r == 0)
                r = -ETIMEDOUT;
        else if (r > 0)
                r = 0;

        dma_fence_put(fence);
error:
        return r;
}

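/**
 * amdgpu_vcn_dec_sw_send_msg - submit a decoder message on the software ring
 *
 * @ring: software decode ring to submit on
 * @bo: reserved buffer object holding the message
 * @fence: optional fence returned for the submission
 *
 * Like amdgpu_vcn_dec_send_msg(), but the message address travels in
 * an amdgpu_vcn_decode_buffer structure embedded in the IB instead of
 * register write packets.
 */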
static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring,
                                   struct amdgpu_bo *bo,
                                   struct dma_fence **fence)
{
        struct amdgpu_vcn_decode_buffer *decode_buffer = NULL;
        const unsigned int ib_size_dw = 64;
        struct amdgpu_device *adev = ring->adev;
        struct dma_fence *f = NULL;
        struct amdgpu_job *job;
        struct amdgpu_ib *ib;
        uint64_t addr;
        int i, r;

        r = amdgpu_job_alloc_with_ib(adev, ib_size_dw * 4,
                                AMDGPU_IB_POOL_DIRECT, &job);
        if (r)
                goto err;

        ib = &job->ibs[0];
        addr = amdgpu_bo_gpu_offset(bo);
        ib->length_dw = 0;

        ib->ptr[ib->length_dw++] = sizeof(struct amdgpu_vcn_decode_buffer) + 8;
        ib->ptr[ib->length_dw++] = cpu_to_le32(AMDGPU_VCN_IB_FLAG_DECODE_BUFFER);
        decode_buffer = (struct amdgpu_vcn_decode_buffer *)&(ib->ptr[ib->length_dw]);
        ib->length_dw += sizeof(struct amdgpu_vcn_decode_buffer) / 4;
        memset(decode_buffer, 0, sizeof(struct amdgpu_vcn_decode_buffer));

        decode_buffer->valid_buf_flag |= cpu_to_le32(AMDGPU_VCN_CMD_FLAG_MSG_BUFFER);
        decode_buffer->msg_buffer_address_hi = cpu_to_le32(addr >> 32);
        decode_buffer->msg_buffer_address_lo = cpu_to_le32(addr);

        for (i = ib->length_dw; i < ib_size_dw; ++i)
                ib->ptr[i] = 0x0;

        r = amdgpu_job_submit_direct(job, ring, &f);
        if (r)
                goto err_free;

        amdgpu_bo_fence(bo, f, false);
        amdgpu_bo_unreserve(bo);
        amdgpu_bo_unref(&bo);

        if (fence)
                *fence = dma_fence_get(f);
        dma_fence_put(f);

        return 0;

err_free:
        amdgpu_job_free(job);

err:
        amdgpu_bo_unreserve(bo);
        amdgpu_bo_unref(&bo);
        return r;
}

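/**
 * amdgpu_vcn_dec_sw_ring_test_ib - indirect buffer test on the software decode ring
 *
 * @ring: software decode ring to test
 * @timeout: how long to wait for the final fence, in jiffies
 *
 * Software-ring counterpart of amdgpu_vcn_dec_ring_test_ib(): send
 * create and destroy messages and wait for the destroy fence.
 */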
int amdgpu_vcn_dec_sw_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
        struct dma_fence *fence = NULL;
        struct amdgpu_bo *bo;
        long r;

        r = amdgpu_vcn_dec_get_create_msg(ring, 1, &bo);
        if (r)
                goto error;

        r = amdgpu_vcn_dec_sw_send_msg(ring, bo, NULL);
        if (r)
                goto error;
        r = amdgpu_vcn_dec_get_destroy_msg(ring, 1, &bo);
        if (r)
                goto error;

        r = amdgpu_vcn_dec_sw_send_msg(ring, bo, &fence);
        if (r)
                goto error;

        r = dma_fence_wait_timeout(fence, false, timeout);
        if (r == 0)
                r = -ETIMEDOUT;
        else if (r > 0)
                r = 0;

        dma_fence_put(fence);
error:
        return r;
}

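/**
 * amdgpu_vcn_enc_ring_test_ring - encode ring test
 *
 * @ring: encode ring to test
 *
 * Submit VCN_ENC_CMD_END and wait for the read pointer to move past
 * the commands just written. Skipped under SR-IOV.
 */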
int amdgpu_vcn_enc_ring_test_ring(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;
        uint32_t rptr;
        unsigned i;
        int r;

        if (amdgpu_sriov_vf(adev))
                return 0;

        r = amdgpu_ring_alloc(ring, 16);
        if (r)
                return r;

        rptr = amdgpu_ring_get_rptr(ring);

        amdgpu_ring_write(ring, VCN_ENC_CMD_END);
        amdgpu_ring_commit(ring);

        for (i = 0; i < adev->usec_timeout; i++) {
                if (amdgpu_ring_get_rptr(ring) != rptr)
                        break;
                udelay(1);
        }

        if (i >= adev->usec_timeout)
                r = -ETIMEDOUT;

        return r;
}

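/**
 * amdgpu_vcn_enc_get_create_msg - open an encode session
 *
 * @ring: encode ring to submit on
 * @handle: session handle to open
 * @bo: scratch buffer the session info block points at
 * @fence: optional fence returned for the submission
 *
 * Directly submit an IB carrying session info, task info and an
 * "op initialize" command for @handle.
 */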
static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
                                         struct amdgpu_bo *bo,
                                         struct dma_fence **fence)
{
        const unsigned ib_size_dw = 16;
        struct amdgpu_job *job;
        struct amdgpu_ib *ib;
        struct dma_fence *f = NULL;
        uint64_t addr;
        int i, r;

        r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
                                        AMDGPU_IB_POOL_DIRECT, &job);
        if (r)
                return r;

        ib = &job->ibs[0];
        addr = amdgpu_bo_gpu_offset(bo);

        ib->length_dw = 0;
        ib->ptr[ib->length_dw++] = 0x00000018;
        ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
        ib->ptr[ib->length_dw++] = handle;
        ib->ptr[ib->length_dw++] = upper_32_bits(addr);
        ib->ptr[ib->length_dw++] = addr;
        ib->ptr[ib->length_dw++] = 0x0000000b;

        ib->ptr[ib->length_dw++] = 0x00000014;
        ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
        ib->ptr[ib->length_dw++] = 0x0000001c;
        ib->ptr[ib->length_dw++] = 0x00000000;
        ib->ptr[ib->length_dw++] = 0x00000000;

        ib->ptr[ib->length_dw++] = 0x00000008;
        ib->ptr[ib->length_dw++] = 0x08000001; /* op initialize */

        for (i = ib->length_dw; i < ib_size_dw; ++i)
                ib->ptr[i] = 0x0;

        r = amdgpu_job_submit_direct(job, ring, &f);
        if (r)
                goto err;

        if (fence)
                *fence = dma_fence_get(f);
        dma_fence_put(f);

        return 0;

err:
        amdgpu_job_free(job);
        return r;
}

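/**
 * amdgpu_vcn_enc_get_destroy_msg - close an encode session
 *
 * @ring: encode ring to submit on
 * @handle: session handle to close
 * @bo: scratch buffer the session info block points at
 * @fence: optional fence returned for the submission
 *
 * Directly submit an IB carrying session info, task info and an
 * "op close session" command for @handle.
 */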
static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
                                          struct amdgpu_bo *bo,
                                          struct dma_fence **fence)
{
        const unsigned ib_size_dw = 16;
        struct amdgpu_job *job;
        struct amdgpu_ib *ib;
        struct dma_fence *f = NULL;
        uint64_t addr;
        int i, r;

        r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
                                        AMDGPU_IB_POOL_DIRECT, &job);
        if (r)
                return r;

        ib = &job->ibs[0];
        addr = amdgpu_bo_gpu_offset(bo);

        ib->length_dw = 0;
        ib->ptr[ib->length_dw++] = 0x00000018;
        ib->ptr[ib->length_dw++] = 0x00000001;
        ib->ptr[ib->length_dw++] = handle;
        ib->ptr[ib->length_dw++] = upper_32_bits(addr);
        ib->ptr[ib->length_dw++] = addr;
        ib->ptr[ib->length_dw++] = 0x0000000b;

        ib->ptr[ib->length_dw++] = 0x00000014;
        ib->ptr[ib->length_dw++] = 0x00000002;
        ib->ptr[ib->length_dw++] = 0x0000001c;
        ib->ptr[ib->length_dw++] = 0x00000000;
        ib->ptr[ib->length_dw++] = 0x00000000;

        ib->ptr[ib->length_dw++] = 0x00000008;
        ib->ptr[ib->length_dw++] = 0x08000002; /* op close session */

        for (i = ib->length_dw; i < ib_size_dw; ++i)
                ib->ptr[i] = 0x0;

        r = amdgpu_job_submit_direct(job, ring, &f);
        if (r)
                goto err;

        if (fence)
                *fence = dma_fence_get(f);
        dma_fence_put(f);

        return 0;

err:
        amdgpu_job_free(job);
        return r;
}

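/**
 * amdgpu_vcn_enc_ring_test_ib - indirect buffer test on the encode ring
 *
 * @ring: encode ring to test
 * @timeout: how long to wait for the final fence, in jiffies
 *
 * Allocate a 128KB scratch buffer, open and close encode session 1,
 * and wait for the fence of the close submission.
 */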
int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
        struct dma_fence *fence = NULL;
        struct amdgpu_bo *bo = NULL;
        long r;

        r = amdgpu_bo_create_reserved(ring->adev, 128 * 1024, PAGE_SIZE,
                                      AMDGPU_GEM_DOMAIN_VRAM,
                                      &bo, NULL, NULL);
        if (r)
                return r;

        r = amdgpu_vcn_enc_get_create_msg(ring, 1, bo, NULL);
        if (r)
                goto error;

        r = amdgpu_vcn_enc_get_destroy_msg(ring, 1, bo, &fence);
        if (r)
                goto error;

        r = dma_fence_wait_timeout(fence, false, timeout);
        if (r == 0)
                r = -ETIMEDOUT;
        else if (r > 0)
                r = 0;

error:
        dma_fence_put(fence);
        amdgpu_bo_unreserve(bo);
        amdgpu_bo_unref(&bo);
        return r;
}