linux/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
/*
 * Copyright 2011 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Christian König <deathsimple@vodafone.de>
 */

#include <linux/firmware.h>
#include <linux/module.h>
#include <drm/drmP.h>
#include <drm/drm.h>

#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_uvd.h"
#include "cikd.h"
#include "uvd/uvd_4_2_d.h"

/* 1 second timeout */
#define UVD_IDLE_TIMEOUT        msecs_to_jiffies(1000)

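/*
 * The FW_* version constants below use the same byte layout as
 * adev->uvd.fw_version ((a << 24) | (b << 16) | (c << 8)), so minimum
 * version checks can be done with plain integer comparisons.
 */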
/* Firmware versions for VI */
#define FW_1_65_10      ((1 << 24) | (65 << 16) | (10 << 8))
#define FW_1_87_11      ((1 << 24) | (87 << 16) | (11 << 8))
#define FW_1_87_12      ((1 << 24) | (87 << 16) | (12 << 8))
#define FW_1_37_15      ((1 << 24) | (37 << 16) | (15 << 8))

/* Polaris10/11 firmware version */
#define FW_1_66_16      ((1 << 24) | (66 << 16) | (16 << 8))

/* Firmware Names */
#ifdef CONFIG_DRM_AMDGPU_CIK
#define FIRMWARE_BONAIRE        "radeon/bonaire_uvd.bin"
#define FIRMWARE_KABINI "radeon/kabini_uvd.bin"
#define FIRMWARE_KAVERI "radeon/kaveri_uvd.bin"
#define FIRMWARE_HAWAII "radeon/hawaii_uvd.bin"
#define FIRMWARE_MULLINS        "radeon/mullins_uvd.bin"
#endif
#define FIRMWARE_TONGA          "amdgpu/tonga_uvd.bin"
#define FIRMWARE_CARRIZO        "amdgpu/carrizo_uvd.bin"
#define FIRMWARE_FIJI           "amdgpu/fiji_uvd.bin"
#define FIRMWARE_STONEY         "amdgpu/stoney_uvd.bin"
#define FIRMWARE_POLARIS10      "amdgpu/polaris10_uvd.bin"
#define FIRMWARE_POLARIS11      "amdgpu/polaris11_uvd.bin"
#define FIRMWARE_POLARIS12      "amdgpu/polaris12_uvd.bin"

/**
 * amdgpu_uvd_cs_ctx - Command submission parser context
 *
 * Used for emulating virtual memory support on UVD 4.2.
 */
struct amdgpu_uvd_cs_ctx {
        struct amdgpu_cs_parser *parser;
        unsigned reg, count;
        unsigned data0, data1;
        unsigned idx;
        unsigned ib_idx;

        /* does the IB have a msg command */
        bool has_msg_cmd;

        /* minimum buffer sizes */
        unsigned *buf_sizes;
};

#ifdef CONFIG_DRM_AMDGPU_CIK
MODULE_FIRMWARE(FIRMWARE_BONAIRE);
MODULE_FIRMWARE(FIRMWARE_KABINI);
MODULE_FIRMWARE(FIRMWARE_KAVERI);
MODULE_FIRMWARE(FIRMWARE_HAWAII);
MODULE_FIRMWARE(FIRMWARE_MULLINS);
#endif
MODULE_FIRMWARE(FIRMWARE_TONGA);
MODULE_FIRMWARE(FIRMWARE_CARRIZO);
MODULE_FIRMWARE(FIRMWARE_FIJI);
MODULE_FIRMWARE(FIRMWARE_STONEY);
MODULE_FIRMWARE(FIRMWARE_POLARIS10);
MODULE_FIRMWARE(FIRMWARE_POLARIS11);
MODULE_FIRMWARE(FIRMWARE_POLARIS12);

static void amdgpu_uvd_idle_work_handler(struct work_struct *work);

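/**
 * amdgpu_uvd_sw_init - load firmware and allocate UVD resources
 *
 * @adev: amdgpu_device pointer
 *
 * Request and validate the UVD microcode for the current ASIC, derive the
 * supported handle count from the firmware version, allocate the VCPU
 * buffer object in VRAM and set up the scheduler entity for the UVD ring.
 */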
int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
{
        struct amdgpu_ring *ring;
        struct amd_sched_rq *rq;
        unsigned long bo_size;
        const char *fw_name;
        const struct common_firmware_header *hdr;
        unsigned version_major, version_minor, family_id;
        int i, r;

        INIT_DELAYED_WORK(&adev->uvd.idle_work, amdgpu_uvd_idle_work_handler);

        switch (adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_CIK
        case CHIP_BONAIRE:
                fw_name = FIRMWARE_BONAIRE;
                break;
        case CHIP_KABINI:
                fw_name = FIRMWARE_KABINI;
                break;
        case CHIP_KAVERI:
                fw_name = FIRMWARE_KAVERI;
                break;
        case CHIP_HAWAII:
                fw_name = FIRMWARE_HAWAII;
                break;
        case CHIP_MULLINS:
                fw_name = FIRMWARE_MULLINS;
                break;
#endif
        case CHIP_TONGA:
                fw_name = FIRMWARE_TONGA;
                break;
        case CHIP_FIJI:
                fw_name = FIRMWARE_FIJI;
                break;
        case CHIP_CARRIZO:
                fw_name = FIRMWARE_CARRIZO;
                break;
        case CHIP_STONEY:
                fw_name = FIRMWARE_STONEY;
                break;
        case CHIP_POLARIS10:
                fw_name = FIRMWARE_POLARIS10;
                break;
        case CHIP_POLARIS11:
                fw_name = FIRMWARE_POLARIS11;
                break;
        case CHIP_POLARIS12:
                fw_name = FIRMWARE_POLARIS12;
                break;
        default:
                return -EINVAL;
        }

        r = request_firmware(&adev->uvd.fw, fw_name, adev->dev);
        if (r) {
                dev_err(adev->dev, "amdgpu_uvd: Can't load firmware \"%s\"\n",
                        fw_name);
                return r;
        }

        r = amdgpu_ucode_validate(adev->uvd.fw);
        if (r) {
                dev_err(adev->dev, "amdgpu_uvd: Can't validate firmware \"%s\"\n",
                        fw_name);
                release_firmware(adev->uvd.fw);
                adev->uvd.fw = NULL;
                return r;
        }

        /* Set the default UVD handles that the firmware can handle */
        adev->uvd.max_handles = AMDGPU_DEFAULT_UVD_HANDLES;

        hdr = (const struct common_firmware_header *)adev->uvd.fw->data;
        family_id = le32_to_cpu(hdr->ucode_version) & 0xff;
        version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff;
        version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
        DRM_INFO("Found UVD firmware Version: %u.%u Family ID: %u\n",
                version_major, version_minor, family_id);

        /*
         * Limit the number of UVD handles depending on microcode major
         * and minor versions. The firmware version which has 40 UVD
         * instances support is 1.80. So all subsequent versions should
         * also have the same support.
         */
        if ((version_major > 0x01) ||
            ((version_major == 0x01) && (version_minor >= 0x50)))
                adev->uvd.max_handles = AMDGPU_MAX_UVD_HANDLES;

        adev->uvd.fw_version = ((version_major << 24) | (version_minor << 16) |
                                (family_id << 8));

        if ((adev->asic_type == CHIP_POLARIS10 ||
             adev->asic_type == CHIP_POLARIS11) &&
            (adev->uvd.fw_version < FW_1_66_16))
                DRM_ERROR("POLARIS10/11 UVD firmware version %u.%u is too old.\n",
                          version_major, version_minor);

        bo_size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8)
                  +  AMDGPU_UVD_STACK_SIZE + AMDGPU_UVD_HEAP_SIZE
                  +  AMDGPU_UVD_SESSION_SIZE * adev->uvd.max_handles;
        r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE,
                                    AMDGPU_GEM_DOMAIN_VRAM, &adev->uvd.vcpu_bo,
                                    &adev->uvd.gpu_addr, &adev->uvd.cpu_addr);
        if (r) {
                dev_err(adev->dev, "(%d) failed to allocate UVD bo\n", r);
                return r;
        }

        ring = &adev->uvd.ring;
        rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL];
        r = amd_sched_entity_init(&ring->sched, &adev->uvd.entity,
                                  rq, amdgpu_sched_jobs);
        if (r != 0) {
                DRM_ERROR("Failed setting up UVD run queue.\n");
                return r;
        }

        for (i = 0; i < adev->uvd.max_handles; ++i) {
                atomic_set(&adev->uvd.handles[i], 0);
                adev->uvd.filp[i] = NULL;
        }

        /* from UVD v5.0 HW addressing capacity increased to 64 bits */
        if (!amdgpu_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_UVD, 5, 0))
                adev->uvd.address_64_bit = true;

        switch (adev->asic_type) {
        case CHIP_TONGA:
                adev->uvd.use_ctx_buf = adev->uvd.fw_version >= FW_1_65_10;
                break;
        case CHIP_CARRIZO:
                adev->uvd.use_ctx_buf = adev->uvd.fw_version >= FW_1_87_11;
                break;
        case CHIP_FIJI:
                adev->uvd.use_ctx_buf = adev->uvd.fw_version >= FW_1_87_12;
                break;
        case CHIP_STONEY:
                adev->uvd.use_ctx_buf = adev->uvd.fw_version >= FW_1_37_15;
                break;
        default:
                adev->uvd.use_ctx_buf = adev->asic_type >= CHIP_POLARIS10;
        }

        return 0;
}

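/**
 * amdgpu_uvd_sw_fini - free UVD resources
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down everything amdgpu_uvd_sw_init() created: the scheduler
 * entity, the VCPU buffer object, the ring and the firmware image.
 */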
int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
{
        kfree(adev->uvd.saved_bo);

        amd_sched_entity_fini(&adev->uvd.ring.sched, &adev->uvd.entity);

        amdgpu_bo_free_kernel(&adev->uvd.vcpu_bo,
                              &adev->uvd.gpu_addr,
                              (void **)&adev->uvd.cpu_addr);

        amdgpu_ring_fini(&adev->uvd.ring);

        release_firmware(adev->uvd.fw);

        return 0;
}

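/**
 * amdgpu_uvd_suspend - save VCPU state before suspend
 *
 * @adev: amdgpu_device pointer
 *
 * If any handle is still open, snapshot the VCPU buffer object into
 * system memory; the BO lives in VRAM, hence the io-safe copy helper.
 */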
int amdgpu_uvd_suspend(struct amdgpu_device *adev)
{
        unsigned size;
        void *ptr;
        int i;

        if (adev->uvd.vcpu_bo == NULL)
                return 0;

        for (i = 0; i < adev->uvd.max_handles; ++i)
                if (atomic_read(&adev->uvd.handles[i]))
                        break;

        if (i == adev->uvd.max_handles)
                return 0;

        cancel_delayed_work_sync(&adev->uvd.idle_work);

        size = amdgpu_bo_size(adev->uvd.vcpu_bo);
        ptr = adev->uvd.cpu_addr;

        adev->uvd.saved_bo = kmalloc(size, GFP_KERNEL);
        if (!adev->uvd.saved_bo)
                return -ENOMEM;

        memcpy_fromio(adev->uvd.saved_bo, ptr, size);

        return 0;
}

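/**
 * amdgpu_uvd_resume - restore VCPU state after resume
 *
 * @adev: amdgpu_device pointer
 *
 * Copy a previously saved snapshot back into the VCPU buffer object,
 * or reload the microcode and clear the rest of the BO if none exists.
 */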
int amdgpu_uvd_resume(struct amdgpu_device *adev)
{
        unsigned size;
        void *ptr;

        if (adev->uvd.vcpu_bo == NULL)
                return -EINVAL;

        size = amdgpu_bo_size(adev->uvd.vcpu_bo);
        ptr = adev->uvd.cpu_addr;

        if (adev->uvd.saved_bo != NULL) {
                memcpy_toio(ptr, adev->uvd.saved_bo, size);
                kfree(adev->uvd.saved_bo);
                adev->uvd.saved_bo = NULL;
        } else {
                const struct common_firmware_header *hdr;
                unsigned offset;

                hdr = (const struct common_firmware_header *)adev->uvd.fw->data;
                offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
                memcpy_toio(adev->uvd.cpu_addr, adev->uvd.fw->data + offset,
                            le32_to_cpu(hdr->ucode_size_bytes));
                size -= le32_to_cpu(hdr->ucode_size_bytes);
                ptr += le32_to_cpu(hdr->ucode_size_bytes);
                memset_io(ptr, 0, size);
        }

        return 0;
}

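/**
 * amdgpu_uvd_free_handles - free all handles belonging to a file
 *
 * @adev: amdgpu_device pointer
 * @filp: DRM file the handles belong to
 *
 * Send a destroy message for every session still owned by @filp, wait
 * for it to complete and release the handle slot. Typically used when
 * a DRM file is closed.
 */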
void amdgpu_uvd_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
{
        struct amdgpu_ring *ring = &adev->uvd.ring;
        int i, r;

        for (i = 0; i < adev->uvd.max_handles; ++i) {
                uint32_t handle = atomic_read(&adev->uvd.handles[i]);
                if (handle != 0 && adev->uvd.filp[i] == filp) {
                        struct dma_fence *fence;

                        r = amdgpu_uvd_get_destroy_msg(ring, handle,
                                                       false, &fence);
                        if (r) {
                                DRM_ERROR("Error destroying UVD (%d)!\n", r);
                                continue;
                        }

                        dma_fence_wait(fence, false);
                        dma_fence_put(fence);

                        adev->uvd.filp[i] = NULL;
                        atomic_set(&adev->uvd.handles[i], 0);
                }
        }
}

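/*
 * UVD without 64-bit addressing can only access buffers in the same 256MB
 * segment as the firmware; restricting every placement to the first 256MB
 * of VRAM keeps them reachable.
 */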
static void amdgpu_uvd_force_into_uvd_segment(struct amdgpu_bo *abo)
{
        int i;

        for (i = 0; i < abo->placement.num_placement; ++i) {
                abo->placements[i].fpfn = 0 >> PAGE_SHIFT;
                abo->placements[i].lpfn = (256 * 1024 * 1024) >> PAGE_SHIFT;
        }
}

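/* reassemble the 64-bit address from the DATA0 (low) and DATA1 (high) dwords */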
static u64 amdgpu_uvd_get_addr_from_ctx(struct amdgpu_uvd_cs_ctx *ctx)
{
        uint32_t lo, hi;
        uint64_t addr;

        lo = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->data0);
        hi = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->data1);
        addr = ((uint64_t)lo) | (((uint64_t)hi) << 32);

        return addr;
}

/**
 * amdgpu_uvd_cs_pass1 - first parsing round
 *
 * @ctx: UVD parser context
 *
 * Make sure UVD message and feedback buffers are in VRAM and
 * nobody is violating a 256MB boundary.
 */
static int amdgpu_uvd_cs_pass1(struct amdgpu_uvd_cs_ctx *ctx)
{
        struct amdgpu_bo_va_mapping *mapping;
        struct amdgpu_bo *bo;
        uint32_t cmd;
        uint64_t addr = amdgpu_uvd_get_addr_from_ctx(ctx);
        int r = 0;

        mapping = amdgpu_cs_find_mapping(ctx->parser, addr, &bo);
        if (mapping == NULL) {
                DRM_ERROR("Can't find BO for addr 0x%08Lx\n", addr);
                return -EINVAL;
        }

        if (!ctx->parser->adev->uvd.address_64_bit) {
                /* check if it's a message or feedback command */
                cmd = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->idx) >> 1;
                if (cmd == 0x0 || cmd == 0x3) {
                        /* yes, force it into VRAM */
                        uint32_t domain = AMDGPU_GEM_DOMAIN_VRAM;

                        amdgpu_ttm_placement_from_domain(bo, domain);
                }
                amdgpu_uvd_force_into_uvd_segment(bo);

                r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
        }

        return r;
}

/**
 * amdgpu_uvd_cs_msg_decode - handle UVD decode message
 *
 * @adev: amdgpu_device pointer
 * @msg: pointer to message structure
 * @buf_sizes: returned buffer sizes
 *
 * Peek into the decode message and calculate the necessary buffer sizes.
 */
static int amdgpu_uvd_cs_msg_decode(struct amdgpu_device *adev, uint32_t *msg,
        unsigned buf_sizes[])
{
        unsigned stream_type = msg[4];
        unsigned width = msg[6];
        unsigned height = msg[7];
        unsigned dpb_size = msg[9];
        unsigned pitch = msg[28];
        unsigned level = msg[57];

        unsigned width_in_mb = width / 16;
        unsigned height_in_mb = ALIGN(height / 16, 2);
        unsigned fs_in_mb = width_in_mb * height_in_mb;

        unsigned image_size, tmp, min_dpb_size, num_dpb_buffer;
        unsigned min_ctx_size = ~0;

        image_size = width * height;
        image_size += image_size / 2;
        image_size = ALIGN(image_size, 1024);

        switch (stream_type) {
        case 0: /* H264 */
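                /*
                 * These constants are the per-level MaxDpbMbs limits from
                 * table A-1 of the H.264 specification.
                 */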
                switch (level) {
                case 30:
                        num_dpb_buffer = 8100 / fs_in_mb;
                        break;
                case 31:
                        num_dpb_buffer = 18000 / fs_in_mb;
                        break;
                case 32:
                        num_dpb_buffer = 20480 / fs_in_mb;
                        break;
                case 41:
                        num_dpb_buffer = 32768 / fs_in_mb;
                        break;
                case 42:
                        num_dpb_buffer = 34816 / fs_in_mb;
                        break;
                case 50:
                        num_dpb_buffer = 110400 / fs_in_mb;
                        break;
                case 51:
                        num_dpb_buffer = 184320 / fs_in_mb;
                        break;
                default:
                        num_dpb_buffer = 184320 / fs_in_mb;
                        break;
                }
                num_dpb_buffer++;
                if (num_dpb_buffer > 17)
                        num_dpb_buffer = 17;

                /* reference picture buffer */
                min_dpb_size = image_size * num_dpb_buffer;

                /* macroblock context buffer */
                min_dpb_size += width_in_mb * height_in_mb * num_dpb_buffer * 192;

                /* IT surface buffer */
                min_dpb_size += width_in_mb * height_in_mb * 32;
                break;

        case 1: /* VC1 */

                /* reference picture buffer */
                min_dpb_size = image_size * 3;

                /* CONTEXT_BUFFER */
                min_dpb_size += width_in_mb * height_in_mb * 128;

                /* IT surface buffer */
                min_dpb_size += width_in_mb * 64;

                /* DB surface buffer */
                min_dpb_size += width_in_mb * 128;

                /* BP */
                tmp = max(width_in_mb, height_in_mb);
                min_dpb_size += ALIGN(tmp * 7 * 16, 64);
                break;

        case 3: /* MPEG2 */

                /* reference picture buffer */
                min_dpb_size = image_size * 3;
                break;

        case 4: /* MPEG4 */

                /* reference picture buffer */
                min_dpb_size = image_size * 3;

                /* CM */
                min_dpb_size += width_in_mb * height_in_mb * 64;

                /* IT surface buffer */
                min_dpb_size += ALIGN(width_in_mb * height_in_mb * 32, 64);
                break;

        case 7: /* H264 Perf */
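                /* same MaxDpbMbs-derived limits as the plain H264 case above */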
                switch (level) {
                case 30:
                        num_dpb_buffer = 8100 / fs_in_mb;
                        break;
                case 31:
                        num_dpb_buffer = 18000 / fs_in_mb;
                        break;
                case 32:
                        num_dpb_buffer = 20480 / fs_in_mb;
                        break;
                case 41:
                        num_dpb_buffer = 32768 / fs_in_mb;
                        break;
                case 42:
                        num_dpb_buffer = 34816 / fs_in_mb;
                        break;
                case 50:
                        num_dpb_buffer = 110400 / fs_in_mb;
                        break;
                case 51:
                        num_dpb_buffer = 184320 / fs_in_mb;
                        break;
                default:
                        num_dpb_buffer = 184320 / fs_in_mb;
                        break;
                }
                num_dpb_buffer++;
                if (num_dpb_buffer > 17)
                        num_dpb_buffer = 17;

                /* reference picture buffer */
                min_dpb_size = image_size * num_dpb_buffer;

                if (!adev->uvd.use_ctx_buf) {
                        /* macroblock context buffer */
                        min_dpb_size +=
                                width_in_mb * height_in_mb * num_dpb_buffer * 192;

                        /* IT surface buffer */
                        min_dpb_size += width_in_mb * height_in_mb * 32;
                } else {
                        /* macroblock context buffer */
                        min_ctx_size =
                                width_in_mb * height_in_mb * num_dpb_buffer * 192;
                }
                break;

        case 16: /* H265 */
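                /* the DPB is sized from the reference frame count in the low byte of msg[59] */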
                image_size = (ALIGN(width, 16) * ALIGN(height, 16) * 3) / 2;
                image_size = ALIGN(image_size, 256);

                num_dpb_buffer = (le32_to_cpu(msg[59]) & 0xff) + 2;
                min_dpb_size = image_size * num_dpb_buffer;
                min_ctx_size = ((width + 255) / 16) * ((height + 255) / 16)
                                           * 16 * num_dpb_buffer + 52 * 1024;
                break;

        default:
                DRM_ERROR("UVD codec not handled %d!\n", stream_type);
                return -EINVAL;
        }

        if (width > pitch) {
                DRM_ERROR("Invalid UVD decoding target pitch!\n");
                return -EINVAL;
        }

        if (dpb_size < min_dpb_size) {
                DRM_ERROR("Invalid dpb_size in UVD message (%d / %d)!\n",
                          dpb_size, min_dpb_size);
                return -EINVAL;
        }

        buf_sizes[0x1] = dpb_size;
        buf_sizes[0x2] = image_size;
        buf_sizes[0x4] = min_ctx_size;
        return 0;
}

/**
 * amdgpu_uvd_cs_msg - handle UVD message
 *
 * @ctx: UVD parser context
 * @bo: buffer object containing the message
 * @offset: offset into the buffer object
 *
 * Peek into the UVD message and extract the session id.
 * Make sure that we don't open up too many sessions.
 */
static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx,
                             struct amdgpu_bo *bo, unsigned offset)
{
        struct amdgpu_device *adev = ctx->parser->adev;
        int32_t *msg, msg_type, handle;
        void *ptr;
        long r;
        int i;

        if (offset & 0x3F) {
                DRM_ERROR("UVD messages must be 64 byte aligned!\n");
                return -EINVAL;
        }

        r = amdgpu_bo_kmap(bo, &ptr);
        if (r) {
                DRM_ERROR("Failed mapping the UVD message (%ld)!\n", r);
                return r;
        }

        msg = ptr + offset;

        msg_type = msg[1];
        handle = msg[2];

        if (handle == 0) {
                amdgpu_bo_kunmap(bo);
                DRM_ERROR("Invalid UVD handle!\n");
                return -EINVAL;
        }

        switch (msg_type) {
        case 0:
                /* it's a create msg, no further parsing needed */
                amdgpu_bo_kunmap(bo);

                /* try to alloc a new handle */
                for (i = 0; i < adev->uvd.max_handles; ++i) {
                        if (atomic_read(&adev->uvd.handles[i]) == handle) {
                                DRM_ERROR("Handle 0x%x already in use!\n", handle);
                                return -EINVAL;
                        }

                        if (!atomic_cmpxchg(&adev->uvd.handles[i], 0, handle)) {
                                adev->uvd.filp[i] = ctx->parser->filp;
                                return 0;
                        }
                }

                DRM_ERROR("No more free UVD handles!\n");
                return -ENOSPC;

        case 1:
                /* it's a decode msg, calc buffer sizes */
                r = amdgpu_uvd_cs_msg_decode(adev, msg, ctx->buf_sizes);
                amdgpu_bo_kunmap(bo);
                if (r)
                        return r;

                /* validate the handle */
                for (i = 0; i < adev->uvd.max_handles; ++i) {
                        if (atomic_read(&adev->uvd.handles[i]) == handle) {
                                if (adev->uvd.filp[i] != ctx->parser->filp) {
                                        DRM_ERROR("UVD handle collision detected!\n");
                                        return -EINVAL;
                                }
                                return 0;
                        }
                }

                DRM_ERROR("Invalid UVD handle 0x%x!\n", handle);
                return -ENOENT;

        case 2:
                /* it's a destroy msg, free the handle */
                for (i = 0; i < adev->uvd.max_handles; ++i)
                        atomic_cmpxchg(&adev->uvd.handles[i], handle, 0);
                amdgpu_bo_kunmap(bo);
                return 0;

        default:
                DRM_ERROR("Illegal UVD message type (%d)!\n", msg_type);
                return -EINVAL;
        }
        BUG();
        return -EINVAL;
}

/**
 * amdgpu_uvd_cs_pass2 - second parsing round
 *
 * @ctx: UVD parser context
 *
 * Patch buffer addresses, make sure buffer sizes are correct.
 */
static int amdgpu_uvd_cs_pass2(struct amdgpu_uvd_cs_ctx *ctx)
{
        struct amdgpu_bo_va_mapping *mapping;
        struct amdgpu_bo *bo;
        uint32_t cmd;
        uint64_t start, end;
        uint64_t addr = amdgpu_uvd_get_addr_from_ctx(ctx);
        int r;

        mapping = amdgpu_cs_find_mapping(ctx->parser, addr, &bo);
        if (mapping == NULL) {
                DRM_ERROR("Can't find BO for addr 0x%08Lx\n", addr);
                return -EINVAL;
        }

        start = amdgpu_bo_gpu_offset(bo);

        end = (mapping->it.last + 1 - mapping->it.start);
        end = end * AMDGPU_GPU_PAGE_SIZE + start;

        addr -= ((uint64_t)mapping->it.start) * AMDGPU_GPU_PAGE_SIZE;
        start += addr;

        amdgpu_set_ib_value(ctx->parser, ctx->ib_idx, ctx->data0,
                            lower_32_bits(start));
        amdgpu_set_ib_value(ctx->parser, ctx->ib_idx, ctx->data1,
                            upper_32_bits(start));

        cmd = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->idx) >> 1;
        if (cmd < 0x4) {
                if ((end - start) < ctx->buf_sizes[cmd]) {
                        DRM_ERROR("buffer (%d) too small (%d / %d)!\n", cmd,
                                  (unsigned)(end - start),
                                  ctx->buf_sizes[cmd]);
                        return -EINVAL;
                }

        } else if (cmd == 0x206) {
                if ((end - start) < ctx->buf_sizes[4]) {
                        DRM_ERROR("buffer (%d) too small (%d / %d)!\n", cmd,
                                          (unsigned)(end - start),
                                          ctx->buf_sizes[4]);
                        return -EINVAL;
                }
        } else if ((cmd != 0x100) && (cmd != 0x204)) {
                DRM_ERROR("invalid UVD command %X!\n", cmd);
                return -EINVAL;
        }

        if (!ctx->parser->adev->uvd.address_64_bit) {
                if ((start >> 28) != ((end - 1) >> 28)) {
                        DRM_ERROR("reloc %LX-%LX crossing 256MB boundary!\n",
                                  start, end);
                        return -EINVAL;
                }

                if ((cmd == 0 || cmd == 0x3) &&
                    (start >> 28) != (ctx->parser->adev->uvd.gpu_addr >> 28)) {
                        DRM_ERROR("msg/fb buffer %LX-%LX out of 256MB segment!\n",
                                  start, end);
                        return -EINVAL;
                }
        }

        if (cmd == 0) {
                ctx->has_msg_cmd = true;
                r = amdgpu_uvd_cs_msg(ctx, bo, addr);
                if (r)
                        return r;
        } else if (!ctx->has_msg_cmd) {
                DRM_ERROR("Message needed before other commands are sent!\n");
                return -EINVAL;
        }

        return 0;
}

/**
 * amdgpu_uvd_cs_reg - parse register writes
 *
 * @ctx: UVD parser context
 * @cb: callback function
 *
 * Parse the register writes, call cb on each complete command.
 */
static int amdgpu_uvd_cs_reg(struct amdgpu_uvd_cs_ctx *ctx,
                             int (*cb)(struct amdgpu_uvd_cs_ctx *ctx))
{
        struct amdgpu_ib *ib = &ctx->parser->job->ibs[ctx->ib_idx];
        int i, r;

        ctx->idx++;
        for (i = 0; i <= ctx->count; ++i) {
                unsigned reg = ctx->reg + i;

                if (ctx->idx >= ib->length_dw) {
                        DRM_ERROR("Register command after end of CS!\n");
                        return -EINVAL;
                }

                switch (reg) {
                case mmUVD_GPCOM_VCPU_DATA0:
                        ctx->data0 = ctx->idx;
                        break;
                case mmUVD_GPCOM_VCPU_DATA1:
                        ctx->data1 = ctx->idx;
                        break;
                case mmUVD_GPCOM_VCPU_CMD:
                        r = cb(ctx);
                        if (r)
                                return r;
                        break;
                case mmUVD_ENGINE_CNTL:
                case mmUVD_NO_OP:
                        break;
                default:
                        DRM_ERROR("Invalid reg 0x%X!\n", reg);
                        return -EINVAL;
                }
                ctx->idx++;
        }
        return 0;
}

/**
 * amdgpu_uvd_cs_packets - parse UVD packets
 *
 * @ctx: UVD parser context
 * @cb: callback function
 *
 * Parse the command stream packets.
 */
static int amdgpu_uvd_cs_packets(struct amdgpu_uvd_cs_ctx *ctx,
                                 int (*cb)(struct amdgpu_uvd_cs_ctx *ctx))
{
        struct amdgpu_ib *ib = &ctx->parser->job->ibs[ctx->ib_idx];
        int r;

        for (ctx->idx = 0; ctx->idx < ib->length_dw; ) {
                uint32_t cmd = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->idx);
                unsigned type = CP_PACKET_GET_TYPE(cmd);

                switch (type) {
                case PACKET_TYPE0:
                        ctx->reg = CP_PACKET0_GET_REG(cmd);
                        ctx->count = CP_PACKET_GET_COUNT(cmd);
                        r = amdgpu_uvd_cs_reg(ctx, cb);
                        if (r)
                                return r;
                        break;
                case PACKET_TYPE2:
                        ++ctx->idx;
                        break;
                default:
                        DRM_ERROR("Unknown packet type %d!\n", type);
                        return -EINVAL;
                }
        }
        return 0;
}

/**
 * amdgpu_uvd_ring_parse_cs - UVD command submission parser
 *
 * @parser: Command submission parser context
 * @ib_idx: index of the indirect buffer to parse
 *
 * Parse the command stream, patch in addresses as necessary.
 */
int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx)
{
        struct amdgpu_uvd_cs_ctx ctx = {};
        unsigned buf_sizes[] = {
                [0x00000000]    =       2048,
                [0x00000001]    =       0xFFFFFFFF,
                [0x00000002]    =       0xFFFFFFFF,
                [0x00000003]    =       2048,
                [0x00000004]    =       0xFFFFFFFF,
        };
        struct amdgpu_ib *ib = &parser->job->ibs[ib_idx];
        int r;

        parser->job->vm = NULL;
        ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo);

        if (ib->length_dw % 16) {
                DRM_ERROR("UVD IB length (%d) not 16 dwords aligned!\n",
                          ib->length_dw);
                return -EINVAL;
        }

        r = amdgpu_cs_sysvm_access_required(parser);
        if (r)
                return r;

        ctx.parser = parser;
        ctx.buf_sizes = buf_sizes;
        ctx.ib_idx = ib_idx;

        /* first round only required on chips without UVD 64 bit address support */
        if (!parser->adev->uvd.address_64_bit) {
                /* first round, make sure the buffers are actually in the UVD segment */
                r = amdgpu_uvd_cs_packets(&ctx, amdgpu_uvd_cs_pass1);
                if (r)
                        return r;
        }

        /* second round, patch buffer addresses into the command stream */
        r = amdgpu_uvd_cs_packets(&ctx, amdgpu_uvd_cs_pass2);
        if (r)
                return r;

        if (!ctx.has_msg_cmd) {
                DRM_ERROR("UVD-IBs need a msg command!\n");
                return -EINVAL;
        }

        return 0;
}

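/**
 * amdgpu_uvd_send_msg - submit a message buffer to the UVD block
 *
 * @ring: UVD ring to submit on
 * @bo: buffer object containing the message
 * @direct: submit directly to the ring instead of via the scheduler
 * @fence: optional fence returned for the submission
 *
 * Validate @bo into the UVD segment, build a small IB pointing the VCPU
 * at it and either schedule the IB directly or hand it to the scheduler
 * entity. The BO reference is dropped on success.
 */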
static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
                               bool direct, struct dma_fence **fence)
{
        struct ttm_validate_buffer tv;
        struct ww_acquire_ctx ticket;
        struct list_head head;
        struct amdgpu_job *job;
        struct amdgpu_ib *ib;
        struct dma_fence *f = NULL;
        struct amdgpu_device *adev = ring->adev;
        uint64_t addr;
        int i, r;

        memset(&tv, 0, sizeof(tv));
        tv.bo = &bo->tbo;

        INIT_LIST_HEAD(&head);
        list_add(&tv.head, &head);

        r = ttm_eu_reserve_buffers(&ticket, &head, true, NULL);
        if (r)
                return r;

        if (!ring->adev->uvd.address_64_bit) {
                amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM);
                amdgpu_uvd_force_into_uvd_segment(bo);
        }

        r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
        if (r)
                goto err;

        r = amdgpu_job_alloc_with_ib(adev, 64, &job);
        if (r)
                goto err;

        ib = &job->ibs[0];
        addr = amdgpu_bo_gpu_offset(bo);
        ib->ptr[0] = PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0);
        ib->ptr[1] = addr;
        ib->ptr[2] = PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0);
        ib->ptr[3] = addr >> 32;
        ib->ptr[4] = PACKET0(mmUVD_GPCOM_VCPU_CMD, 0);
        ib->ptr[5] = 0;
        for (i = 6; i < 16; i += 2) {
                ib->ptr[i] = PACKET0(mmUVD_NO_OP, 0);
                ib->ptr[i+1] = 0;
        }
        ib->length_dw = 16;

        if (direct) {
                r = amdgpu_ib_schedule(ring, 1, ib, NULL, NULL, &f);
                job->fence = dma_fence_get(f);
                if (r)
                        goto err_free;

                amdgpu_job_free(job);
        } else {
                r = amdgpu_job_submit(job, ring, &adev->uvd.entity,
                                      AMDGPU_FENCE_OWNER_UNDEFINED, &f);
                if (r)
                        goto err_free;
        }

        ttm_eu_fence_buffer_objects(&ticket, &head, f);

        if (fence)
                *fence = dma_fence_get(f);
        amdgpu_bo_unref(&bo);
        dma_fence_put(f);

        return 0;

err_free:
        amdgpu_job_free(job);

err:
        ttm_eu_backoff_reservation(&ticket, &head);
        return r;
}

/*
 * Multiple fence commands without any stream commands in between can
 * crash the vcpu so just try to emit a dummy create/destroy msg to
 * avoid this.
 */
int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
                              struct dma_fence **fence)
{
        struct amdgpu_device *adev = ring->adev;
        struct amdgpu_bo *bo;
        uint32_t *msg;
        int r, i;

        r = amdgpu_bo_create(adev, 1024, PAGE_SIZE, true,
                             AMDGPU_GEM_DOMAIN_VRAM,
                             AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
                             AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
                             NULL, NULL, &bo);
        if (r)
                return r;

        r = amdgpu_bo_reserve(bo, false);
        if (r) {
                amdgpu_bo_unref(&bo);
                return r;
        }

        r = amdgpu_bo_kmap(bo, (void **)&msg);
        if (r) {
                amdgpu_bo_unreserve(bo);
                amdgpu_bo_unref(&bo);
                return r;
        }

        /* stitch together a UVD create msg */
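        /*
         * msg[1] == 0 marks a create message and msg[2] carries the session
         * handle; msg[7]/msg[8] describe a dummy 1920x1088 stream
         * (0x780 == 1920, 0x440 == 1088).
         */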
        msg[0] = cpu_to_le32(0x00000de4);
        msg[1] = cpu_to_le32(0x00000000);
        msg[2] = cpu_to_le32(handle);
        msg[3] = cpu_to_le32(0x00000000);
        msg[4] = cpu_to_le32(0x00000000);
        msg[5] = cpu_to_le32(0x00000000);
        msg[6] = cpu_to_le32(0x00000000);
        msg[7] = cpu_to_le32(0x00000780);
        msg[8] = cpu_to_le32(0x00000440);
        msg[9] = cpu_to_le32(0x00000000);
        msg[10] = cpu_to_le32(0x01b37000);
        for (i = 11; i < 1024; ++i)
                msg[i] = cpu_to_le32(0x0);

        amdgpu_bo_kunmap(bo);
        amdgpu_bo_unreserve(bo);

        return amdgpu_uvd_send_msg(ring, bo, true, fence);
}

int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
                               bool direct, struct dma_fence **fence)
{
        struct amdgpu_device *adev = ring->adev;
        struct amdgpu_bo *bo;
        uint32_t *msg;
        int r, i;

        r = amdgpu_bo_create(adev, 1024, PAGE_SIZE, true,
                             AMDGPU_GEM_DOMAIN_VRAM,
                             AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
                             AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
                             NULL, NULL, &bo);
        if (r)
                return r;

        r = amdgpu_bo_reserve(bo, false);
        if (r) {
                amdgpu_bo_unref(&bo);
                return r;
        }

        r = amdgpu_bo_kmap(bo, (void **)&msg);
        if (r) {
                amdgpu_bo_unreserve(bo);
                amdgpu_bo_unref(&bo);
                return r;
        }

        /* stitch together a UVD destroy msg */
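        /* msg[1] == 2 marks a destroy message for the handle in msg[2]
         * (see amdgpu_uvd_cs_msg) */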
        msg[0] = cpu_to_le32(0x00000de4);
        msg[1] = cpu_to_le32(0x00000002);
        msg[2] = cpu_to_le32(handle);
        msg[3] = cpu_to_le32(0x00000000);
        for (i = 4; i < 1024; ++i)
                msg[i] = cpu_to_le32(0x0);

        amdgpu_bo_kunmap(bo);
        amdgpu_bo_unreserve(bo);

        return amdgpu_uvd_send_msg(ring, bo, direct, fence);
}

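/*
 * Delayed work handler: once no UVD fences are outstanding, power the block
 * down via DPM (or drop the UVD clocks); otherwise re-arm the idle timer.
 */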
static void amdgpu_uvd_idle_work_handler(struct work_struct *work)
{
        struct amdgpu_device *adev =
                container_of(work, struct amdgpu_device, uvd.idle_work.work);
        unsigned fences = amdgpu_fence_count_emitted(&adev->uvd.ring);

        if (fences == 0) {
                if (adev->pm.dpm_enabled) {
                        amdgpu_dpm_enable_uvd(adev, false);
                } else {
                        amdgpu_asic_set_uvd_clocks(adev, 0, 0);
                }
        } else {
                schedule_delayed_work(&adev->uvd.idle_work, UVD_IDLE_TIMEOUT);
        }
}

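/*
 * If the idle work was no longer pending, the block may already have been
 * powered down, so bring the clocks (or DPM) back up before use.
 */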
void amdgpu_uvd_ring_begin_use(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;
        bool set_clocks = !cancel_delayed_work_sync(&adev->uvd.idle_work);

        if (set_clocks) {
                if (adev->pm.dpm_enabled) {
                        amdgpu_dpm_enable_uvd(adev, true);
                } else {
                        amdgpu_asic_set_uvd_clocks(adev, 53300, 40000);
                }
        }
}

void amdgpu_uvd_ring_end_use(struct amdgpu_ring *ring)
{
        schedule_delayed_work(&ring->adev->uvd.idle_work, UVD_IDLE_TIMEOUT);
}

/**
 * amdgpu_uvd_ring_test_ib - test ib execution
 *
 * @ring: amdgpu_ring pointer
 * @timeout: how long to wait for the fence, in jiffies
 *
 * Test if we can successfully execute an IB
 */
int amdgpu_uvd_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
        struct dma_fence *fence;
        long r;

        r = amdgpu_uvd_get_create_msg(ring, 1, NULL);
        if (r) {
                DRM_ERROR("amdgpu: failed to get create msg (%ld).\n", r);
                goto error;
        }

        r = amdgpu_uvd_get_destroy_msg(ring, 1, true, &fence);
        if (r) {
                DRM_ERROR("amdgpu: failed to get destroy ib (%ld).\n", r);
                goto error;
        }

        r = dma_fence_wait_timeout(fence, false, timeout);
        if (r == 0) {
                DRM_ERROR("amdgpu: IB test timed out.\n");
                r = -ETIMEDOUT;
        } else if (r < 0) {
                DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
        } else {
                DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
                r = 0;
        }

        dma_fence_put(fence);

error:
        return r;
}