linux/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
   1/*
   2 * Copyright 2008 Jerome Glisse.
   3 * All Rights Reserved.
   4 *
   5 * Permission is hereby granted, free of charge, to any person obtaining a
   6 * copy of this software and associated documentation files (the "Software"),
   7 * to deal in the Software without restriction, including without limitation
   8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   9 * and/or sell copies of the Software, and to permit persons to whom the
  10 * Software is furnished to do so, subject to the following conditions:
  11 *
  12 * The above copyright notice and this permission notice (including the next
  13 * paragraph) shall be included in all copies or substantial portions of the
  14 * Software.
  15 *
  16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  19 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
  20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
  22 * DEALINGS IN THE SOFTWARE.
  23 *
  24 * Authors:
  25 *    Jerome Glisse <glisse@freedesktop.org>
  26 */
  27
  28#include <linux/file.h>
  29#include <linux/pagemap.h>
  30#include <linux/sync_file.h>
  31#include <linux/dma-buf.h>
  32
  33#include <drm/amdgpu_drm.h>
  34#include <drm/drm_syncobj.h>
  35#include "amdgpu.h"
  36#include "amdgpu_trace.h"
  37#include "amdgpu_gmc.h"
  38#include "amdgpu_gem.h"
  39#include "amdgpu_ras.h"
  40
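/**
 * amdgpu_cs_user_fence_chunk - grab the user fence BO for a submission
 * @p: parser structure holding parsing context
 * @data: user fence chunk copied from user space
 * @offset: returns the byte offset of the fence inside the BO
 *
 * Looks up the GEM handle, takes a reference on the BO and stores it in the
 * parser's uf_entry. The BO must be exactly one page, must not be a userptr
 * BO, and the 8-byte fence at @offset must fit inside it.
 */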
  41static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
  42                                      struct drm_amdgpu_cs_chunk_fence *data,
  43                                      uint32_t *offset)
  44{
  45        struct drm_gem_object *gobj;
  46        struct amdgpu_bo *bo;
  47        unsigned long size;
  48        int r;
  49
  50        gobj = drm_gem_object_lookup(p->filp, data->handle);
  51        if (gobj == NULL)
  52                return -EINVAL;
  53
  54        bo = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
  55        p->uf_entry.priority = 0;
  56        p->uf_entry.tv.bo = &bo->tbo;
  57        /* One for TTM and one for the CS job */
  58        p->uf_entry.tv.num_shared = 2;
  59
  60        drm_gem_object_put(gobj);
  61
  62        size = amdgpu_bo_size(bo);
  63        if (size != PAGE_SIZE || (data->offset + 8) > size) {
  64                r = -EINVAL;
  65                goto error_unref;
  66        }
  67
  68        if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
  69                r = -EINVAL;
  70                goto error_unref;
  71        }
  72
  73        *offset = data->offset;
  74
  75        return 0;
  76
  77error_unref:
  78        amdgpu_bo_unref(&bo);
  79        return r;
  80}
  81
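/**
 * amdgpu_cs_bo_handles_chunk - create the BO list from a BO_HANDLES chunk
 * @p: parser structure holding parsing context
 * @data: BO list description copied from user space
 *
 * Converts the user handle array into BO list entries and creates p->bo_list
 * from them.
 */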
  82static int amdgpu_cs_bo_handles_chunk(struct amdgpu_cs_parser *p,
  83                                      struct drm_amdgpu_bo_list_in *data)
  84{
  85        int r;
  86        struct drm_amdgpu_bo_list_entry *info = NULL;
  87
  88        r = amdgpu_bo_create_list_entry_array(data, &info);
  89        if (r)
  90                return r;
  91
  92        r = amdgpu_bo_list_create(p->adev, p->filp, info, data->bo_number,
  93                                  &p->bo_list);
  94        if (r)
  95                goto error_free;
  96
  97        kvfree(info);
  98        return 0;
  99
 100error_free:
 101        kvfree(info);
 102
 103        return r;
 104}
 105
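/**
 * amdgpu_cs_parser_init - copy and validate the chunks of a CS ioctl
 * @p: parser structure holding parsing context
 * @cs: CS ioctl data copied from user space
 *
 * Copies the chunk array into kernel memory, handles the FENCE and BO_HANDLES
 * chunks, counts the IB chunks and allocates the job. Submissions from a
 * guilty context or after VRAM loss are rejected with -ECANCELED.
 */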
 106static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, union drm_amdgpu_cs *cs)
 107{
 108        struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
 109        struct amdgpu_vm *vm = &fpriv->vm;
 110        uint64_t *chunk_array_user;
 111        uint64_t *chunk_array;
 112        unsigned size, num_ibs = 0;
 113        uint32_t uf_offset = 0;
 114        int i;
 115        int ret;
 116
 117        if (cs->in.num_chunks == 0)
 118                return 0;
 119
 120        chunk_array = kvmalloc_array(cs->in.num_chunks, sizeof(uint64_t), GFP_KERNEL);
 121        if (!chunk_array)
 122                return -ENOMEM;
 123
 124        p->ctx = amdgpu_ctx_get(fpriv, cs->in.ctx_id);
 125        if (!p->ctx) {
 126                ret = -EINVAL;
 127                goto free_chunk;
 128        }
 129
 130        mutex_lock(&p->ctx->lock);
 131
 132        /* skip guilty context job */
 133        if (atomic_read(&p->ctx->guilty) == 1) {
 134                ret = -ECANCELED;
 135                goto free_chunk;
 136        }
 137
 138        /* get chunks */
 139        chunk_array_user = u64_to_user_ptr(cs->in.chunks);
 140        if (copy_from_user(chunk_array, chunk_array_user,
 141                           sizeof(uint64_t)*cs->in.num_chunks)) {
 142                ret = -EFAULT;
 143                goto free_chunk;
 144        }
 145
 146        p->nchunks = cs->in.num_chunks;
 147        p->chunks = kvmalloc_array(p->nchunks, sizeof(struct amdgpu_cs_chunk),
 148                            GFP_KERNEL);
 149        if (!p->chunks) {
 150                ret = -ENOMEM;
 151                goto free_chunk;
 152        }
 153
 154        for (i = 0; i < p->nchunks; i++) {
 155                struct drm_amdgpu_cs_chunk __user **chunk_ptr = NULL;
 156                struct drm_amdgpu_cs_chunk user_chunk;
 157                uint32_t __user *cdata;
 158
 159                chunk_ptr = u64_to_user_ptr(chunk_array[i]);
 160                if (copy_from_user(&user_chunk, chunk_ptr,
 161                                       sizeof(struct drm_amdgpu_cs_chunk))) {
 162                        ret = -EFAULT;
 163                        i--;
 164                        goto free_partial_kdata;
 165                }
 166                p->chunks[i].chunk_id = user_chunk.chunk_id;
 167                p->chunks[i].length_dw = user_chunk.length_dw;
 168
 169                size = p->chunks[i].length_dw;
 170                cdata = u64_to_user_ptr(user_chunk.chunk_data);
 171
 172                p->chunks[i].kdata = kvmalloc_array(size, sizeof(uint32_t), GFP_KERNEL);
 173                if (p->chunks[i].kdata == NULL) {
 174                        ret = -ENOMEM;
 175                        i--;
 176                        goto free_partial_kdata;
 177                }
 178                size *= sizeof(uint32_t);
 179                if (copy_from_user(p->chunks[i].kdata, cdata, size)) {
 180                        ret = -EFAULT;
 181                        goto free_partial_kdata;
 182                }
 183
 184                switch (p->chunks[i].chunk_id) {
 185                case AMDGPU_CHUNK_ID_IB:
 186                        ++num_ibs;
 187                        break;
 188
 189                case AMDGPU_CHUNK_ID_FENCE:
 190                        size = sizeof(struct drm_amdgpu_cs_chunk_fence);
 191                        if (p->chunks[i].length_dw * sizeof(uint32_t) < size) {
 192                                ret = -EINVAL;
 193                                goto free_partial_kdata;
 194                        }
 195
 196                        ret = amdgpu_cs_user_fence_chunk(p, p->chunks[i].kdata,
 197                                                         &uf_offset);
 198                        if (ret)
 199                                goto free_partial_kdata;
 200
 201                        break;
 202
 203                case AMDGPU_CHUNK_ID_BO_HANDLES:
 204                        size = sizeof(struct drm_amdgpu_bo_list_in);
 205                        if (p->chunks[i].length_dw * sizeof(uint32_t) < size) {
 206                                ret = -EINVAL;
 207                                goto free_partial_kdata;
 208                        }
 209
 210                        ret = amdgpu_cs_bo_handles_chunk(p, p->chunks[i].kdata);
 211                        if (ret)
 212                                goto free_partial_kdata;
 213
 214                        break;
 215
 216                case AMDGPU_CHUNK_ID_DEPENDENCIES:
 217                case AMDGPU_CHUNK_ID_SYNCOBJ_IN:
 218                case AMDGPU_CHUNK_ID_SYNCOBJ_OUT:
 219                case AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES:
 220                case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_WAIT:
 221                case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_SIGNAL:
 222                        break;
 223
 224                default:
 225                        ret = -EINVAL;
 226                        goto free_partial_kdata;
 227                }
 228        }
 229
 230        ret = amdgpu_job_alloc(p->adev, num_ibs, &p->job, vm);
 231        if (ret)
 232                goto free_all_kdata;
 233
 234        if (p->ctx->vram_lost_counter != p->job->vram_lost_counter) {
 235                ret = -ECANCELED;
 236                goto free_all_kdata;
 237        }
 238
 239        if (p->uf_entry.tv.bo)
 240                p->job->uf_addr = uf_offset;
 241        kvfree(chunk_array);
 242
 243        /* Use this opportunity to fill in task info for the vm */
 244        amdgpu_vm_set_task_info(vm);
 245
 246        return 0;
 247
 248free_all_kdata:
 249        i = p->nchunks - 1;
 250free_partial_kdata:
 251        for (; i >= 0; i--)
 252                kvfree(p->chunks[i].kdata);
 253        kvfree(p->chunks);
 254        p->chunks = NULL;
 255        p->nchunks = 0;
 256free_chunk:
 257        kvfree(chunk_array);
 258
 259        return ret;
 260}
 261
 262/* Convert microseconds to bytes. */
 263static u64 us_to_bytes(struct amdgpu_device *adev, s64 us)
 264{
 265        if (us <= 0 || !adev->mm_stats.log2_max_MBps)
 266                return 0;
 267
 268        /* Since accum_us is incremented by a million per second, just
 269         * multiply it by the number of MB/s to get the number of bytes.
 270         */
 271        return us << adev->mm_stats.log2_max_MBps;
 272}
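
/*
 * Example: with log2_max_MBps == 6 (64 MB/s) and the full 200 ms budget
 * (us_upper_bound below) accumulated, us_to_bytes() allows
 * 200000 << 6 = 12800000 bytes (~12.8 MB) of buffer moves.
 */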
 273
 274static s64 bytes_to_us(struct amdgpu_device *adev, u64 bytes)
 275{
 276        if (!adev->mm_stats.log2_max_MBps)
 277                return 0;
 278
 279        return bytes >> adev->mm_stats.log2_max_MBps;
 280}
 281
 282/* Returns how many bytes TTM can move right now. If no bytes can be moved,
 283 * it returns 0. If it returns non-zero, it's OK to move at least one buffer,
 284 * which means it can go over the threshold once. If that happens, the driver
 285 * will be in debt and no other buffer migrations can be done until that debt
 286 * is repaid.
 287 *
 288 * This approach allows moving a buffer of any size (it's important to allow
 289 * that).
 290 *
 291 * The currency is simply time in microseconds and it increases as the clock
 292 * ticks. The accumulated microseconds (us) are converted to bytes and
 293 * returned.
 294 */
 295static void amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev,
 296                                              u64 *max_bytes,
 297                                              u64 *max_vis_bytes)
 298{
 299        s64 time_us, increment_us;
 300        u64 free_vram, total_vram, used_vram;
 301        struct ttm_resource_manager *vram_man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);
 302        /* Allow a maximum of 200 accumulated ms. This is basically per-IB
 303         * throttling.
 304         *
 305         * It means that in order to get full max MBps, at least 5 IBs per
 306         * second must be submitted and not more than 200ms apart from each
 307         * other.
 308         */
 309        const s64 us_upper_bound = 200000;
 310
 311        if (!adev->mm_stats.log2_max_MBps) {
 312                *max_bytes = 0;
 313                *max_vis_bytes = 0;
 314                return;
 315        }
 316
 317        total_vram = adev->gmc.real_vram_size - atomic64_read(&adev->vram_pin_size);
 318        used_vram = amdgpu_vram_mgr_usage(vram_man);
 319        free_vram = used_vram >= total_vram ? 0 : total_vram - used_vram;
 320
 321        spin_lock(&adev->mm_stats.lock);
 322
 323        /* Increase the amount of accumulated us. */
 324        time_us = ktime_to_us(ktime_get());
 325        increment_us = time_us - adev->mm_stats.last_update_us;
 326        adev->mm_stats.last_update_us = time_us;
 327        adev->mm_stats.accum_us = min(adev->mm_stats.accum_us + increment_us,
 328                                      us_upper_bound);
 329
 330        /* This prevents the short period of low performance when the VRAM
 331         * usage is low and the driver is in debt or doesn't have enough
 332         * accumulated us to fill VRAM quickly.
 333         *
 334         * The situation can occur in these cases:
 335         * - a lot of VRAM is freed by userspace
 336         * - the presence of a big buffer causes a lot of evictions
 337         *   (solution: split buffers into smaller ones)
 338         *
 339         * If 128 MB or 1/8th of VRAM is free, start filling it now by setting
 340         * accum_us to a positive number.
 341         */
 342        if (free_vram >= 128 * 1024 * 1024 || free_vram >= total_vram / 8) {
 343                s64 min_us;
 344
  345                /* Be more aggressive on dGPUs. Try to fill a portion of free
 346                 * VRAM now.
 347                 */
 348                if (!(adev->flags & AMD_IS_APU))
 349                        min_us = bytes_to_us(adev, free_vram / 4);
 350                else
 351                        min_us = 0; /* Reset accum_us on APUs. */
 352
 353                adev->mm_stats.accum_us = max(min_us, adev->mm_stats.accum_us);
 354        }
 355
 356        /* This is set to 0 if the driver is in debt to disallow (optional)
 357         * buffer moves.
 358         */
 359        *max_bytes = us_to_bytes(adev, adev->mm_stats.accum_us);
 360
 361        /* Do the same for visible VRAM if half of it is free */
 362        if (!amdgpu_gmc_vram_full_visible(&adev->gmc)) {
 363                u64 total_vis_vram = adev->gmc.visible_vram_size;
 364                u64 used_vis_vram =
 365                  amdgpu_vram_mgr_vis_usage(vram_man);
 366
 367                if (used_vis_vram < total_vis_vram) {
 368                        u64 free_vis_vram = total_vis_vram - used_vis_vram;
 369                        adev->mm_stats.accum_us_vis = min(adev->mm_stats.accum_us_vis +
 370                                                          increment_us, us_upper_bound);
 371
 372                        if (free_vis_vram >= total_vis_vram / 2)
 373                                adev->mm_stats.accum_us_vis =
 374                                        max(bytes_to_us(adev, free_vis_vram / 2),
 375                                            adev->mm_stats.accum_us_vis);
 376                }
 377
 378                *max_vis_bytes = us_to_bytes(adev, adev->mm_stats.accum_us_vis);
 379        } else {
 380                *max_vis_bytes = 0;
 381        }
 382
 383        spin_unlock(&adev->mm_stats.lock);
 384}
 385
 386/* Report how many bytes have really been moved for the last command
 387 * submission. This can result in a debt that can stop buffer migrations
 388 * temporarily.
 389 */
 390void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes,
 391                                  u64 num_vis_bytes)
 392{
 393        spin_lock(&adev->mm_stats.lock);
 394        adev->mm_stats.accum_us -= bytes_to_us(adev, num_bytes);
 395        adev->mm_stats.accum_us_vis -= bytes_to_us(adev, num_vis_bytes);
 396        spin_unlock(&adev->mm_stats.lock);
 397}
 398
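/**
 * amdgpu_cs_bo_validate - validate a single BO for the submission
 * @param: amdgpu_cs_parser, passed through as the validation callback argument
 * @bo: buffer object to validate
 *
 * Prefers the BO's preferred domains while the parser still has move budget
 * (and visible VRAM budget for CPU_ACCESS_REQUIRED BOs when VRAM is not fully
 * visible), otherwise falls back to the allowed domains, and charges the
 * bytes TTM actually moved against the budget.
 */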
 399static int amdgpu_cs_bo_validate(void *param, struct amdgpu_bo *bo)
 400{
 401        struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
 402        struct amdgpu_cs_parser *p = param;
 403        struct ttm_operation_ctx ctx = {
 404                .interruptible = true,
 405                .no_wait_gpu = false,
 406                .resv = bo->tbo.base.resv
 407        };
 408        uint32_t domain;
 409        int r;
 410
 411        if (bo->tbo.pin_count)
 412                return 0;
 413
 414        /* Don't move this buffer if we have depleted our allowance
 415         * to move it. Don't move anything if the threshold is zero.
 416         */
 417        if (p->bytes_moved < p->bytes_moved_threshold &&
 418            (!bo->tbo.base.dma_buf ||
 419            list_empty(&bo->tbo.base.dma_buf->attachments))) {
 420                if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
 421                    (bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)) {
 422                        /* And don't move a CPU_ACCESS_REQUIRED BO to limited
 423                         * visible VRAM if we've depleted our allowance to do
 424                         * that.
 425                         */
 426                        if (p->bytes_moved_vis < p->bytes_moved_vis_threshold)
 427                                domain = bo->preferred_domains;
 428                        else
 429                                domain = bo->allowed_domains;
 430                } else {
 431                        domain = bo->preferred_domains;
 432                }
 433        } else {
 434                domain = bo->allowed_domains;
 435        }
 436
 437retry:
 438        amdgpu_bo_placement_from_domain(bo, domain);
 439        r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
 440
 441        p->bytes_moved += ctx.bytes_moved;
 442        if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
 443            amdgpu_bo_in_cpu_visible_vram(bo))
 444                p->bytes_moved_vis += ctx.bytes_moved;
 445
 446        if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) {
 447                domain = bo->allowed_domains;
 448                goto retry;
 449        }
 450
 451        return r;
 452}
 453
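/**
 * amdgpu_cs_list_validate - validate all BOs on a list
 * @p: parser structure holding parsing context
 * @validated: list of BO list entries to validate
 *
 * Rejects userptr BOs that belong to another process, moves invalidated
 * userptr BOs to the CPU domain and installs their new page arrays, then
 * validates every BO and frees the temporary user page arrays.
 */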
 454static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
 455                            struct list_head *validated)
 456{
 457        struct ttm_operation_ctx ctx = { true, false };
 458        struct amdgpu_bo_list_entry *lobj;
 459        int r;
 460
 461        list_for_each_entry(lobj, validated, tv.head) {
 462                struct amdgpu_bo *bo = ttm_to_amdgpu_bo(lobj->tv.bo);
 463                struct mm_struct *usermm;
 464
 465                usermm = amdgpu_ttm_tt_get_usermm(bo->tbo.ttm);
 466                if (usermm && usermm != current->mm)
 467                        return -EPERM;
 468
 469                if (amdgpu_ttm_tt_is_userptr(bo->tbo.ttm) &&
 470                    lobj->user_invalidated && lobj->user_pages) {
 471                        amdgpu_bo_placement_from_domain(bo,
 472                                                        AMDGPU_GEM_DOMAIN_CPU);
 473                        r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
 474                        if (r)
 475                                return r;
 476
 477                        amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm,
 478                                                     lobj->user_pages);
 479                }
 480
 481                r = amdgpu_cs_bo_validate(p, bo);
 482                if (r)
 483                        return r;
 484
 485                kvfree(lobj->user_pages);
 486                lobj->user_pages = NULL;
 487        }
 488        return 0;
 489}
 490
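/**
 * amdgpu_cs_parser_bos - reserve and validate all BOs of the submission
 * @p: parser structure holding parsing context
 * @cs: CS ioctl data copied from user space
 *
 * Builds the validation list from the BO list handle or the BO_HANDLES chunk,
 * grabs the userptr pages, reserves all buffers, validates them within the
 * move budget and fills in the GDS/GWS/OA and user fence information in the
 * job.
 */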
 491static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
 492                                union drm_amdgpu_cs *cs)
 493{
 494        struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
 495        struct amdgpu_vm *vm = &fpriv->vm;
 496        struct amdgpu_bo_list_entry *e;
 497        struct list_head duplicates;
 498        struct amdgpu_bo *gds;
 499        struct amdgpu_bo *gws;
 500        struct amdgpu_bo *oa;
 501        int r;
 502
 503        INIT_LIST_HEAD(&p->validated);
 504
 505        /* p->bo_list could already be assigned if AMDGPU_CHUNK_ID_BO_HANDLES is present */
 506        if (cs->in.bo_list_handle) {
 507                if (p->bo_list)
 508                        return -EINVAL;
 509
 510                r = amdgpu_bo_list_get(fpriv, cs->in.bo_list_handle,
 511                                       &p->bo_list);
 512                if (r)
 513                        return r;
 514        } else if (!p->bo_list) {
  515                /* Create an empty bo_list when no handle is provided */
 516                r = amdgpu_bo_list_create(p->adev, p->filp, NULL, 0,
 517                                          &p->bo_list);
 518                if (r)
 519                        return r;
 520        }
 521
 522        /* One for TTM and one for the CS job */
 523        amdgpu_bo_list_for_each_entry(e, p->bo_list)
 524                e->tv.num_shared = 2;
 525
 526        amdgpu_bo_list_get_list(p->bo_list, &p->validated);
 527
 528        INIT_LIST_HEAD(&duplicates);
 529        amdgpu_vm_get_pd_bo(&fpriv->vm, &p->validated, &p->vm_pd);
 530
 531        if (p->uf_entry.tv.bo && !ttm_to_amdgpu_bo(p->uf_entry.tv.bo)->parent)
 532                list_add(&p->uf_entry.tv.head, &p->validated);
 533
  534        /* Get userptr backing pages. If the pages were updated after being
  535         * registered in amdgpu_gem_userptr_ioctl(), amdgpu_cs_list_validate()
  536         * will do amdgpu_ttm_backend_bind() to flush and invalidate the new pages
  537         */
 538        amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
 539                struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
 540                bool userpage_invalidated = false;
 541                int i;
 542
 543                e->user_pages = kvmalloc_array(bo->tbo.ttm->num_pages,
 544                                        sizeof(struct page *),
 545                                        GFP_KERNEL | __GFP_ZERO);
 546                if (!e->user_pages) {
 547                        DRM_ERROR("kvmalloc_array failure\n");
 548                        return -ENOMEM;
 549                }
 550
 551                r = amdgpu_ttm_tt_get_user_pages(bo, e->user_pages);
 552                if (r) {
 553                        kvfree(e->user_pages);
 554                        e->user_pages = NULL;
 555                        return r;
 556                }
 557
 558                for (i = 0; i < bo->tbo.ttm->num_pages; i++) {
 559                        if (bo->tbo.ttm->pages[i] != e->user_pages[i]) {
 560                                userpage_invalidated = true;
 561                                break;
 562                        }
 563                }
 564                e->user_invalidated = userpage_invalidated;
 565        }
 566
 567        r = ttm_eu_reserve_buffers(&p->ticket, &p->validated, true,
 568                                   &duplicates);
 569        if (unlikely(r != 0)) {
 570                if (r != -ERESTARTSYS)
 571                        DRM_ERROR("ttm_eu_reserve_buffers failed.\n");
 572                goto out;
 573        }
 574
 575        amdgpu_bo_list_for_each_entry(e, p->bo_list) {
 576                struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
 577
 578                e->bo_va = amdgpu_vm_bo_find(vm, bo);
 579
 580                if (bo->tbo.base.dma_buf && !amdgpu_bo_explicit_sync(bo)) {
 581                        e->chain = dma_fence_chain_alloc();
 582                        if (!e->chain) {
 583                                r = -ENOMEM;
 584                                goto error_validate;
 585                        }
 586                }
 587        }
 588
 589        amdgpu_cs_get_threshold_for_moves(p->adev, &p->bytes_moved_threshold,
 590                                          &p->bytes_moved_vis_threshold);
 591        p->bytes_moved = 0;
 592        p->bytes_moved_vis = 0;
 593
 594        r = amdgpu_vm_validate_pt_bos(p->adev, &fpriv->vm,
 595                                      amdgpu_cs_bo_validate, p);
 596        if (r) {
 597                DRM_ERROR("amdgpu_vm_validate_pt_bos() failed.\n");
 598                goto error_validate;
 599        }
 600
 601        r = amdgpu_cs_list_validate(p, &duplicates);
 602        if (r)
 603                goto error_validate;
 604
 605        r = amdgpu_cs_list_validate(p, &p->validated);
 606        if (r)
 607                goto error_validate;
 608
 609        amdgpu_cs_report_moved_bytes(p->adev, p->bytes_moved,
 610                                     p->bytes_moved_vis);
 611
 612        gds = p->bo_list->gds_obj;
 613        gws = p->bo_list->gws_obj;
 614        oa = p->bo_list->oa_obj;
 615
 616        if (gds) {
 617                p->job->gds_base = amdgpu_bo_gpu_offset(gds) >> PAGE_SHIFT;
 618                p->job->gds_size = amdgpu_bo_size(gds) >> PAGE_SHIFT;
 619        }
 620        if (gws) {
 621                p->job->gws_base = amdgpu_bo_gpu_offset(gws) >> PAGE_SHIFT;
 622                p->job->gws_size = amdgpu_bo_size(gws) >> PAGE_SHIFT;
 623        }
 624        if (oa) {
 625                p->job->oa_base = amdgpu_bo_gpu_offset(oa) >> PAGE_SHIFT;
 626                p->job->oa_size = amdgpu_bo_size(oa) >> PAGE_SHIFT;
 627        }
 628
 629        if (!r && p->uf_entry.tv.bo) {
 630                struct amdgpu_bo *uf = ttm_to_amdgpu_bo(p->uf_entry.tv.bo);
 631
 632                r = amdgpu_ttm_alloc_gart(&uf->tbo);
 633                p->job->uf_addr += amdgpu_bo_gpu_offset(uf);
 634        }
 635
 636error_validate:
 637        if (r) {
 638                amdgpu_bo_list_for_each_entry(e, p->bo_list) {
 639                        dma_fence_chain_free(e->chain);
 640                        e->chain = NULL;
 641                }
 642                ttm_eu_backoff_reservation(&p->ticket, &p->validated);
 643        }
 644out:
 645        return r;
 646}
 647
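/**
 * amdgpu_cs_sync_rings - make the job wait on the fences of all used BOs
 * @p: parser structure holding parsing context
 *
 * Adds the reservation object fences of every validated BO to the job's sync
 * object, using explicit synchronization for BOs that request it and
 * AMDGPU_SYNC_NE_OWNER for everything else.
 */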
 648static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p)
 649{
 650        struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
 651        struct amdgpu_bo_list_entry *e;
 652        int r;
 653
 654        list_for_each_entry(e, &p->validated, tv.head) {
 655                struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
 656                struct dma_resv *resv = bo->tbo.base.resv;
 657                enum amdgpu_sync_mode sync_mode;
 658
 659                sync_mode = amdgpu_bo_explicit_sync(bo) ?
 660                        AMDGPU_SYNC_EXPLICIT : AMDGPU_SYNC_NE_OWNER;
 661                r = amdgpu_sync_resv(p->adev, &p->job->sync, resv, sync_mode,
 662                                     &fpriv->vm);
 663                if (r)
 664                        return r;
 665        }
 666        return 0;
 667}
 668
 669/**
 670 * amdgpu_cs_parser_fini() - clean parser states
 671 * @parser:     parser structure holding parsing context.
 672 * @error:      error number
 673 * @backoff:    indicator to backoff the reservation
 674 *
  675 * If error is set, back off the buffer reservations; otherwise just free
  676 * the memory used by the parsing context.
 677 **/
 678static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error,
 679                                  bool backoff)
 680{
 681        unsigned i;
 682
 683        if (error && backoff) {
 684                struct amdgpu_bo_list_entry *e;
 685
 686                amdgpu_bo_list_for_each_entry(e, parser->bo_list) {
 687                        dma_fence_chain_free(e->chain);
 688                        e->chain = NULL;
 689                }
 690
 691                ttm_eu_backoff_reservation(&parser->ticket,
 692                                           &parser->validated);
 693        }
 694
 695        for (i = 0; i < parser->num_post_deps; i++) {
 696                drm_syncobj_put(parser->post_deps[i].syncobj);
 697                kfree(parser->post_deps[i].chain);
 698        }
 699        kfree(parser->post_deps);
 700
 701        dma_fence_put(parser->fence);
 702
 703        if (parser->ctx) {
 704                mutex_unlock(&parser->ctx->lock);
 705                amdgpu_ctx_put(parser->ctx);
 706        }
 707        if (parser->bo_list)
 708                amdgpu_bo_list_put(parser->bo_list);
 709
 710        for (i = 0; i < parser->nchunks; i++)
 711                kvfree(parser->chunks[i].kdata);
 712        kvfree(parser->chunks);
 713        if (parser->job)
 714                amdgpu_job_free(parser->job);
 715        if (parser->uf_entry.tv.bo) {
 716                struct amdgpu_bo *uf = ttm_to_amdgpu_bo(parser->uf_entry.tv.bo);
 717
 718                amdgpu_bo_unref(&uf);
 719        }
 720}
 721
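/**
 * amdgpu_cs_vm_handling - prepare the VM for the submission
 * @p: parser structure holding parsing context
 *
 * For UVD/VCE VM emulation the IBs are parsed or patched in place. For real
 * VM submissions the freed, moved and per-BO mappings are updated, the page
 * directories are brought up to date and the resulting page table fences are
 * added to the job before syncing to the reserved BOs.
 */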
 722static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
 723{
 724        struct amdgpu_ring *ring = to_amdgpu_ring(p->entity->rq->sched);
 725        struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
 726        struct amdgpu_device *adev = p->adev;
 727        struct amdgpu_vm *vm = &fpriv->vm;
 728        struct amdgpu_bo_list_entry *e;
 729        struct amdgpu_bo_va *bo_va;
 730        struct amdgpu_bo *bo;
 731        int r;
 732
 733        /* Only for UVD/VCE VM emulation */
 734        if (ring->funcs->parse_cs || ring->funcs->patch_cs_in_place) {
 735                unsigned i, j;
 736
 737                for (i = 0, j = 0; i < p->nchunks && j < p->job->num_ibs; i++) {
 738                        struct drm_amdgpu_cs_chunk_ib *chunk_ib;
 739                        struct amdgpu_bo_va_mapping *m;
 740                        struct amdgpu_bo *aobj = NULL;
 741                        struct amdgpu_cs_chunk *chunk;
 742                        uint64_t offset, va_start;
 743                        struct amdgpu_ib *ib;
 744                        uint8_t *kptr;
 745
 746                        chunk = &p->chunks[i];
 747                        ib = &p->job->ibs[j];
 748                        chunk_ib = chunk->kdata;
 749
 750                        if (chunk->chunk_id != AMDGPU_CHUNK_ID_IB)
 751                                continue;
 752
 753                        va_start = chunk_ib->va_start & AMDGPU_GMC_HOLE_MASK;
 754                        r = amdgpu_cs_find_mapping(p, va_start, &aobj, &m);
 755                        if (r) {
 756                                DRM_ERROR("IB va_start is invalid\n");
 757                                return r;
 758                        }
 759
 760                        if ((va_start + chunk_ib->ib_bytes) >
 761                            (m->last + 1) * AMDGPU_GPU_PAGE_SIZE) {
 762                                DRM_ERROR("IB va_start+ib_bytes is invalid\n");
 763                                return -EINVAL;
 764                        }
 765
 766                        /* the IB should be reserved at this point */
 767                        r = amdgpu_bo_kmap(aobj, (void **)&kptr);
 768                        if (r) {
 769                                return r;
 770                        }
 771
 772                        offset = m->start * AMDGPU_GPU_PAGE_SIZE;
 773                        kptr += va_start - offset;
 774
 775                        if (ring->funcs->parse_cs) {
 776                                memcpy(ib->ptr, kptr, chunk_ib->ib_bytes);
 777                                amdgpu_bo_kunmap(aobj);
 778
 779                                r = amdgpu_ring_parse_cs(ring, p, j);
 780                                if (r)
 781                                        return r;
 782                        } else {
 783                                ib->ptr = (uint32_t *)kptr;
 784                                r = amdgpu_ring_patch_cs_in_place(ring, p, j);
 785                                amdgpu_bo_kunmap(aobj);
 786                                if (r)
 787                                        return r;
 788                        }
 789
 790                        j++;
 791                }
 792        }
 793
 794        if (!p->job->vm)
 795                return amdgpu_cs_sync_rings(p);
 796
 797
 798        r = amdgpu_vm_clear_freed(adev, vm, NULL);
 799        if (r)
 800                return r;
 801
 802        r = amdgpu_vm_bo_update(adev, fpriv->prt_va, false, NULL);
 803        if (r)
 804                return r;
 805
 806        r = amdgpu_sync_vm_fence(&p->job->sync, fpriv->prt_va->last_pt_update);
 807        if (r)
 808                return r;
 809
 810        if (amdgpu_mcbp || amdgpu_sriov_vf(adev)) {
 811                bo_va = fpriv->csa_va;
 812                BUG_ON(!bo_va);
 813                r = amdgpu_vm_bo_update(adev, bo_va, false, NULL);
 814                if (r)
 815                        return r;
 816
 817                r = amdgpu_sync_vm_fence(&p->job->sync, bo_va->last_pt_update);
 818                if (r)
 819                        return r;
 820        }
 821
 822        amdgpu_bo_list_for_each_entry(e, p->bo_list) {
 823                /* ignore duplicates */
 824                bo = ttm_to_amdgpu_bo(e->tv.bo);
 825                if (!bo)
 826                        continue;
 827
 828                bo_va = e->bo_va;
 829                if (bo_va == NULL)
 830                        continue;
 831
 832                r = amdgpu_vm_bo_update(adev, bo_va, false, NULL);
 833                if (r)
 834                        return r;
 835
 836                r = amdgpu_sync_vm_fence(&p->job->sync, bo_va->last_pt_update);
 837                if (r)
 838                        return r;
 839        }
 840
 841        r = amdgpu_vm_handle_moved(adev, vm);
 842        if (r)
 843                return r;
 844
 845        r = amdgpu_vm_update_pdes(adev, vm, false);
 846        if (r)
 847                return r;
 848
 849        r = amdgpu_sync_vm_fence(&p->job->sync, vm->last_update);
 850        if (r)
 851                return r;
 852
 853        p->job->vm_pd_addr = amdgpu_gmc_pd_addr(vm->root.bo);
 854
 855        if (amdgpu_vm_debug) {
 856                /* Invalidate all BOs to test for userspace bugs */
 857                amdgpu_bo_list_for_each_entry(e, p->bo_list) {
 858                        struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
 859
 860                        /* ignore duplicates */
 861                        if (!bo)
 862                                continue;
 863
 864                        amdgpu_vm_bo_invalidate(adev, bo, false);
 865                }
 866        }
 867
 868        return amdgpu_cs_sync_rings(p);
 869}
 870
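/**
 * amdgpu_cs_ib_fill - initialize the job's IBs from the IB chunks
 * @adev: amdgpu device
 * @parser: parser structure holding parsing context
 *
 * Resolves the scheduler entity for every IB chunk, enforces the CE/DE
 * preemption limits for GFX, allocates the IBs and finally checks that the
 * selected ring supports user fences if one was requested.
 */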
 871static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
 872                             struct amdgpu_cs_parser *parser)
 873{
 874        struct amdgpu_fpriv *fpriv = parser->filp->driver_priv;
 875        struct amdgpu_vm *vm = &fpriv->vm;
 876        int r, ce_preempt = 0, de_preempt = 0;
 877        struct amdgpu_ring *ring;
 878        int i, j;
 879
 880        for (i = 0, j = 0; i < parser->nchunks && j < parser->job->num_ibs; i++) {
 881                struct amdgpu_cs_chunk *chunk;
 882                struct amdgpu_ib *ib;
 883                struct drm_amdgpu_cs_chunk_ib *chunk_ib;
 884                struct drm_sched_entity *entity;
 885
 886                chunk = &parser->chunks[i];
 887                ib = &parser->job->ibs[j];
 888                chunk_ib = (struct drm_amdgpu_cs_chunk_ib *)chunk->kdata;
 889
 890                if (chunk->chunk_id != AMDGPU_CHUNK_ID_IB)
 891                        continue;
 892
 893                if (chunk_ib->ip_type == AMDGPU_HW_IP_GFX &&
 894                    (amdgpu_mcbp || amdgpu_sriov_vf(adev))) {
 895                        if (chunk_ib->flags & AMDGPU_IB_FLAG_PREEMPT) {
 896                                if (chunk_ib->flags & AMDGPU_IB_FLAG_CE)
 897                                        ce_preempt++;
 898                                else
 899                                        de_preempt++;
 900                        }
 901
  902                        /* Each GFX submission allows at most one preemptible IB each for CE and DE */
 903                        if (ce_preempt > 1 || de_preempt > 1)
 904                                return -EINVAL;
 905                }
 906
 907                r = amdgpu_ctx_get_entity(parser->ctx, chunk_ib->ip_type,
 908                                          chunk_ib->ip_instance, chunk_ib->ring,
 909                                          &entity);
 910                if (r)
 911                        return r;
 912
 913                if (chunk_ib->flags & AMDGPU_IB_FLAG_PREAMBLE)
 914                        parser->job->preamble_status |=
 915                                AMDGPU_PREAMBLE_IB_PRESENT;
 916
 917                if (parser->entity && parser->entity != entity)
 918                        return -EINVAL;
 919
  920                /* Return if there is no run queue associated with this
  921                 * entity, possibly because the HW IP is disabled. */
 922                if (entity->rq == NULL)
 923                        return -EINVAL;
 924
 925                parser->entity = entity;
 926
 927                ring = to_amdgpu_ring(entity->rq->sched);
 928                r =  amdgpu_ib_get(adev, vm, ring->funcs->parse_cs ?
 929                                   chunk_ib->ib_bytes : 0,
 930                                   AMDGPU_IB_POOL_DELAYED, ib);
 931                if (r) {
  932                        DRM_ERROR("Failed to get ib!\n");
 933                        return r;
 934                }
 935
 936                ib->gpu_addr = chunk_ib->va_start;
 937                ib->length_dw = chunk_ib->ib_bytes / 4;
 938                ib->flags = chunk_ib->flags;
 939
 940                j++;
 941        }
 942
 943        /* MM engine doesn't support user fences */
 944        ring = to_amdgpu_ring(parser->entity->rq->sched);
 945        if (parser->job->uf_addr && ring->funcs->no_user_fence)
 946                return -EINVAL;
 947
 948        return amdgpu_ctx_wait_prev_fence(parser->ctx, parser->entity);
 949}
 950
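/**
 * amdgpu_cs_process_fence_dep - add context fence dependencies to the job
 * @p: parser structure holding parsing context
 * @chunk: DEPENDENCIES or SCHEDULED_DEPENDENCIES chunk to process
 *
 * Looks up every referenced context fence and adds it to the job's sync
 * object; for SCHEDULED_DEPENDENCIES the scheduled fence is used instead of
 * the finished one.
 */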
 951static int amdgpu_cs_process_fence_dep(struct amdgpu_cs_parser *p,
 952                                       struct amdgpu_cs_chunk *chunk)
 953{
 954        struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
 955        unsigned num_deps;
 956        int i, r;
 957        struct drm_amdgpu_cs_chunk_dep *deps;
 958
 959        deps = (struct drm_amdgpu_cs_chunk_dep *)chunk->kdata;
 960        num_deps = chunk->length_dw * 4 /
 961                sizeof(struct drm_amdgpu_cs_chunk_dep);
 962
 963        for (i = 0; i < num_deps; ++i) {
 964                struct amdgpu_ctx *ctx;
 965                struct drm_sched_entity *entity;
 966                struct dma_fence *fence;
 967
 968                ctx = amdgpu_ctx_get(fpriv, deps[i].ctx_id);
 969                if (ctx == NULL)
 970                        return -EINVAL;
 971
 972                r = amdgpu_ctx_get_entity(ctx, deps[i].ip_type,
 973                                          deps[i].ip_instance,
 974                                          deps[i].ring, &entity);
 975                if (r) {
 976                        amdgpu_ctx_put(ctx);
 977                        return r;
 978                }
 979
 980                fence = amdgpu_ctx_get_fence(ctx, entity, deps[i].handle);
 981                amdgpu_ctx_put(ctx);
 982
 983                if (IS_ERR(fence))
 984                        return PTR_ERR(fence);
 985                else if (!fence)
 986                        continue;
 987
 988                if (chunk->chunk_id == AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES) {
 989                        struct drm_sched_fence *s_fence;
 990                        struct dma_fence *old = fence;
 991
 992                        s_fence = to_drm_sched_fence(fence);
 993                        fence = dma_fence_get(&s_fence->scheduled);
 994                        dma_fence_put(old);
 995                }
 996
 997                r = amdgpu_sync_fence(&p->job->sync, fence);
 998                dma_fence_put(fence);
 999                if (r)
1000                        return r;
1001        }
1002        return 0;
1003}
1004
1005static int amdgpu_syncobj_lookup_and_add_to_sync(struct amdgpu_cs_parser *p,
1006                                                 uint32_t handle, u64 point,
1007                                                 u64 flags)
1008{
1009        struct dma_fence *fence;
1010        int r;
1011
1012        r = drm_syncobj_find_fence(p->filp, handle, point, flags, &fence);
1013        if (r) {
1014                DRM_ERROR("syncobj %u failed to find fence @ %llu (%d)!\n",
1015                          handle, point, r);
1016                return r;
1017        }
1018
1019        r = amdgpu_sync_fence(&p->job->sync, fence);
1020        dma_fence_put(fence);
1021
1022        return r;
1023}
1024
1025static int amdgpu_cs_process_syncobj_in_dep(struct amdgpu_cs_parser *p,
1026                                            struct amdgpu_cs_chunk *chunk)
1027{
1028        struct drm_amdgpu_cs_chunk_sem *deps;
1029        unsigned num_deps;
1030        int i, r;
1031
1032        deps = (struct drm_amdgpu_cs_chunk_sem *)chunk->kdata;
1033        num_deps = chunk->length_dw * 4 /
1034                sizeof(struct drm_amdgpu_cs_chunk_sem);
1035        for (i = 0; i < num_deps; ++i) {
1036                r = amdgpu_syncobj_lookup_and_add_to_sync(p, deps[i].handle,
1037                                                          0, 0);
1038                if (r)
1039                        return r;
1040        }
1041
1042        return 0;
1043}
1044
1045
1046static int amdgpu_cs_process_syncobj_timeline_in_dep(struct amdgpu_cs_parser *p,
1047                                                     struct amdgpu_cs_chunk *chunk)
1048{
1049        struct drm_amdgpu_cs_chunk_syncobj *syncobj_deps;
1050        unsigned num_deps;
1051        int i, r;
1052
1053        syncobj_deps = (struct drm_amdgpu_cs_chunk_syncobj *)chunk->kdata;
1054        num_deps = chunk->length_dw * 4 /
1055                sizeof(struct drm_amdgpu_cs_chunk_syncobj);
1056        for (i = 0; i < num_deps; ++i) {
1057                r = amdgpu_syncobj_lookup_and_add_to_sync(p,
1058                                                          syncobj_deps[i].handle,
1059                                                          syncobj_deps[i].point,
1060                                                          syncobj_deps[i].flags);
1061                if (r)
1062                        return r;
1063        }
1064
1065        return 0;
1066}
1067
1068static int amdgpu_cs_process_syncobj_out_dep(struct amdgpu_cs_parser *p,
1069                                             struct amdgpu_cs_chunk *chunk)
1070{
1071        struct drm_amdgpu_cs_chunk_sem *deps;
1072        unsigned num_deps;
1073        int i;
1074
1075        deps = (struct drm_amdgpu_cs_chunk_sem *)chunk->kdata;
1076        num_deps = chunk->length_dw * 4 /
1077                sizeof(struct drm_amdgpu_cs_chunk_sem);
1078
1079        if (p->post_deps)
1080                return -EINVAL;
1081
1082        p->post_deps = kmalloc_array(num_deps, sizeof(*p->post_deps),
1083                                     GFP_KERNEL);
1084        p->num_post_deps = 0;
1085
1086        if (!p->post_deps)
1087                return -ENOMEM;
1088
1089
1090        for (i = 0; i < num_deps; ++i) {
1091                p->post_deps[i].syncobj =
1092                        drm_syncobj_find(p->filp, deps[i].handle);
1093                if (!p->post_deps[i].syncobj)
1094                        return -EINVAL;
1095                p->post_deps[i].chain = NULL;
1096                p->post_deps[i].point = 0;
1097                p->num_post_deps++;
1098        }
1099
1100        return 0;
1101}
1102
1103
1104static int amdgpu_cs_process_syncobj_timeline_out_dep(struct amdgpu_cs_parser *p,
1105                                                      struct amdgpu_cs_chunk *chunk)
1106{
1107        struct drm_amdgpu_cs_chunk_syncobj *syncobj_deps;
1108        unsigned num_deps;
1109        int i;
1110
1111        syncobj_deps = (struct drm_amdgpu_cs_chunk_syncobj *)chunk->kdata;
1112        num_deps = chunk->length_dw * 4 /
1113                sizeof(struct drm_amdgpu_cs_chunk_syncobj);
1114
1115        if (p->post_deps)
1116                return -EINVAL;
1117
1118        p->post_deps = kmalloc_array(num_deps, sizeof(*p->post_deps),
1119                                     GFP_KERNEL);
1120        p->num_post_deps = 0;
1121
1122        if (!p->post_deps)
1123                return -ENOMEM;
1124
1125        for (i = 0; i < num_deps; ++i) {
1126                struct amdgpu_cs_post_dep *dep = &p->post_deps[i];
1127
1128                dep->chain = NULL;
1129                if (syncobj_deps[i].point) {
1130                        dep->chain = dma_fence_chain_alloc();
1131                        if (!dep->chain)
1132                                return -ENOMEM;
1133                }
1134
1135                dep->syncobj = drm_syncobj_find(p->filp,
1136                                                syncobj_deps[i].handle);
1137                if (!dep->syncobj) {
1138                        dma_fence_chain_free(dep->chain);
1139                        return -EINVAL;
1140                }
1141                dep->point = syncobj_deps[i].point;
1142                p->num_post_deps++;
1143        }
1144
1145        return 0;
1146}
1147
1148static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
1149                                  struct amdgpu_cs_parser *p)
1150{
1151        int i, r;
1152
1153        for (i = 0; i < p->nchunks; ++i) {
1154                struct amdgpu_cs_chunk *chunk;
1155
1156                chunk = &p->chunks[i];
1157
1158                switch (chunk->chunk_id) {
1159                case AMDGPU_CHUNK_ID_DEPENDENCIES:
1160                case AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES:
1161                        r = amdgpu_cs_process_fence_dep(p, chunk);
1162                        if (r)
1163                                return r;
1164                        break;
1165                case AMDGPU_CHUNK_ID_SYNCOBJ_IN:
1166                        r = amdgpu_cs_process_syncobj_in_dep(p, chunk);
1167                        if (r)
1168                                return r;
1169                        break;
1170                case AMDGPU_CHUNK_ID_SYNCOBJ_OUT:
1171                        r = amdgpu_cs_process_syncobj_out_dep(p, chunk);
1172                        if (r)
1173                                return r;
1174                        break;
1175                case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_WAIT:
1176                        r = amdgpu_cs_process_syncobj_timeline_in_dep(p, chunk);
1177                        if (r)
1178                                return r;
1179                        break;
1180                case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_SIGNAL:
1181                        r = amdgpu_cs_process_syncobj_timeline_out_dep(p, chunk);
1182                        if (r)
1183                                return r;
1184                        break;
1185                }
1186        }
1187
1188        return 0;
1189}
1190
1191static void amdgpu_cs_post_dependencies(struct amdgpu_cs_parser *p)
1192{
1193        int i;
1194
1195        for (i = 0; i < p->num_post_deps; ++i) {
1196                if (p->post_deps[i].chain && p->post_deps[i].point) {
1197                        drm_syncobj_add_point(p->post_deps[i].syncobj,
1198                                              p->post_deps[i].chain,
1199                                              p->fence, p->post_deps[i].point);
1200                        p->post_deps[i].chain = NULL;
1201                } else {
1202                        drm_syncobj_replace_fence(p->post_deps[i].syncobj,
1203                                                  p->fence);
1204                }
1205        }
1206}
1207
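/**
 * amdgpu_cs_submit - push the prepared job to the scheduler
 * @p: parser structure holding parsing context
 * @cs: CS ioctl data, out.handle returns the submission sequence number
 *
 * Initializes the scheduler job, re-checks the userptr pages under the
 * notifier lock (returning -EAGAIN if they changed), installs the fence in
 * the context, signals the post dependencies, pushes the job to the scheduler
 * and finally adds the fence to all reserved BOs before dropping the lock.
 */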
1208static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
1209                            union drm_amdgpu_cs *cs)
1210{
1211        struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
1212        struct drm_sched_entity *entity = p->entity;
1213        struct amdgpu_bo_list_entry *e;
1214        struct amdgpu_job *job;
1215        uint64_t seq;
1216        int r;
1217
1218        job = p->job;
1219        p->job = NULL;
1220
1221        r = drm_sched_job_init(&job->base, entity, &fpriv->vm);
1222        if (r)
1223                goto error_unlock;
1224
1225        /* No memory allocation is allowed while holding the notifier lock.
 1226         * The lock is held until amdgpu_cs_submit is finished and the fence
 1227         * is added to the BOs.
1228         */
1229        mutex_lock(&p->adev->notifier_lock);
1230
 1231        /* If userptrs were invalidated after amdgpu_cs_parser_bos(), return
 1232         * -EAGAIN; drmIoctl() in libdrm will then restart the amdgpu_cs_ioctl.
1233         */
1234        amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
1235                struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
1236
1237                r |= !amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);
1238        }
1239        if (r) {
1240                r = -EAGAIN;
1241                goto error_abort;
1242        }
1243
1244        p->fence = dma_fence_get(&job->base.s_fence->finished);
1245
1246        amdgpu_ctx_add_fence(p->ctx, entity, p->fence, &seq);
1247        amdgpu_cs_post_dependencies(p);
1248
1249        if ((job->preamble_status & AMDGPU_PREAMBLE_IB_PRESENT) &&
1250            !p->ctx->preamble_presented) {
1251                job->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT_FIRST;
1252                p->ctx->preamble_presented = true;
1253        }
1254
1255        cs->out.handle = seq;
1256        job->uf_sequence = seq;
1257
1258        amdgpu_job_free_resources(job);
1259
1260        trace_amdgpu_cs_ioctl(job);
1261        amdgpu_vm_bo_trace_cs(&fpriv->vm, &p->ticket);
1262        drm_sched_entity_push_job(&job->base, entity);
1263
1264        amdgpu_vm_move_to_lru_tail(p->adev, &fpriv->vm);
1265
1266        amdgpu_bo_list_for_each_entry(e, p->bo_list) {
1267                struct dma_resv *resv = e->tv.bo->base.resv;
1268                struct dma_fence_chain *chain = e->chain;
1269
1270                if (!chain)
1271                        continue;
1272
1273                /*
 1274                 * Work around dma_resv shortcomings by wrapping up the
 1275                 * submission in a dma_fence_chain and adding it as the
 1276                 * exclusive fence, but first add the submission as a shared
 1277                 * fence to make sure that shared fences never signal before
 1278                 * the exclusive one.
1279                 */
1280                dma_fence_chain_init(chain, dma_resv_excl_fence(resv),
1281                                     dma_fence_get(p->fence), 1);
1282
1283                dma_resv_add_shared_fence(resv, p->fence);
1284                rcu_assign_pointer(resv->fence_excl, &chain->base);
1285                e->chain = NULL;
1286        }
1287
1288        ttm_eu_fence_buffer_objects(&p->ticket, &p->validated, p->fence);
1289        mutex_unlock(&p->adev->notifier_lock);
1290
1291        return 0;
1292
1293error_abort:
1294        drm_sched_job_cleanup(&job->base);
1295        mutex_unlock(&p->adev->notifier_lock);
1296
1297error_unlock:
1298        amdgpu_job_free(job);
1299        return r;
1300}
1301
1302static void trace_amdgpu_cs_ibs(struct amdgpu_cs_parser *parser)
1303{
1304        int i;
1305
1306        if (!trace_amdgpu_cs_enabled())
1307                return;
1308
1309        for (i = 0; i < parser->job->num_ibs; i++)
1310                trace_amdgpu_cs(parser, i);
1311}
1312
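/**
 * amdgpu_cs_ioctl - userspace entry point for command submission
 * @dev: drm device
 * @data: CS ioctl data from userspace
 * @filp: file private
 *
 * Parses the chunks, fills in the IBs, resolves the dependencies, reserves
 * and validates the buffers, handles the VM updates and finally submits the
 * job to the scheduler.
 */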
1313int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
1314{
1315        struct amdgpu_device *adev = drm_to_adev(dev);
1316        union drm_amdgpu_cs *cs = data;
1317        struct amdgpu_cs_parser parser = {};
1318        bool reserved_buffers = false;
1319        int r;
1320
1321        if (amdgpu_ras_intr_triggered())
1322                return -EHWPOISON;
1323
1324        if (!adev->accel_working)
1325                return -EBUSY;
1326
1327        parser.adev = adev;
1328        parser.filp = filp;
1329
1330        r = amdgpu_cs_parser_init(&parser, data);
1331        if (r) {
1332                if (printk_ratelimit())
1333                        DRM_ERROR("Failed to initialize parser %d!\n", r);
1334                goto out;
1335        }
1336
1337        r = amdgpu_cs_ib_fill(adev, &parser);
1338        if (r)
1339                goto out;
1340
1341        r = amdgpu_cs_dependencies(adev, &parser);
1342        if (r) {
1343                DRM_ERROR("Failed in the dependencies handling %d!\n", r);
1344                goto out;
1345        }
1346
1347        r = amdgpu_cs_parser_bos(&parser, data);
1348        if (r) {
1349                if (r == -ENOMEM)
1350                        DRM_ERROR("Not enough memory for command submission!\n");
1351                else if (r != -ERESTARTSYS && r != -EAGAIN)
1352                        DRM_ERROR("Failed to process the buffer list %d!\n", r);
1353                goto out;
1354        }
1355
1356        reserved_buffers = true;
1357
1358        trace_amdgpu_cs_ibs(&parser);
1359
1360        r = amdgpu_cs_vm_handling(&parser);
1361        if (r)
1362                goto out;
1363
1364        r = amdgpu_cs_submit(&parser, cs);
1365
1366out:
1367        amdgpu_cs_parser_fini(&parser, r, reserved_buffers);
1368
1369        return r;
1370}
1371
1372/**
1373 * amdgpu_cs_wait_ioctl - wait for a command submission to finish
1374 *
1375 * @dev: drm device
1376 * @data: data from userspace
1377 * @filp: file private
1378 *
1379 * Wait for the command submission identified by handle to finish.
1380 */
1381int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data,
1382                         struct drm_file *filp)
1383{
1384        union drm_amdgpu_wait_cs *wait = data;
1385        unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout);
1386        struct drm_sched_entity *entity;
1387        struct amdgpu_ctx *ctx;
1388        struct dma_fence *fence;
1389        long r;
1390
1391        ctx = amdgpu_ctx_get(filp->driver_priv, wait->in.ctx_id);
1392        if (ctx == NULL)
1393                return -EINVAL;
1394
1395        r = amdgpu_ctx_get_entity(ctx, wait->in.ip_type, wait->in.ip_instance,
1396                                  wait->in.ring, &entity);
1397        if (r) {
1398                amdgpu_ctx_put(ctx);
1399                return r;
1400        }
1401
1402        fence = amdgpu_ctx_get_fence(ctx, entity, wait->in.handle);
1403        if (IS_ERR(fence))
1404                r = PTR_ERR(fence);
1405        else if (fence) {
1406                r = dma_fence_wait_timeout(fence, true, timeout);
1407                if (r > 0 && fence->error)
1408                        r = fence->error;
1409                dma_fence_put(fence);
1410        } else
1411                r = 1;
1412
1413        amdgpu_ctx_put(ctx);
1414        if (r < 0)
1415                return r;
1416
1417        memset(wait, 0, sizeof(*wait));
1418        wait->out.status = (r == 0);
1419
1420        return 0;
1421}
1422
1423/**
1424 * amdgpu_cs_get_fence - helper to get fence from drm_amdgpu_fence
1425 *
1426 * @adev: amdgpu device
1427 * @filp: file private
1428 * @user: drm_amdgpu_fence copied from user space
1429 */
1430static struct dma_fence *amdgpu_cs_get_fence(struct amdgpu_device *adev,
1431                                             struct drm_file *filp,
1432                                             struct drm_amdgpu_fence *user)
1433{
1434        struct drm_sched_entity *entity;
1435        struct amdgpu_ctx *ctx;
1436        struct dma_fence *fence;
1437        int r;
1438
1439        ctx = amdgpu_ctx_get(filp->driver_priv, user->ctx_id);
1440        if (ctx == NULL)
1441                return ERR_PTR(-EINVAL);
1442
1443        r = amdgpu_ctx_get_entity(ctx, user->ip_type, user->ip_instance,
1444                                  user->ring, &entity);
1445        if (r) {
1446                amdgpu_ctx_put(ctx);
1447                return ERR_PTR(r);
1448        }
1449
1450        fence = amdgpu_ctx_get_fence(ctx, entity, user->seq_no);
1451        amdgpu_ctx_put(ctx);
1452
1453        return fence;
1454}
1455
1456int amdgpu_cs_fence_to_handle_ioctl(struct drm_device *dev, void *data,
1457                                    struct drm_file *filp)
1458{
1459        struct amdgpu_device *adev = drm_to_adev(dev);
1460        union drm_amdgpu_fence_to_handle *info = data;
1461        struct dma_fence *fence;
1462        struct drm_syncobj *syncobj;
1463        struct sync_file *sync_file;
1464        int fd, r;
1465
1466        fence = amdgpu_cs_get_fence(adev, filp, &info->in.fence);
1467        if (IS_ERR(fence))
1468                return PTR_ERR(fence);
1469
1470        if (!fence)
1471                fence = dma_fence_get_stub();
1472
1473        switch (info->in.what) {
1474        case AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ:
1475                r = drm_syncobj_create(&syncobj, 0, fence);
1476                dma_fence_put(fence);
1477                if (r)
1478                        return r;
1479                r = drm_syncobj_get_handle(filp, syncobj, &info->out.handle);
1480                drm_syncobj_put(syncobj);
1481                return r;
1482
1483        case AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ_FD:
1484                r = drm_syncobj_create(&syncobj, 0, fence);
1485                dma_fence_put(fence);
1486                if (r)
1487                        return r;
1488                r = drm_syncobj_get_fd(syncobj, (int *)&info->out.handle);
1489                drm_syncobj_put(syncobj);
1490                return r;
1491
1492        case AMDGPU_FENCE_TO_HANDLE_GET_SYNC_FILE_FD:
1493                fd = get_unused_fd_flags(O_CLOEXEC);
1494                if (fd < 0) {
1495                        dma_fence_put(fence);
1496                        return fd;
1497                }
1498
1499                sync_file = sync_file_create(fence);
1500                dma_fence_put(fence);
1501                if (!sync_file) {
1502                        put_unused_fd(fd);
1503                        return -ENOMEM;
1504                }
1505
1506                fd_install(fd, sync_file->file);
1507                info->out.handle = fd;
1508                return 0;
1509
1510        default:
1511                return -EINVAL;
1512        }
1513}
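/*
 * Example (sketch): exporting a submission fence as a sync_file fd from
 * userspace.  ctx_id and seq_no are assumed to come from an earlier CS
 * submit on the GFX ring; the returned fd can be polled or handed to other
 * processes and drivers that consume sync_files.
 *
 *	int cs_fence_to_sync_file(int fd, uint32_t ctx_id, uint64_t seq_no)
 *	{
 *		union drm_amdgpu_fence_to_handle fth = {0};
 *
 *		fth.in.fence.ctx_id = ctx_id;
 *		fth.in.fence.ip_type = AMDGPU_HW_IP_GFX;
 *		fth.in.fence.ip_instance = 0;
 *		fth.in.fence.ring = 0;
 *		fth.in.fence.seq_no = seq_no;
 *		fth.in.what = AMDGPU_FENCE_TO_HANDLE_GET_SYNC_FILE_FD;
 *
 *		if (ioctl(fd, DRM_IOCTL_AMDGPU_FENCE_TO_HANDLE, &fth))
 *			return -errno;
 *
 *		return fth.out.handle;
 *	}
 */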
1514
1515/**
1516 * amdgpu_cs_wait_all_fences - wait on all fences to signal
1517 *
1518 * @adev: amdgpu device
1519 * @filp: file private
1520 * @wait: wait parameters
1521 * @fences: array of drm_amdgpu_fence
1522 */
1523static int amdgpu_cs_wait_all_fences(struct amdgpu_device *adev,
1524                                     struct drm_file *filp,
1525                                     union drm_amdgpu_wait_fences *wait,
1526                                     struct drm_amdgpu_fence *fences)
1527{
1528        uint32_t fence_count = wait->in.fence_count;
1529        unsigned int i;
1530        long r = 1;
1531
1532        for (i = 0; i < fence_count; i++) {
1533                struct dma_fence *fence;
1534                unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout_ns);
1535
1536                fence = amdgpu_cs_get_fence(adev, filp, &fences[i]);
1537                if (IS_ERR(fence))
1538                        return PTR_ERR(fence);
1539                else if (!fence)
1540                        continue;
1541
1542                r = dma_fence_wait_timeout(fence, true, timeout);
1543                if (r > 0 && fence->error)
1544                        r = fence->error;
1545
1546                dma_fence_put(fence);
1547                if (r < 0)
1548                        return r;
1549
1550                if (r == 0)
1551                        break;
1552        }
1553
1554        memset(wait, 0, sizeof(*wait));
1555        wait->out.status = (r > 0);
1556
1557        return 0;
1558}
1559
1560/**
1561 * amdgpu_cs_wait_any_fence - wait on any fence to signal
1562 *
1563 * @adev: amdgpu device
1564 * @filp: file private
1565 * @wait: wait parameters
1566 * @fences: array of drm_amdgpu_fence
1567 */
1568static int amdgpu_cs_wait_any_fence(struct amdgpu_device *adev,
1569                                    struct drm_file *filp,
1570                                    union drm_amdgpu_wait_fences *wait,
1571                                    struct drm_amdgpu_fence *fences)
1572{
1573        unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout_ns);
1574        uint32_t fence_count = wait->in.fence_count;
1575        uint32_t first = ~0;
1576        struct dma_fence **array;
1577        unsigned int i;
1578        long r;
1579
1580        /* Prepare the fence array */
1581        array = kcalloc(fence_count, sizeof(struct dma_fence *), GFP_KERNEL);
1582
1583        if (array == NULL)
1584                return -ENOMEM;
1585
1586        for (i = 0; i < fence_count; i++) {
1587                struct dma_fence *fence;
1588
1589                fence = amdgpu_cs_get_fence(adev, filp, &fences[i]);
1590                if (IS_ERR(fence)) {
1591                        r = PTR_ERR(fence);
1592                        goto err_free_fence_array;
1593                } else if (fence) {
1594                        array[i] = fence;
1595                } else { /* NULL, the fence has already been signaled */
1596                        r = 1;
1597                        first = i;
1598                        goto out;
1599                }
1600        }
1601
1602        r = dma_fence_wait_any_timeout(array, fence_count, true, timeout,
1603                                       &first);
1604        if (r < 0)
1605                goto err_free_fence_array;
1606
1607out:
1608        memset(wait, 0, sizeof(*wait));
1609        wait->out.status = (r > 0);
1610        wait->out.first_signaled = first;
1611
1612        if (first < fence_count && array[first])
1613                r = array[first]->error;
1614        else
1615                r = 0;
1616
1617err_free_fence_array:
1618        for (i = 0; i < fence_count; i++)
1619                dma_fence_put(array[i]);
1620        kfree(array);
1621
1622        return r;
1623}
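/*
 * For reference, dma_fence_wait_any_timeout() as used above returns the
 * remaining timeout in jiffies (> 0) and stores the index of the first
 * signaled fence in *idx, returns 0 on timeout, or a negative error such as
 * -ERESTARTSYS when interrupted.  A minimal in-kernel sketch, assuming two
 * fences a and b that the caller holds references to:
 *
 *	struct dma_fence *fences[] = { a, b };
 *	uint32_t idx;
 *	long r;
 *
 *	r = dma_fence_wait_any_timeout(fences, ARRAY_SIZE(fences), true,
 *				       msecs_to_jiffies(100), &idx);
 *	if (r > 0)
 *		pr_info("fence %u signaled first\n", idx);
 *	else if (r == 0)
 *		pr_info("wait timed out\n");
 *	else
 *		pr_info("wait interrupted or failed: %ld\n", r);
 */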
1624
1625/**
1626 * amdgpu_cs_wait_fences_ioctl - wait for multiple command submissions to finish
1627 *
1628 * @dev: drm device
1629 * @data: data from userspace
1630 * @filp: file private
1631 */
1632int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data,
1633                                struct drm_file *filp)
1634{
1635        struct amdgpu_device *adev = drm_to_adev(dev);
1636        union drm_amdgpu_wait_fences *wait = data;
1637        uint32_t fence_count = wait->in.fence_count;
1638        struct drm_amdgpu_fence *fences_user;
1639        struct drm_amdgpu_fence *fences;
1640        int r;
1641
1642        /* Get the fences from userspace */
1643        fences = kmalloc_array(fence_count, sizeof(struct drm_amdgpu_fence),
1644                        GFP_KERNEL);
1645        if (fences == NULL)
1646                return -ENOMEM;
1647
1648        fences_user = u64_to_user_ptr(wait->in.fences);
1649        if (copy_from_user(fences, fences_user,
1650                sizeof(struct drm_amdgpu_fence) * fence_count)) {
1651                r = -EFAULT;
1652                goto err_free_fences;
1653        }
1654
1655        if (wait->in.wait_all)
1656                r = amdgpu_cs_wait_all_fences(adev, filp, wait, fences);
1657        else
1658                r = amdgpu_cs_wait_any_fence(adev, filp, wait, fences);
1659
1660err_free_fences:
1661        kfree(fences);
1662
1663        return r;
1664}
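/*
 * Example (sketch): waiting on several submissions at once from userspace.
 * The fences array is assumed to be filled with drm_amdgpu_fence descriptors
 * from earlier submits, and timeout_ns is again an absolute deadline in
 * nanoseconds.  Unlike DRM_IOCTL_AMDGPU_WAIT_CS, out.status here is 1 when
 * the wait succeeded and 0 when it timed out, and out.first_signaled is only
 * meaningful for wait_all == 0.
 *
 *	int wait_fences(int fd, struct drm_amdgpu_fence *fences,
 *			uint32_t count, int wait_all, uint64_t abs_timeout_ns,
 *			uint32_t *first)
 *	{
 *		union drm_amdgpu_wait_fences args = {0};
 *
 *		args.in.fences = (uintptr_t)fences;
 *		args.in.fence_count = count;
 *		args.in.wait_all = wait_all;
 *		args.in.timeout_ns = abs_timeout_ns;
 *
 *		if (ioctl(fd, DRM_IOCTL_AMDGPU_WAIT_FENCES, &args))
 *			return -errno;
 *
 *		if (first)
 *			*first = args.out.first_signaled;
 *		return args.out.status;
 *	}
 */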
1665
1666/**
1667 * amdgpu_cs_find_mapping - find bo_va for VM address
1668 *
1669 * @parser: command submission parser context
1670 * @addr: VM address
1671 * @bo: resulting BO of the mapping found
1672 * @map: Placeholder to return found BO mapping
1673 *
1674 * Search the buffer objects of the command submission for the mapping that
1675 * covers the given virtual memory address. Return: 0 if the mapping was found
1676 * and the BO is reserved by this CS, negative error code otherwise.
1677 */
1678int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
1679                           uint64_t addr, struct amdgpu_bo **bo,
1680                           struct amdgpu_bo_va_mapping **map)
1681{
1682        struct amdgpu_fpriv *fpriv = parser->filp->driver_priv;
1683        struct ttm_operation_ctx ctx = { false, false };
1684        struct amdgpu_vm *vm = &fpriv->vm;
1685        struct amdgpu_bo_va_mapping *mapping;
1686        int r;
1687
1688        addr /= AMDGPU_GPU_PAGE_SIZE;
1689
1690        mapping = amdgpu_vm_bo_lookup_mapping(vm, addr);
1691        if (!mapping || !mapping->bo_va || !mapping->bo_va->base.bo)
1692                return -EINVAL;
1693
1694        *bo = mapping->bo_va->base.bo;
1695        *map = mapping;
1696
1697        /* Double check that the BO is reserved by this CS */
1698        if (dma_resv_locking_ctx((*bo)->tbo.base.resv) != &parser->ticket)
1699                return -EINVAL;
1700
1701        if (!((*bo)->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)) {
1702                (*bo)->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
1703                amdgpu_bo_placement_from_domain(*bo, (*bo)->allowed_domains);
1704                r = ttm_bo_validate(&(*bo)->tbo, &(*bo)->placement, &ctx);
1705                if (r)
1706                        return r;
1707        }
1708
1709        return amdgpu_ttm_alloc_gart(&(*bo)->tbo);
1710}
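/*
 * Typical in-kernel use, such as in the UVD/VCE IB parsers: translate a GPU
 * address found while parsing an IB into the backing BO so the command or
 * message buffer can be validated.  A minimal sketch, assuming @parser and
 * @addr come from the surrounding CS parsing code:
 *
 *	struct amdgpu_bo_va_mapping *map;
 *	struct amdgpu_bo *bo;
 *	int r;
 *
 *	r = amdgpu_cs_find_mapping(parser, addr, &bo, &map);
 *	if (r)
 *		return r;
 *
 * At this point bo is reserved by this CS, validated with the contiguous
 * flag set and backed by a GART mapping; the byte offset of addr inside the
 * mapping is addr - map->start * AMDGPU_GPU_PAGE_SIZE, and amdgpu_bo_kmap()
 * can be used if CPU access to the buffer is needed.
 */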
1711