linux/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
/*
 * Copyright 2017 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "amdgpu_ids.h"

#include <linux/idr.h>
#include <linux/dma-fence-array.h>

#include "amdgpu.h"
#include "amdgpu_trace.h"

/*
 * PASID manager
 *
 * PASIDs are global address space identifiers that can be shared
 * between the GPU, an IOMMU and the driver. VMs on different devices
 * may use the same PASID if they share the same address
 * space. Therefore PASIDs are allocated using a global IDA. VMs are
 * looked up from the PASID per amdgpu_device.
 */
static DEFINE_IDA(amdgpu_pasid_ida);

/* Helper to free pasid from a fence callback */
struct amdgpu_pasid_cb {
        struct dma_fence_cb cb;
        u32 pasid;
};

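/*
 * Example usage (a minimal sketch; the KMS open path allocates 16-bit
 * PASIDs in much the same way):
 *
 *      int pasid = amdgpu_pasid_alloc(16);
 *
 *      if (pasid < 0)
 *              pasid = 0;      (fall back to running without a PASID)
 *      ...
 *      if (pasid)
 *              amdgpu_pasid_free(pasid);
 */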
/**
 * amdgpu_pasid_alloc - Allocate a PASID
 * @bits: Maximum width of the PASID in bits, must be at least 1
 *
 * Allocates a PASID of the given width while keeping smaller PASIDs
 * available if possible.
 *
 * Returns a positive integer on success. Returns %-EINVAL if bits==0.
 * Returns %-ENOSPC if no PASID was available. Returns %-ENOMEM on
 * memory allocation failure.
 */
int amdgpu_pasid_alloc(unsigned int bits)
{
        int pasid = -EINVAL;

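        /*
         * Try the widest range first: each attempt covers
         * [1 << (bits - 1), 1 << bits), so on -ENOSPC the next iteration
         * retries one bit narrower and the smaller PASIDs stay available
         * for devices that support fewer PASID bits.
         */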
        for (bits = min(bits, 31U); bits > 0; bits--) {
                pasid = ida_simple_get(&amdgpu_pasid_ida,
                                       1U << (bits - 1), 1U << bits,
                                       GFP_KERNEL);
                if (pasid != -ENOSPC)
                        break;
        }

        if (pasid >= 0)
                trace_amdgpu_pasid_allocated(pasid);

        return pasid;
}

/**
 * amdgpu_pasid_free - Free a PASID
 * @pasid: PASID to free
 */
void amdgpu_pasid_free(u32 pasid)
{
        trace_amdgpu_pasid_freed(pasid);
        ida_simple_remove(&amdgpu_pasid_ida, pasid);
}

static void amdgpu_pasid_free_cb(struct dma_fence *fence,
                                 struct dma_fence_cb *_cb)
{
        struct amdgpu_pasid_cb *cb =
                container_of(_cb, struct amdgpu_pasid_cb, cb);

        amdgpu_pasid_free(cb->pasid);
        dma_fence_put(fence);
        kfree(cb);
}

/**
 * amdgpu_pasid_free_delayed - free pasid when fences signal
 *
 * @resv: reservation object with the fences to wait for
 * @pasid: pasid to free
 *
 * Free the pasid only after all the fences in resv are signaled.
 */
void amdgpu_pasid_free_delayed(struct dma_resv *resv,
                               u32 pasid)
{
        struct dma_fence *fence, **fences;
        struct amdgpu_pasid_cb *cb;
        unsigned count;
        int r;

        r = dma_resv_get_fences_rcu(resv, NULL, &count, &fences);
        if (r)
                goto fallback;

        if (count == 0) {
                amdgpu_pasid_free(pasid);
                return;
        }

        if (count == 1) {
                fence = fences[0];
                kfree(fences);
        } else {
                uint64_t context = dma_fence_context_alloc(1);
                struct dma_fence_array *array;

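                /*
                 * On success the fence array takes ownership of the
                 * fences and of the @fences storage itself, so only the
                 * failure path needs to free it here.
                 */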
                array = dma_fence_array_create(count, fences, context,
                                               1, false);
                if (!array) {
                        kfree(fences);
                        goto fallback;
                }
                fence = &array->base;
        }

        cb = kmalloc(sizeof(*cb), GFP_KERNEL);
        if (!cb) {
                /* Last resort when we are OOM */
                dma_fence_wait(fence, false);
                dma_fence_put(fence);
                amdgpu_pasid_free(pasid);
        } else {
                cb->pasid = pasid;
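                /*
                 * dma_fence_add_callback() returns -ENOENT when the
                 * fence has already signaled; in that case run the
                 * callback, and thus the free, immediately.
                 */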
                if (dma_fence_add_callback(fence, &cb->cb,
                                           amdgpu_pasid_free_cb))
                        amdgpu_pasid_free_cb(fence, &cb->cb);
        }

        return;

fallback:
        /* Not enough memory for the delayed delete, as last resort
         * block for all the fences to complete.
         */
        dma_resv_wait_timeout_rcu(resv, true, false,
                                  MAX_SCHEDULE_TIMEOUT);
        amdgpu_pasid_free(pasid);
}

/*
 * VMID manager
 *
 * VMIDs are per-VMHUB identifiers for page table handling.
 */

/**
 * amdgpu_vmid_had_gpu_reset - check if reset occurred since last use
 *
 * @adev: amdgpu_device pointer
 * @id: VMID structure
 *
 * Check if a GPU reset occurred since the last use of the VMID.
 */
bool amdgpu_vmid_had_gpu_reset(struct amdgpu_device *adev,
                               struct amdgpu_vmid *id)
{
        return id->current_gpu_reset_count !=
                atomic_read(&adev->gpu_reset_counter);
}

/**
 * amdgpu_vmid_grab_idle - grab idle VMID
 *
 * @vm: vm to allocate id for
 * @ring: ring we want to submit job to
 * @sync: sync object where we add dependencies
 * @idle: resulting idle VMID
 *
 * Try to find an idle VMID; if none is available, add a fence to the sync
 * object to wait on. Returns -ENOMEM when we are out of memory.
 */
static int amdgpu_vmid_grab_idle(struct amdgpu_vm *vm,
                                 struct amdgpu_ring *ring,
                                 struct amdgpu_sync *sync,
                                 struct amdgpu_vmid **idle)
{
        struct amdgpu_device *adev = ring->adev;
        unsigned vmhub = ring->funcs->vmhub;
        struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
        struct dma_fence **fences;
        unsigned i;
        int r;

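        /* A previous grab is still waiting for a VMID on this ring; add
         * its wait fence to the sync object instead of building a new one.
         */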
        if (ring->vmid_wait && !dma_fence_is_signaled(ring->vmid_wait))
                return amdgpu_sync_fence(sync, ring->vmid_wait);

        fences = kmalloc_array(id_mgr->num_ids, sizeof(void *), GFP_KERNEL);
        if (!fences)
                return -ENOMEM;

        /* Check if we have an idle VMID */
        i = 0;
        list_for_each_entry((*idle), &id_mgr->ids_lru, list) {
                fences[i] = amdgpu_sync_peek_fence(&(*idle)->active, ring);
                if (!fences[i])
                        break;
                ++i;
        }

        /* If the walk completed, *idle points back at the list head and no
         * idle VMID was found; wait till one becomes available.
         */
        if (&(*idle)->list == &id_mgr->ids_lru) {
                u64 fence_context = adev->vm_manager.fence_context + ring->idx;
                unsigned seqno = ++adev->vm_manager.seqno[ring->idx];
                struct dma_fence_array *array;
                unsigned j;

                *idle = NULL;
                for (j = 0; j < i; ++j)
                        dma_fence_get(fences[j]);

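                /*
                 * signal_on_any == true: the fence array signals as soon
                 * as the first of the collected fences does, i.e. as soon
                 * as any VMID becomes idle again.
                 */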
                array = dma_fence_array_create(i, fences, fence_context,
                                               seqno, true);
                if (!array) {
                        for (j = 0; j < i; ++j)
                                dma_fence_put(fences[j]);
                        kfree(fences);
                        return -ENOMEM;
                }

                r = amdgpu_sync_fence(sync, &array->base);
                dma_fence_put(ring->vmid_wait);
                ring->vmid_wait = &array->base;
                return r;
        }
        kfree(fences);

        return 0;
}

/**
 * amdgpu_vmid_grab_reserved - try to assign reserved VMID
 *
 * @vm: vm to allocate id for
 * @ring: ring we want to submit job to
 * @sync: sync object where we add dependencies
 * @fence: fence protecting ID from reuse
 * @job: job who wants to use the VMID
 * @id: resulting VMID
 *
 * Try to assign a reserved VMID.
 */
static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm,
                                     struct amdgpu_ring *ring,
                                     struct amdgpu_sync *sync,
                                     struct dma_fence *fence,
                                     struct amdgpu_job *job,
                                     struct amdgpu_vmid **id)
{
        struct amdgpu_device *adev = ring->adev;
        unsigned vmhub = ring->funcs->vmhub;
        uint64_t fence_context = adev->fence_context + ring->idx;
        struct dma_fence *updates = sync->last_vm_update;
        bool needs_flush = vm->use_cpu_for_update;
        int r = 0;

        *id = vm->reserved_vmid[vmhub];
        if (updates && (*id)->flushed_updates &&
            updates->context == (*id)->flushed_updates->context &&
            !dma_fence_is_later(updates, (*id)->flushed_updates))
                updates = NULL;

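        /*
         * A flush (or a full switch to a new ID) is needed if the VMID was
         * last used by a different VM, points at a different page
         * directory, has page table updates pending, or was last flushed
         * by another context and that flush has not completed yet.
         */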
        if ((*id)->owner != vm->immediate.fence_context ||
            job->vm_pd_addr != (*id)->pd_gpu_addr ||
            updates || !(*id)->last_flush ||
            ((*id)->last_flush->context != fence_context &&
             !dma_fence_is_signaled((*id)->last_flush))) {
                struct dma_fence *tmp;

                /* to prevent one context being starved by another context */
                (*id)->pd_gpu_addr = 0;
                tmp = amdgpu_sync_peek_fence(&(*id)->active, ring);
                if (tmp) {
                        *id = NULL;
                        r = amdgpu_sync_fence(sync, tmp);
                        return r;
                }
                needs_flush = true;
        }

        /* Good, we can use this VMID. Remember this submission as
         * user of the VMID.
         */
        r = amdgpu_sync_fence(&(*id)->active, fence);
        if (r)
                return r;

        if (updates) {
                dma_fence_put((*id)->flushed_updates);
                (*id)->flushed_updates = dma_fence_get(updates);
        }
        job->vm_needs_flush = needs_flush;
        return 0;
}

/**
 * amdgpu_vmid_grab_used - try to reuse a VMID
 *
 * @vm: vm to allocate id for
 * @ring: ring we want to submit job to
 * @sync: sync object where we add dependencies
 * @fence: fence protecting ID from reuse
 * @job: job who wants to use the VMID
 * @id: resulting VMID
 *
 * Try to reuse a VMID for this submission.
 */
static int amdgpu_vmid_grab_used(struct amdgpu_vm *vm,
                                 struct amdgpu_ring *ring,
                                 struct amdgpu_sync *sync,
                                 struct dma_fence *fence,
                                 struct amdgpu_job *job,
                                 struct amdgpu_vmid **id)
{
        struct amdgpu_device *adev = ring->adev;
        unsigned vmhub = ring->funcs->vmhub;
        struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
        uint64_t fence_context = adev->fence_context + ring->idx;
        struct dma_fence *updates = sync->last_vm_update;
        int r;

        job->vm_needs_flush = vm->use_cpu_for_update;

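        /* The LRU is walked in reverse so that the most recently used
         * IDs, the ones most likely to still match this VM, are tried
         * first.
         */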
        /* Check if we can use a VMID already assigned to this VM */
        list_for_each_entry_reverse((*id), &id_mgr->ids_lru, list) {
                bool needs_flush = vm->use_cpu_for_update;
                struct dma_fence *flushed;

                /* Check all the prerequisites to using this VMID */
                if ((*id)->owner != vm->immediate.fence_context)
                        continue;

                if ((*id)->pd_gpu_addr != job->vm_pd_addr)
                        continue;

                if (!(*id)->last_flush ||
                    ((*id)->last_flush->context != fence_context &&
                     !dma_fence_is_signaled((*id)->last_flush)))
                        needs_flush = true;

                flushed = (*id)->flushed_updates;
                if (updates && (!flushed || dma_fence_is_later(updates, flushed)))
                        needs_flush = true;

                /* Concurrent flushes are only possible starting with Vega10 and
                 * are broken on Navi10 and Navi14.
                 */
                if (needs_flush && (adev->asic_type < CHIP_VEGA10 ||
                                    adev->asic_type == CHIP_NAVI10 ||
                                    adev->asic_type == CHIP_NAVI14))
                        continue;

                /* Good, we can use this VMID. Remember this submission as
                 * user of the VMID.
                 */
                r = amdgpu_sync_fence(&(*id)->active, fence);
                if (r)
                        return r;

                if (updates && (!flushed || dma_fence_is_later(updates, flushed))) {
                        dma_fence_put((*id)->flushed_updates);
                        (*id)->flushed_updates = dma_fence_get(updates);
                }

                job->vm_needs_flush |= needs_flush;
                return 0;
        }

        *id = NULL;
        return 0;
}

/**
 * amdgpu_vmid_grab - allocate the next free VMID
 *
 * @vm: vm to allocate id for
 * @ring: ring we want to submit job to
 * @sync: sync object where we add dependencies
 * @fence: fence protecting ID from reuse
 * @job: job who wants to use the VMID
 *
 * Allocate an id for the vm, adding fences to the sync obj as necessary.
 */
int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
                     struct amdgpu_sync *sync, struct dma_fence *fence,
                     struct amdgpu_job *job)
{
        struct amdgpu_device *adev = ring->adev;
        unsigned vmhub = ring->funcs->vmhub;
        struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
        struct amdgpu_vmid *idle = NULL;
        struct amdgpu_vmid *id = NULL;
        int r = 0;

        mutex_lock(&id_mgr->lock);
        r = amdgpu_vmid_grab_idle(vm, ring, sync, &idle);
        if (r || !idle)
                goto error;

        if (vm->reserved_vmid[vmhub]) {
                r = amdgpu_vmid_grab_reserved(vm, ring, sync, fence, job, &id);
                if (r || !id)
                        goto error;
        } else {
                r = amdgpu_vmid_grab_used(vm, ring, sync, fence, job, &id);
                if (r)
                        goto error;

                if (!id) {
                        struct dma_fence *updates = sync->last_vm_update;

                        /* Still no ID to use? Then use the idle one found earlier */
                        id = idle;

                        /* Remember this submission as user of the VMID */
                        r = amdgpu_sync_fence(&id->active, fence);
                        if (r)
                                goto error;

                        dma_fence_put(id->flushed_updates);
                        id->flushed_updates = dma_fence_get(updates);
                        job->vm_needs_flush = true;
                }

                list_move_tail(&id->list, &id_mgr->ids_lru);
        }

        id->pd_gpu_addr = job->vm_pd_addr;
        id->owner = vm->immediate.fence_context;

        if (job->vm_needs_flush) {
                dma_fence_put(id->last_flush);
                id->last_flush = NULL;
        }
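        /* The VMID number is simply the index of the ID structure within
         * the manager's ids array.
         */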
        job->vmid = id - id_mgr->ids;
        job->pasid = vm->pasid;
        trace_amdgpu_vm_grab_id(vm, ring, job);

error:
        mutex_unlock(&id_mgr->lock);
        return r;
}

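/**
 * amdgpu_vmid_alloc_reserved - reserve a VMID for exclusive use by a VM
 *
 * @adev: amdgpu_device pointer
 * @vm: vm to reserve a VMID for
 * @vmhub: the VMHUB to reserve the VMID on
 *
 * Take a VMID off the LRU so that @vm has it to itself on @vmhub.
 * Returns 0 on success, -EINVAL when the reservation limit is exceeded.
 */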
int amdgpu_vmid_alloc_reserved(struct amdgpu_device *adev,
                               struct amdgpu_vm *vm,
                               unsigned vmhub)
{
        struct amdgpu_vmid_mgr *id_mgr;
        struct amdgpu_vmid *idle;
        int r = 0;

        id_mgr = &adev->vm_manager.id_mgr[vmhub];
        mutex_lock(&id_mgr->lock);
        if (vm->reserved_vmid[vmhub])
                goto unlock;
        if (atomic_inc_return(&id_mgr->reserved_vmid_num) >
            AMDGPU_VM_MAX_RESERVED_VMID) {
                DRM_ERROR("Over limit of reserved VMIDs\n");
                atomic_dec(&id_mgr->reserved_vmid_num);
                r = -EINVAL;
                goto unlock;
        }
        /* Reserve the first (least recently used) VMID on the LRU */
        idle = list_first_entry(&id_mgr->ids_lru, struct amdgpu_vmid, list);
        list_del_init(&idle->list);
        vm->reserved_vmid[vmhub] = idle;
        mutex_unlock(&id_mgr->lock);

        return 0;
unlock:
        mutex_unlock(&id_mgr->lock);
        return r;
}

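/**
 * amdgpu_vmid_free_reserved - return a reserved VMID to the LRU
 *
 * @adev: amdgpu_device pointer
 * @vm: vm that owns the reservation
 * @vmhub: the VMHUB the VMID was reserved on
 */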
void amdgpu_vmid_free_reserved(struct amdgpu_device *adev,
                               struct amdgpu_vm *vm,
                               unsigned vmhub)
{
        struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];

        mutex_lock(&id_mgr->lock);
        if (vm->reserved_vmid[vmhub]) {
                list_add(&vm->reserved_vmid[vmhub]->list,
                        &id_mgr->ids_lru);
                vm->reserved_vmid[vmhub] = NULL;
                atomic_dec(&id_mgr->reserved_vmid_num);
        }
        mutex_unlock(&id_mgr->lock);
}

/**
 * amdgpu_vmid_reset - reset VMID to zero
 *
 * @adev: amdgpu device structure
 * @vmhub: the VMHUB the VMID belongs to
 * @vmid: vmid number to use
 *
 * Reset saved GDS, GWS and OA to force switch on next flush.
 */
void amdgpu_vmid_reset(struct amdgpu_device *adev, unsigned vmhub,
                       unsigned vmid)
{
        struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
        struct amdgpu_vmid *id = &id_mgr->ids[vmid];

        mutex_lock(&id_mgr->lock);
        id->owner = 0;
        id->gds_base = 0;
        id->gds_size = 0;
        id->gws_base = 0;
        id->gws_size = 0;
        id->oa_base = 0;
        id->oa_size = 0;
        mutex_unlock(&id_mgr->lock);
}

/**
 * amdgpu_vmid_reset_all - reset all VMIDs to zero
 *
 * @adev: amdgpu device structure
 *
 * Reset all VMIDs to force a flush on next use.
 */
void amdgpu_vmid_reset_all(struct amdgpu_device *adev)
{
        unsigned i, j;

        for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
                struct amdgpu_vmid_mgr *id_mgr =
                        &adev->vm_manager.id_mgr[i];

                for (j = 1; j < id_mgr->num_ids; ++j)
                        amdgpu_vmid_reset(adev, i, j);
        }
}

/**
 * amdgpu_vmid_mgr_init - init the VMID manager
 *
 * @adev: amdgpu_device pointer
 *
 * Initialize the VM manager structures
 */
void amdgpu_vmid_mgr_init(struct amdgpu_device *adev)
{
        unsigned i, j;

        for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
                struct amdgpu_vmid_mgr *id_mgr =
                        &adev->vm_manager.id_mgr[i];

                mutex_init(&id_mgr->lock);
                INIT_LIST_HEAD(&id_mgr->ids_lru);
                atomic_set(&id_mgr->reserved_vmid_num, 0);

                /* manage only VMIDs not used by KFD */
                id_mgr->num_ids = adev->vm_manager.first_kfd_vmid;

                /* skip over VMID 0, since it is the system VM */
                for (j = 1; j < id_mgr->num_ids; ++j) {
                        amdgpu_vmid_reset(adev, i, j);
                        amdgpu_sync_create(&id_mgr->ids[j].active);
                        list_add_tail(&id_mgr->ids[j].list, &id_mgr->ids_lru);
                }
        }
}

/**
 * amdgpu_vmid_mgr_fini - cleanup VM manager
 *
 * @adev: amdgpu_device pointer
 *
 * Cleanup the VM manager and free resources.
 */
void amdgpu_vmid_mgr_fini(struct amdgpu_device *adev)
{
        unsigned i, j;

        for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
                struct amdgpu_vmid_mgr *id_mgr =
                        &adev->vm_manager.id_mgr[i];

                mutex_destroy(&id_mgr->lock);
                for (j = 0; j < AMDGPU_NUM_VMID; ++j) {
                        struct amdgpu_vmid *id = &id_mgr->ids[j];

                        amdgpu_sync_free(&id->active);
                        dma_fence_put(id->flushed_updates);
                        dma_fence_put(id->last_flush);
                        dma_fence_put(id->pasid_mapping);
                }
        }
}