linux/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/dma-fence-array.h>
#include <linux/interval_tree_generic.h>
#include <linux/idr.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_amdkfd.h"
#include "amdgpu_gmc.h"
#include "amdgpu_xgmi.h"

/**
 * DOC: GPUVM
 *
 * GPUVM is similar to the legacy GART on older ASICs, however
 * rather than there being a single global GART table
 * for the entire GPU, there are multiple VM page tables active
 * at any given time.  The VM page tables can contain a mix of
 * VRAM pages and system memory pages, and system memory pages
 * can be mapped as snooped (cached system pages) or unsnooped
 * (uncached system pages).
 * Each VM has an ID associated with it and there is a page table
 * associated with each VMID.  When executing a command buffer,
 * the kernel tells the ring what VMID to use for that command
 * buffer.  VMIDs are allocated dynamically as commands are submitted.
 * The userspace drivers maintain their own address space and the kernel
 * sets up their page tables accordingly when they submit their
 * command buffers and a VMID is assigned.
 * Cayman/Trinity support up to 8 active VMs at any given time;
 * SI supports 16.
 */

#define START(node) ((node)->start)
#define LAST(node) ((node)->last)

INTERVAL_TREE_DEFINE(struct amdgpu_bo_va_mapping, rb, uint64_t, __subtree_last,
                     START, LAST, static, amdgpu_vm_it)

#undef START
#undef LAST

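/*
 * Illustrative sketch, not part of the driver: INTERVAL_TREE_DEFINE() above
 * generates static lookup helpers prefixed with amdgpu_vm_it. A range query
 * over a VM's mapping tree then looks roughly like this:
 *
 *	struct amdgpu_bo_va_mapping *mapping;
 *
 *	mapping = amdgpu_vm_it_iter_first(&vm->va, start, last);
 *	while (mapping) {
 *		// every mapping returned overlaps [start, last]
 *		mapping = amdgpu_vm_it_iter_next(mapping, start, last);
 *	}
 */
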
/**
 * struct amdgpu_prt_cb - Helper to disable partial resident texture feature from a fence callback
 */
struct amdgpu_prt_cb {

        /**
         * @adev: amdgpu device
         */
        struct amdgpu_device *adev;

        /**
         * @cb: callback
         */
        struct dma_fence_cb cb;
};

/**
 * amdgpu_vm_level_shift - return the addr shift for each level
 *
 * @adev: amdgpu_device pointer
 * @level: VMPT level
 *
 * Returns:
 * The number of bits the pfn needs to be right shifted for a level.
 */
static unsigned amdgpu_vm_level_shift(struct amdgpu_device *adev,
                                      unsigned level)
{
        unsigned shift = 0xff;

        switch (level) {
        case AMDGPU_VM_PDB2:
        case AMDGPU_VM_PDB1:
        case AMDGPU_VM_PDB0:
                shift = 9 * (AMDGPU_VM_PDB0 - level) +
                        adev->vm_manager.block_size;
                break;
        case AMDGPU_VM_PTB:
                shift = 0;
                break;
        default:
                dev_err(adev->dev, "the level%d isn't supported.\n", level);
        }

        return shift;
}
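
/*
 * Worked example, illustrative only: assuming a block_size of 9, this gives
 * shift 0 for the PTB, 9 for PDB0, 18 for PDB1 and 27 for PDB2, so each
 * directory level addresses 512 times the range of the level below it.
 */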

/**
 * amdgpu_vm_num_entries - return the number of entries in a PD/PT
 *
 * @adev: amdgpu_device pointer
 * @level: VMPT level
 *
 * Returns:
 * The number of entries in a page directory or page table.
 */
static unsigned amdgpu_vm_num_entries(struct amdgpu_device *adev,
                                      unsigned level)
{
        unsigned shift = amdgpu_vm_level_shift(adev,
                                               adev->vm_manager.root_level);

        if (level == adev->vm_manager.root_level)
                /* For the root directory */
                return round_up(adev->vm_manager.max_pfn, 1ULL << shift) >> shift;
        else if (level != AMDGPU_VM_PTB)
                /* Everything in between */
                return 512;
        else
                /* For the page tables on the leaves */
                return AMDGPU_VM_PTE_COUNT(adev);
}

/**
 * amdgpu_vm_num_ats_entries - return the number of ATS entries in the root PD
 *
 * @adev: amdgpu_device pointer
 *
 * Returns:
 * The number of entries in the root page directory which need the ATS setting.
 */
static unsigned amdgpu_vm_num_ats_entries(struct amdgpu_device *adev)
{
        unsigned shift;

        shift = amdgpu_vm_level_shift(adev, adev->vm_manager.root_level);
        return AMDGPU_GMC_HOLE_START >> (shift + AMDGPU_GPU_PAGE_SHIFT);
}

/**
 * amdgpu_vm_entries_mask - the mask to get the entry number of a PD/PT
 *
 * @adev: amdgpu_device pointer
 * @level: VMPT level
 *
 * Returns:
 * The mask to extract the entry number of a PD/PT from an address.
 */
static uint32_t amdgpu_vm_entries_mask(struct amdgpu_device *adev,
                                       unsigned int level)
{
        if (level <= adev->vm_manager.root_level)
                return 0xffffffff;
        else if (level != AMDGPU_VM_PTB)
                return 0x1ff;
        else
                return AMDGPU_VM_PTE_COUNT(adev) - 1;
}

/**
 * amdgpu_vm_bo_size - returns the size of the BOs in bytes
 *
 * @adev: amdgpu_device pointer
 * @level: VMPT level
 *
 * Returns:
 * The size of the BO for a page directory or page table in bytes.
 */
static unsigned amdgpu_vm_bo_size(struct amdgpu_device *adev, unsigned level)
{
        return AMDGPU_GPU_PAGE_ALIGN(amdgpu_vm_num_entries(adev, level) * 8);
}

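/*
 * Worked example, illustrative only: an interior page directory has 512
 * entries of 8 bytes each, so amdgpu_vm_bo_size() returns 4096 bytes,
 * i.e. exactly one GPU page after AMDGPU_GPU_PAGE_ALIGN.
 */
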
/**
 * amdgpu_vm_bo_evicted - vm_bo is evicted
 *
 * @vm_bo: vm_bo which is evicted
 *
 * State for PDs/PTs and per VM BOs which are not at the location they should
 * be.
 */
static void amdgpu_vm_bo_evicted(struct amdgpu_vm_bo_base *vm_bo)
{
        struct amdgpu_vm *vm = vm_bo->vm;
        struct amdgpu_bo *bo = vm_bo->bo;

        vm_bo->moved = true;
        if (bo->tbo.type == ttm_bo_type_kernel)
                list_move(&vm_bo->vm_status, &vm->evicted);
        else
                list_move_tail(&vm_bo->vm_status, &vm->evicted);
}

/**
 * amdgpu_vm_bo_relocated - vm_bo is relocated
 *
 * @vm_bo: vm_bo which is relocated
 *
 * State for PDs/PTs which need to update their parent PD.
 */
static void amdgpu_vm_bo_relocated(struct amdgpu_vm_bo_base *vm_bo)
{
        list_move(&vm_bo->vm_status, &vm_bo->vm->relocated);
}

/**
 * amdgpu_vm_bo_moved - vm_bo is moved
 *
 * @vm_bo: vm_bo which is moved
 *
 * State for per VM BOs which are moved, but that change is not yet reflected
 * in the page tables.
 */
static void amdgpu_vm_bo_moved(struct amdgpu_vm_bo_base *vm_bo)
{
        list_move(&vm_bo->vm_status, &vm_bo->vm->moved);
}

/**
 * amdgpu_vm_bo_idle - vm_bo is idle
 *
 * @vm_bo: vm_bo which is now idle
 *
 * State for PDs/PTs and per VM BOs which have gone through the state machine
 * and are now idle.
 */
static void amdgpu_vm_bo_idle(struct amdgpu_vm_bo_base *vm_bo)
{
        list_move(&vm_bo->vm_status, &vm_bo->vm->idle);
        vm_bo->moved = false;
}

/**
 * amdgpu_vm_bo_invalidated - vm_bo is invalidated
 *
 * @vm_bo: vm_bo which is now invalidated
 *
 * State for normal BOs which are invalidated and whose change is not yet
 * reflected in the PTs.
 */
static void amdgpu_vm_bo_invalidated(struct amdgpu_vm_bo_base *vm_bo)
{
        spin_lock(&vm_bo->vm->invalidated_lock);
        list_move(&vm_bo->vm_status, &vm_bo->vm->invalidated);
        spin_unlock(&vm_bo->vm->invalidated_lock);
}

/**
 * amdgpu_vm_bo_done - vm_bo is done
 *
 * @vm_bo: vm_bo which is now done
 *
 * State for normal BOs which were invalidated and whose change has been
 * applied to the PTs.
 */
static void amdgpu_vm_bo_done(struct amdgpu_vm_bo_base *vm_bo)
{
        spin_lock(&vm_bo->vm->invalidated_lock);
        list_del_init(&vm_bo->vm_status);
        spin_unlock(&vm_bo->vm->invalidated_lock);
}

/**
 * amdgpu_vm_bo_base_init - Adds bo to the list of bos associated with the vm
 *
 * @base: base structure for tracking BO usage in a VM
 * @vm: vm to which bo is to be added
 * @bo: amdgpu buffer object
 *
 * Initialize a bo_va_base structure and add it to the appropriate lists.
 *
 */
static void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
                                   struct amdgpu_vm *vm,
                                   struct amdgpu_bo *bo)
{
        base->vm = vm;
        base->bo = bo;
        base->next = NULL;
        INIT_LIST_HEAD(&base->vm_status);

        if (!bo)
                return;
        base->next = bo->vm_bo;
        bo->vm_bo = base;

        if (bo->tbo.resv != vm->root.base.bo->tbo.resv)
                return;

        vm->bulk_moveable = false;
        if (bo->tbo.type == ttm_bo_type_kernel && bo->parent)
                amdgpu_vm_bo_relocated(base);
        else
                amdgpu_vm_bo_idle(base);

        if (bo->preferred_domains &
            amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type))
                return;

        /*
         * We checked all the prerequisites, but it looks like this per VM BO
         * is currently evicted. Add the BO to the evicted list to make sure
         * it is validated on next VM use to avoid a fault.
         */
        amdgpu_vm_bo_evicted(base);
}

/**
 * amdgpu_vm_pt_parent - get the parent page directory
 *
 * @pt: child page table
 *
 * Helper to get the parent entry for the child page table. NULL if we are at
 * the root page directory.
 */
static struct amdgpu_vm_pt *amdgpu_vm_pt_parent(struct amdgpu_vm_pt *pt)
{
        struct amdgpu_bo *parent = pt->base.bo->parent;

        if (!parent)
                return NULL;

        return container_of(parent->vm_bo, struct amdgpu_vm_pt, base);
}

/**
 * struct amdgpu_vm_pt_cursor - state for for_each_amdgpu_vm_pt
 */
struct amdgpu_vm_pt_cursor {
        uint64_t pfn;
        struct amdgpu_vm_pt *parent;
        struct amdgpu_vm_pt *entry;
        unsigned level;
};

/**
 * amdgpu_vm_pt_start - start PD/PT walk
 *
 * @adev: amdgpu_device pointer
 * @vm: amdgpu_vm structure
 * @start: start address of the walk
 * @cursor: state to initialize
 *
 * Initialize an amdgpu_vm_pt_cursor to start a walk.
 */
static void amdgpu_vm_pt_start(struct amdgpu_device *adev,
                               struct amdgpu_vm *vm, uint64_t start,
                               struct amdgpu_vm_pt_cursor *cursor)
{
        cursor->pfn = start;
        cursor->parent = NULL;
        cursor->entry = &vm->root;
        cursor->level = adev->vm_manager.root_level;
}

/**
 * amdgpu_vm_pt_descendant - go to child node
 *
 * @adev: amdgpu_device pointer
 * @cursor: current state
 *
 * Walk to the child node of the current node.
 * Returns:
 * True if the walk was possible, false otherwise.
 */
static bool amdgpu_vm_pt_descendant(struct amdgpu_device *adev,
                                    struct amdgpu_vm_pt_cursor *cursor)
{
        unsigned mask, shift, idx;

        if (!cursor->entry->entries)
                return false;

        BUG_ON(!cursor->entry->base.bo);
        mask = amdgpu_vm_entries_mask(adev, cursor->level);
        shift = amdgpu_vm_level_shift(adev, cursor->level);

        ++cursor->level;
        idx = (cursor->pfn >> shift) & mask;
        cursor->parent = cursor->entry;
        cursor->entry = &cursor->entry->entries[idx];
        return true;
}

/**
 * amdgpu_vm_pt_sibling - go to sibling node
 *
 * @adev: amdgpu_device pointer
 * @cursor: current state
 *
 * Walk to the sibling node of the current node.
 * Returns:
 * True if the walk was possible, false otherwise.
 */
static bool amdgpu_vm_pt_sibling(struct amdgpu_device *adev,
                                 struct amdgpu_vm_pt_cursor *cursor)
{
        unsigned shift, num_entries;

        /* Root doesn't have a sibling */
        if (!cursor->parent)
                return false;

        /* Go to our parents and see if we got a sibling */
        shift = amdgpu_vm_level_shift(adev, cursor->level - 1);
        num_entries = amdgpu_vm_num_entries(adev, cursor->level - 1);

        if (cursor->entry == &cursor->parent->entries[num_entries - 1])
                return false;

        cursor->pfn += 1ULL << shift;
        cursor->pfn &= ~((1ULL << shift) - 1);
        ++cursor->entry;
        return true;
}

/**
 * amdgpu_vm_pt_ancestor - go to parent node
 *
 * @cursor: current state
 *
 * Walk to the parent node of the current node.
 * Returns:
 * True if the walk was possible, false otherwise.
 */
static bool amdgpu_vm_pt_ancestor(struct amdgpu_vm_pt_cursor *cursor)
{
        if (!cursor->parent)
                return false;

        --cursor->level;
        cursor->entry = cursor->parent;
        cursor->parent = amdgpu_vm_pt_parent(cursor->parent);
        return true;
}

/**
 * amdgpu_vm_pt_next - get next PD/PT in hierarchy
 *
 * @adev: amdgpu_device pointer
 * @cursor: current state
 *
 * Walk the PD/PT tree to the next node.
 */
static void amdgpu_vm_pt_next(struct amdgpu_device *adev,
                              struct amdgpu_vm_pt_cursor *cursor)
{
        /* First try a newborn child */
        if (amdgpu_vm_pt_descendant(adev, cursor))
                return;

        /* If that didn't work, try to find a sibling */
        while (!amdgpu_vm_pt_sibling(adev, cursor)) {
                /* No sibling, go to our parents and grandparents */
                if (!amdgpu_vm_pt_ancestor(cursor)) {
                        cursor->pfn = ~0ll;
                        return;
                }
        }
}

/**
 * amdgpu_vm_pt_first_dfs - start a depth-first search
 *
 * @adev: amdgpu_device structure
 * @vm: amdgpu_vm structure
 * @start: optional cursor where to start the search
 * @cursor: state to initialize
 *
 * Starts a depth-first traversal of the PD/PT tree.
 */
static void amdgpu_vm_pt_first_dfs(struct amdgpu_device *adev,
                                   struct amdgpu_vm *vm,
                                   struct amdgpu_vm_pt_cursor *start,
                                   struct amdgpu_vm_pt_cursor *cursor)
{
        if (start)
                *cursor = *start;
        else
                amdgpu_vm_pt_start(adev, vm, 0, cursor);
        while (amdgpu_vm_pt_descendant(adev, cursor));
}

/**
 * amdgpu_vm_pt_continue_dfs - check if the depth-first search should continue
 *
 * @start: starting point for the search
 * @entry: current entry
 *
 * Returns:
 * True when the search should continue, false otherwise.
 */
static bool amdgpu_vm_pt_continue_dfs(struct amdgpu_vm_pt_cursor *start,
                                      struct amdgpu_vm_pt *entry)
{
        return entry && (!start || entry != start->entry);
}

/**
 * amdgpu_vm_pt_next_dfs - get the next node for a depth-first search
 *
 * @adev: amdgpu_device structure
 * @cursor: current state
 *
 * Move the cursor to the next node in a depth-first search.
 */
static void amdgpu_vm_pt_next_dfs(struct amdgpu_device *adev,
                                  struct amdgpu_vm_pt_cursor *cursor)
{
        if (!cursor->entry)
                return;

        if (!cursor->parent)
                cursor->entry = NULL;
        else if (amdgpu_vm_pt_sibling(adev, cursor))
                while (amdgpu_vm_pt_descendant(adev, cursor));
        else
                amdgpu_vm_pt_ancestor(cursor);
}

/**
 * for_each_amdgpu_vm_pt_dfs_safe - safe depth-first search of all PDs/PTs
 */
#define for_each_amdgpu_vm_pt_dfs_safe(adev, vm, start, cursor, entry)          \
        for (amdgpu_vm_pt_first_dfs((adev), (vm), (start), &(cursor)),          \
             (entry) = (cursor).entry, amdgpu_vm_pt_next_dfs((adev), &(cursor));\
             amdgpu_vm_pt_continue_dfs((start), (entry));                       \
             (entry) = (cursor).entry, amdgpu_vm_pt_next_dfs((adev), &(cursor)))

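/*
 * Illustrative usage, not part of the driver (see amdgpu_vm_free_pts() below
 * for a real user): a walk over the whole hierarchy looks roughly like this:
 *
 *	struct amdgpu_vm_pt_cursor cursor;
 *	struct amdgpu_vm_pt *entry;
 *
 *	for_each_amdgpu_vm_pt_dfs_safe(adev, vm, NULL, cursor, entry)
 *		if (entry->base.bo)
 *			handle_table(entry);	// hypothetical per-entry hook
 */
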
/**
 * amdgpu_vm_get_pd_bo - add the VM PD to a validation list
 *
 * @vm: vm providing the BOs
 * @validated: head of validation list
 * @entry: entry to add
 *
 * Add the page directory to the list of BOs to
 * validate for command submission.
 */
void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
                         struct list_head *validated,
                         struct amdgpu_bo_list_entry *entry)
{
        entry->priority = 0;
        entry->tv.bo = &vm->root.base.bo->tbo;
        /* One for the VM updates, one for TTM and one for the CS job */
        entry->tv.num_shared = 3;
        entry->user_pages = NULL;
        list_add(&entry->tv.head, validated);
}

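/*
 * Illustrative usage, not part of the driver: during command submission the
 * caller typically puts the PD on a local validation list like this:
 *
 *	struct amdgpu_bo_list_entry pd;
 *	struct list_head validated;
 *
 *	INIT_LIST_HEAD(&validated);
 *	amdgpu_vm_get_pd_bo(vm, &validated, &pd);
 */
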
/**
 * amdgpu_vm_del_from_lru_notify - update bulk_moveable flag
 *
 * @bo: BO which was removed from the LRU
 *
 * Make sure the bulk_moveable flag is updated when a BO is removed from the
 * LRU.
 */
void amdgpu_vm_del_from_lru_notify(struct ttm_buffer_object *bo)
{
        struct amdgpu_bo *abo;
        struct amdgpu_vm_bo_base *bo_base;

        if (!amdgpu_bo_is_amdgpu_bo(bo))
                return;

        if (bo->mem.placement & TTM_PL_FLAG_NO_EVICT)
                return;

        abo = ttm_to_amdgpu_bo(bo);
        if (!abo->parent)
                return;
        for (bo_base = abo->vm_bo; bo_base; bo_base = bo_base->next) {
                struct amdgpu_vm *vm = bo_base->vm;

                if (abo->tbo.resv == vm->root.base.bo->tbo.resv)
                        vm->bulk_moveable = false;
        }
}

/**
 * amdgpu_vm_move_to_lru_tail - move all BOs to the end of LRU
 *
 * @adev: amdgpu device pointer
 * @vm: vm providing the BOs
 *
 * Move all BOs to the end of LRU and remember their positions to put them
 * together.
 */
void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev,
                                struct amdgpu_vm *vm)
{
        struct ttm_bo_global *glob = adev->mman.bdev.glob;
        struct amdgpu_vm_bo_base *bo_base;

#if 0
        if (vm->bulk_moveable) {
                spin_lock(&glob->lru_lock);
                ttm_bo_bulk_move_lru_tail(&vm->lru_bulk_move);
                spin_unlock(&glob->lru_lock);
                return;
        }
#endif

        memset(&vm->lru_bulk_move, 0, sizeof(vm->lru_bulk_move));

        spin_lock(&glob->lru_lock);
        list_for_each_entry(bo_base, &vm->idle, vm_status) {
                struct amdgpu_bo *bo = bo_base->bo;

                if (!bo->parent)
                        continue;

                ttm_bo_move_to_lru_tail(&bo->tbo, &vm->lru_bulk_move);
                if (bo->shadow)
                        ttm_bo_move_to_lru_tail(&bo->shadow->tbo,
                                                &vm->lru_bulk_move);
        }
        spin_unlock(&glob->lru_lock);

        vm->bulk_moveable = true;
}

/**
 * amdgpu_vm_validate_pt_bos - validate the page table BOs
 *
 * @adev: amdgpu device pointer
 * @vm: vm providing the BOs
 * @validate: callback to do the validation
 * @param: parameter for the validation callback
 *
 * Validate the page table BOs on command submission if necessary.
 *
 * Returns:
 * Validation result.
 */
int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
                              int (*validate)(void *p, struct amdgpu_bo *bo),
                              void *param)
{
        struct amdgpu_vm_bo_base *bo_base, *tmp;
        int r = 0;

        vm->bulk_moveable &= list_empty(&vm->evicted);

        list_for_each_entry_safe(bo_base, tmp, &vm->evicted, vm_status) {
                struct amdgpu_bo *bo = bo_base->bo;

                r = validate(param, bo);
                if (r)
                        break;

                if (bo->tbo.type != ttm_bo_type_kernel) {
                        amdgpu_vm_bo_moved(bo_base);
                } else {
                        vm->update_funcs->map_table(bo);
                        if (bo->parent)
                                amdgpu_vm_bo_relocated(bo_base);
                        else
                                amdgpu_vm_bo_idle(bo_base);
                }
        }

        return r;
}

/**
 * amdgpu_vm_ready - check VM is ready for updates
 *
 * @vm: VM to check
 *
 * Check if all VM PDs/PTs are ready for updates
 *
 * Returns:
 * True if eviction list is empty.
 */
bool amdgpu_vm_ready(struct amdgpu_vm *vm)
{
        return list_empty(&vm->evicted);
}

/**
 * amdgpu_vm_clear_bo - initially clear the PDs/PTs
 *
 * @adev: amdgpu_device pointer
 * @vm: VM to clear BO from
 * @bo: BO to clear
 *
 * Root PD needs to be reserved when calling this.
 *
 * Returns:
 * 0 on success, errno otherwise.
 */
static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
                              struct amdgpu_vm *vm,
                              struct amdgpu_bo *bo)
{
        struct ttm_operation_ctx ctx = { true, false };
        unsigned level = adev->vm_manager.root_level;
        struct amdgpu_vm_update_params params;
        struct amdgpu_bo *ancestor = bo;
        unsigned entries, ats_entries;
        uint64_t addr;
        int r;

        /* Figure out our place in the hierarchy */
        if (ancestor->parent) {
                ++level;
                while (ancestor->parent->parent) {
                        ++level;
                        ancestor = ancestor->parent;
                }
        }

        entries = amdgpu_bo_size(bo) / 8;
        if (!vm->pte_support_ats) {
                ats_entries = 0;

        } else if (!bo->parent) {
                ats_entries = amdgpu_vm_num_ats_entries(adev);
                ats_entries = min(ats_entries, entries);
                entries -= ats_entries;

        } else {
                struct amdgpu_vm_pt *pt;

                pt = container_of(ancestor->vm_bo, struct amdgpu_vm_pt, base);
                ats_entries = amdgpu_vm_num_ats_entries(adev);
                if ((pt - vm->root.entries) >= ats_entries) {
                        ats_entries = 0;
                } else {
                        ats_entries = entries;
                        entries = 0;
                }
        }

        r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
        if (r)
                return r;

        if (bo->shadow) {
                r = ttm_bo_validate(&bo->shadow->tbo, &bo->shadow->placement,
                                    &ctx);
                if (r)
                        return r;
        }

        r = vm->update_funcs->map_table(bo);
        if (r)
                return r;

        memset(&params, 0, sizeof(params));
        params.adev = adev;
        params.vm = vm;

        r = vm->update_funcs->prepare(&params, AMDGPU_FENCE_OWNER_KFD, NULL);
        if (r)
                return r;

        addr = 0;
        if (ats_entries) {
                uint64_t value = 0, flags;

                flags = AMDGPU_PTE_DEFAULT_ATC;
                if (level != AMDGPU_VM_PTB) {
                        /* Handle leaf PDEs as PTEs */
                        flags |= AMDGPU_PDE_PTE;
                        amdgpu_gmc_get_vm_pde(adev, level, &value, &flags);
                }

                r = vm->update_funcs->update(&params, bo, addr, 0, ats_entries,
                                             value, flags);
                if (r)
                        return r;

                addr += ats_entries * 8;
        }

        if (entries) {
                uint64_t value = 0, flags = 0;

                if (adev->asic_type >= CHIP_VEGA10) {
                        if (level != AMDGPU_VM_PTB) {
                                /* Handle leaf PDEs as PTEs */
                                flags |= AMDGPU_PDE_PTE;
                                amdgpu_gmc_get_vm_pde(adev, level,
                                                      &value, &flags);
                        } else {
                                /* Workaround for fault priority problem on GMC9 */
                                flags = AMDGPU_PTE_EXECUTABLE;
                        }
                }

                r = vm->update_funcs->update(&params, bo, addr, 0, entries,
                                             value, flags);
                if (r)
                        return r;
        }

        return vm->update_funcs->commit(&params, NULL);
}

/**
 * amdgpu_vm_bo_param - fill in parameters for PD/PT allocation
 *
 * @adev: amdgpu_device pointer
 * @vm: requesting vm
 * @level: the page directory level
 * @bp: resulting BO allocation parameters
 */
static void amdgpu_vm_bo_param(struct amdgpu_device *adev, struct amdgpu_vm *vm,
                               int level, struct amdgpu_bo_param *bp)
{
        memset(bp, 0, sizeof(*bp));

        bp->size = amdgpu_vm_bo_size(adev, level);
        bp->byte_align = AMDGPU_GPU_PAGE_SIZE;
        bp->domain = AMDGPU_GEM_DOMAIN_VRAM;
        bp->domain = amdgpu_bo_get_preferred_pin_domain(adev, bp->domain);
        bp->flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
                AMDGPU_GEM_CREATE_CPU_GTT_USWC;
        if (vm->use_cpu_for_update)
                bp->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
        else if (!vm->root.base.bo || vm->root.base.bo->shadow)
                bp->flags |= AMDGPU_GEM_CREATE_SHADOW;
        bp->type = ttm_bo_type_kernel;
        if (vm->root.base.bo)
                bp->resv = vm->root.base.bo->tbo.resv;
}

/**
 * amdgpu_vm_alloc_pts - Allocate a specific page table
 *
 * @adev: amdgpu_device pointer
 * @vm: VM to allocate page tables for
 * @cursor: Which page table to allocate
 *
 * Make sure a specific page table or directory is allocated.
 *
 * Returns:
 * 0 on success (including when the page table was already allocated),
 * negative errno if an error occurred.
 */
static int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
                               struct amdgpu_vm *vm,
                               struct amdgpu_vm_pt_cursor *cursor)
{
        struct amdgpu_vm_pt *entry = cursor->entry;
        struct amdgpu_bo_param bp;
        struct amdgpu_bo *pt;
        int r;

        if (cursor->level < AMDGPU_VM_PTB && !entry->entries) {
                unsigned num_entries;

                num_entries = amdgpu_vm_num_entries(adev, cursor->level);
                entry->entries = kvmalloc_array(num_entries,
                                                sizeof(*entry->entries),
                                                GFP_KERNEL | __GFP_ZERO);
                if (!entry->entries)
                        return -ENOMEM;
        }

        if (entry->base.bo)
                return 0;

        amdgpu_vm_bo_param(adev, vm, cursor->level, &bp);

        r = amdgpu_bo_create(adev, &bp, &pt);
        if (r)
                return r;

        /* Keep a reference to the root directory to avoid
         * freeing them up in the wrong order.
         */
        pt->parent = amdgpu_bo_ref(cursor->parent->base.bo);
        amdgpu_vm_bo_base_init(&entry->base, vm, pt);

        r = amdgpu_vm_clear_bo(adev, vm, pt);
        if (r)
                goto error_free_pt;

        return 0;

error_free_pt:
        amdgpu_bo_unref(&pt->shadow);
        amdgpu_bo_unref(&pt);
        return r;
}

/**
 * amdgpu_vm_free_table - free one PD/PT
 *
 * @entry: PDE to free
 */
static void amdgpu_vm_free_table(struct amdgpu_vm_pt *entry)
{
        if (entry->base.bo) {
                entry->base.bo->vm_bo = NULL;
                list_del(&entry->base.vm_status);
                amdgpu_bo_unref(&entry->base.bo->shadow);
                amdgpu_bo_unref(&entry->base.bo);
        }
        kvfree(entry->entries);
        entry->entries = NULL;
}

/**
 * amdgpu_vm_free_pts - free PD/PT levels
 *
 * @adev: amdgpu device structure
 * @vm: amdgpu vm structure
 * @start: optional cursor where to start freeing PDs/PTs
 *
 * Free the page directory or page table level and all sub levels.
 */
static void amdgpu_vm_free_pts(struct amdgpu_device *adev,
                               struct amdgpu_vm *vm,
                               struct amdgpu_vm_pt_cursor *start)
{
        struct amdgpu_vm_pt_cursor cursor;
        struct amdgpu_vm_pt *entry;

        vm->bulk_moveable = false;

        for_each_amdgpu_vm_pt_dfs_safe(adev, vm, start, cursor, entry)
                amdgpu_vm_free_table(entry);

        if (start)
                amdgpu_vm_free_table(start->entry);
}

/**
 * amdgpu_vm_check_compute_bug - check whether asic has compute vm bug
 *
 * @adev: amdgpu_device pointer
 */
void amdgpu_vm_check_compute_bug(struct amdgpu_device *adev)
{
        const struct amdgpu_ip_block *ip_block;
        bool has_compute_vm_bug;
        struct amdgpu_ring *ring;
        int i;

        has_compute_vm_bug = false;

        ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX);
        if (ip_block) {
                /* Compute has a VM bug for GFX version < 7.
                 * Compute has a VM bug for GFX 8 MEC firmware version < 673.
                 */
                if (ip_block->version->major <= 7)
                        has_compute_vm_bug = true;
                else if (ip_block->version->major == 8)
                        if (adev->gfx.mec_fw_version < 673)
                                has_compute_vm_bug = true;
        }

        for (i = 0; i < adev->num_rings; i++) {
                ring = adev->rings[i];
                if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE)
                        /* only compute rings */
                        ring->has_compute_vm_bug = has_compute_vm_bug;
                else
                        ring->has_compute_vm_bug = false;
        }
}

/**
 * amdgpu_vm_need_pipeline_sync - Check if pipe sync is needed for job.
 *
 * @ring: ring on which the job will be submitted
 * @job: job to submit
 *
 * Returns:
 * True if sync is needed.
 */
bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring,
                                  struct amdgpu_job *job)
{
        struct amdgpu_device *adev = ring->adev;
        unsigned vmhub = ring->funcs->vmhub;
        struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
        struct amdgpu_vmid *id;
        bool gds_switch_needed;
        bool vm_flush_needed = job->vm_needs_flush || ring->has_compute_vm_bug;

        if (job->vmid == 0)
                return false;
        id = &id_mgr->ids[job->vmid];
        gds_switch_needed = ring->funcs->emit_gds_switch && (
                id->gds_base != job->gds_base ||
                id->gds_size != job->gds_size ||
                id->gws_base != job->gws_base ||
                id->gws_size != job->gws_size ||
                id->oa_base != job->oa_base ||
                id->oa_size != job->oa_size);

        if (amdgpu_vmid_had_gpu_reset(adev, id))
                return true;

        return vm_flush_needed || gds_switch_needed;
}

/**
 * amdgpu_vm_flush - hardware flush the vm
 *
 * @ring: ring to use for flush
 * @job:  related job
 * @need_pipe_sync: is pipe sync needed
 *
 * Emit a VM flush when it is necessary.
 *
 * Returns:
 * 0 on success, errno otherwise.
 */
int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_pipe_sync)
{
        struct amdgpu_device *adev = ring->adev;
        unsigned vmhub = ring->funcs->vmhub;
        struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
        struct amdgpu_vmid *id = &id_mgr->ids[job->vmid];
        bool gds_switch_needed = ring->funcs->emit_gds_switch && (
                id->gds_base != job->gds_base ||
                id->gds_size != job->gds_size ||
                id->gws_base != job->gws_base ||
                id->gws_size != job->gws_size ||
                id->oa_base != job->oa_base ||
                id->oa_size != job->oa_size);
        bool vm_flush_needed = job->vm_needs_flush;
        bool pasid_mapping_needed = id->pasid != job->pasid ||
                !id->pasid_mapping ||
                !dma_fence_is_signaled(id->pasid_mapping);
        struct dma_fence *fence = NULL;
        unsigned patch_offset = 0;
        int r;

        if (amdgpu_vmid_had_gpu_reset(adev, id)) {
                gds_switch_needed = true;
                vm_flush_needed = true;
                pasid_mapping_needed = true;
        }

        gds_switch_needed &= !!ring->funcs->emit_gds_switch;
        vm_flush_needed &= !!ring->funcs->emit_vm_flush &&
                        job->vm_pd_addr != AMDGPU_BO_INVALID_OFFSET;
        pasid_mapping_needed &= adev->gmc.gmc_funcs->emit_pasid_mapping &&
                ring->funcs->emit_wreg;

        if (!vm_flush_needed && !gds_switch_needed && !need_pipe_sync)
                return 0;

        if (ring->funcs->init_cond_exec)
                patch_offset = amdgpu_ring_init_cond_exec(ring);

        if (need_pipe_sync)
                amdgpu_ring_emit_pipeline_sync(ring);

        if (vm_flush_needed) {
                trace_amdgpu_vm_flush(ring, job->vmid, job->vm_pd_addr);
                amdgpu_ring_emit_vm_flush(ring, job->vmid, job->vm_pd_addr);
        }

        if (pasid_mapping_needed)
                amdgpu_gmc_emit_pasid_mapping(ring, job->vmid, job->pasid);

        if (vm_flush_needed || pasid_mapping_needed) {
                r = amdgpu_fence_emit(ring, &fence, 0);
                if (r)
                        return r;
        }

        if (vm_flush_needed) {
                mutex_lock(&id_mgr->lock);
                dma_fence_put(id->last_flush);
                id->last_flush = dma_fence_get(fence);
                id->current_gpu_reset_count =
                        atomic_read(&adev->gpu_reset_counter);
                mutex_unlock(&id_mgr->lock);
        }

        if (pasid_mapping_needed) {
                id->pasid = job->pasid;
                dma_fence_put(id->pasid_mapping);
                id->pasid_mapping = dma_fence_get(fence);
        }
        dma_fence_put(fence);

        if (ring->funcs->emit_gds_switch && gds_switch_needed) {
                id->gds_base = job->gds_base;
                id->gds_size = job->gds_size;
                id->gws_base = job->gws_base;
                id->gws_size = job->gws_size;
                id->oa_base = job->oa_base;
                id->oa_size = job->oa_size;
                amdgpu_ring_emit_gds_switch(ring, job->vmid, job->gds_base,
                                            job->gds_size, job->gws_base,
                                            job->gws_size, job->oa_base,
                                            job->oa_size);
        }

        if (ring->funcs->patch_cond_exec)
                amdgpu_ring_patch_cond_exec(ring, patch_offset);

        /* the double SWITCH_BUFFER here *cannot* be skipped by COND_EXEC */
        if (ring->funcs->emit_switch_buffer) {
                amdgpu_ring_emit_switch_buffer(ring);
                amdgpu_ring_emit_switch_buffer(ring);
        }
        return 0;
}

/**
 * amdgpu_vm_bo_find - find the bo_va for a specific vm & bo
 *
 * @vm: requested vm
 * @bo: requested buffer object
 *
 * Find @bo inside the requested vm by searching the @bo's vm list for the
 * requested vm.
 *
 * Object has to be reserved!
 *
 * Returns:
 * Found bo_va or NULL.
 */
struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
                                       struct amdgpu_bo *bo)
{
        struct amdgpu_vm_bo_base *base;

        for (base = bo->vm_bo; base; base = base->next) {
                if (base->vm != vm)
                        continue;

                return container_of(base, struct amdgpu_bo_va, base);
        }
        return NULL;
}

/**
 * amdgpu_vm_map_gart - Resolve gart mapping of addr
 *
 * @pages_addr: optional DMA address to use for lookup
 * @addr: the unmapped addr
 *
 * Look up the physical address of the page that the pte resolves
 * to.
 *
 * Returns:
 * The pointer for the page table entry.
 */
uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr)
{
        uint64_t result;

        /* page table offset */
        result = pages_addr[addr >> PAGE_SHIFT];

        /* in case cpu page size != gpu page size */
        result |= addr & (~PAGE_MASK);

        result &= 0xFFFFFFFFFFFFF000ULL;

        return result;
}

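/*
 * Worked example, illustrative only: with 64KiB CPU pages and 4KiB GPU
 * pages, addr 0x11234 selects pages_addr[1], ORs in the in-CPU-page offset
 * 0x1234, and the final mask drops the low 12 bits, yielding the DMA
 * address of the 4KiB GPU page at offset 0x1000 inside that CPU page.
 */
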
/**
 * amdgpu_vm_update_pde - update a single level in the hierarchy
 *
 * @params: parameters for the update
 * @vm: requested vm
 * @entry: entry to update
 *
 * Makes sure the requested entry in parent is up to date.
 */
static int amdgpu_vm_update_pde(struct amdgpu_vm_update_params *params,
                                struct amdgpu_vm *vm,
                                struct amdgpu_vm_pt *entry)
{
        struct amdgpu_vm_pt *parent = amdgpu_vm_pt_parent(entry);
        struct amdgpu_bo *bo = parent->base.bo, *pbo;
        uint64_t pde, pt, flags;
        unsigned level;

        for (level = 0, pbo = bo->parent; pbo; ++level)
                pbo = pbo->parent;

        level += params->adev->vm_manager.root_level;
        amdgpu_gmc_get_pde_for_bo(entry->base.bo, level, &pt, &flags);
        pde = (entry - parent->entries) * 8;
        return vm->update_funcs->update(params, bo, pde, pt, 1, 0, flags);
}

/**
 * amdgpu_vm_invalidate_pds - mark all PDs as invalid
 *
 * @adev: amdgpu_device pointer
 * @vm: related vm
 *
 * Mark all PD levels as invalid after an error.
 */
static void amdgpu_vm_invalidate_pds(struct amdgpu_device *adev,
                                     struct amdgpu_vm *vm)
{
        struct amdgpu_vm_pt_cursor cursor;
        struct amdgpu_vm_pt *entry;

        for_each_amdgpu_vm_pt_dfs_safe(adev, vm, NULL, cursor, entry)
                if (entry->base.bo && !entry->base.moved)
                        amdgpu_vm_bo_relocated(&entry->base);
}

/**
 * amdgpu_vm_update_directories - make sure that all directories are valid
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 *
 * Makes sure all directories are up to date.
 *
 * Returns:
 * 0 for success, error for failure.
 */
int amdgpu_vm_update_directories(struct amdgpu_device *adev,
                                 struct amdgpu_vm *vm)
{
        struct amdgpu_vm_update_params params;
        int r;

        if (list_empty(&vm->relocated))
                return 0;

        memset(&params, 0, sizeof(params));
        params.adev = adev;
        params.vm = vm;

        r = vm->update_funcs->prepare(&params, AMDGPU_FENCE_OWNER_VM, NULL);
        if (r)
                return r;

        while (!list_empty(&vm->relocated)) {
                struct amdgpu_vm_pt *entry;

                entry = list_first_entry(&vm->relocated, struct amdgpu_vm_pt,
                                         base.vm_status);
                amdgpu_vm_bo_idle(&entry->base);

                r = amdgpu_vm_update_pde(&params, vm, entry);
                if (r)
                        goto error;
        }

        r = vm->update_funcs->commit(&params, &vm->last_update);
        if (r)
                goto error;
        return 0;

error:
        amdgpu_vm_invalidate_pds(adev, vm);
        return r;
}

/**
 * amdgpu_vm_update_flags - figure out flags for PTE updates
 *
 * @params: parameters for the update
 * @bo: PD/PT to update
 * @level: level of the PD/PT
 * @pe: byte offset of the PDE/PTE inside the PD/PT
 * @addr: destination address to map to
 * @count: number of PTEs to update
 * @incr: increase next addr by incr bytes
 * @flags: hw mapping flags
 *
 * Make sure to set the right flags for the PTEs at the desired level.
 */
static void amdgpu_vm_update_flags(struct amdgpu_vm_update_params *params,
                                   struct amdgpu_bo *bo, unsigned level,
                                   uint64_t pe, uint64_t addr,
                                   unsigned count, uint32_t incr,
                                   uint64_t flags)
{
        if (level != AMDGPU_VM_PTB) {
                flags |= AMDGPU_PDE_PTE;
                amdgpu_gmc_get_vm_pde(params->adev, level, &addr, &flags);

        } else if (params->adev->asic_type >= CHIP_VEGA10 &&
                   !(flags & AMDGPU_PTE_VALID) &&
                   !(flags & AMDGPU_PTE_PRT)) {

                /* Workaround for fault priority problem on GMC9 */
                flags |= AMDGPU_PTE_EXECUTABLE;
        }

        params->vm->update_funcs->update(params, bo, pe, addr, count, incr,
                                         flags);
}

/**
 * amdgpu_vm_fragment - get fragment for PTEs
 *
 * @params: see amdgpu_vm_update_params definition
 * @start: first PTE to handle
 * @end: last PTE to handle
 * @flags: hw mapping flags
 * @frag: resulting fragment size
 * @frag_end: end of this fragment
 *
 * Returns the first possible fragment for the start and end address.
 */
static void amdgpu_vm_fragment(struct amdgpu_vm_update_params *params,
                               uint64_t start, uint64_t end, uint64_t flags,
                               unsigned int *frag, uint64_t *frag_end)
{
        /**
         * The MC L1 TLB supports variable sized pages, based on a fragment
         * field in the PTE. When this field is set to a non-zero value, page
         * granularity is increased from 4KB to (1 << (12 + frag)). The PTE
         * flags are considered valid for all PTEs within the fragment range
         * and corresponding mappings are assumed to be physically contiguous.
         *
         * The L1 TLB can store a single PTE for the whole fragment,
         * significantly increasing the space available for translation
         * caching. This leads to large improvements in throughput when the
         * TLB is under pressure.
         *
         * The L2 TLB distributes small and large fragments into two
         * asymmetric partitions. The large fragment cache is significantly
         * larger. Thus, we try to use large fragments wherever possible.
         * Userspace can support this by aligning virtual base address and
         * allocation size to the fragment size.
         *
         * Starting with Vega10 the fragment size only controls the L1. The L2
         * is now directly fed with small/huge/giant pages from the walker.
         */
        unsigned max_frag;

        if (params->adev->asic_type < CHIP_VEGA10)
                max_frag = params->adev->vm_manager.fragment_size;
        else
                max_frag = 31;

        /* system pages are not necessarily contiguous */
        if (params->pages_addr) {
                *frag = 0;
                *frag_end = end;
                return;
        }

        /* This intentionally wraps around if no bit is set */
        *frag = min((unsigned)ffs(start) - 1, (unsigned)fls64(end - start) - 1);
        if (*frag >= max_frag) {
                *frag = max_frag;
                *frag_end = end & ~((1ULL << max_frag) - 1);
        } else {
                *frag_end = start + (1 << *frag);
        }
}

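/*
 * Worked example, illustrative only: for start 0x400 and end 0x800 (GPU
 * pfns), ffs(start) - 1 and fls64(end - start) - 1 both give 10, so up to a
 * 10-bit fragment would fit; assuming a max_frag of 9, the result is
 * clamped to frag = 9 and frag_end = 0x800, i.e. one 2MiB fragment built
 * from 4KiB pages.
 */
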
/**
 * amdgpu_vm_update_ptes - make sure that page tables are valid
 *
 * @params: see amdgpu_vm_update_params definition
 * @start: start of GPU address range
 * @end: end of GPU address range
 * @dst: destination address to map to, the next dst inside the function
 * @flags: mapping flags
 *
 * Update the page tables in the range @start - @end.
 *
 * Returns:
 * 0 for success, -EINVAL for failure.
 */
static int amdgpu_vm_update_ptes(struct amdgpu_vm_update_params *params,
                                 uint64_t start, uint64_t end,
                                 uint64_t dst, uint64_t flags)
{
        struct amdgpu_device *adev = params->adev;
        struct amdgpu_vm_pt_cursor cursor;
        uint64_t frag_start = start, frag_end;
        unsigned int frag;
        int r;

        /* figure out the initial fragment */
        amdgpu_vm_fragment(params, frag_start, end, flags, &frag, &frag_end);

        /* walk over the address space and update the PTs */
        amdgpu_vm_pt_start(adev, params->vm, start, &cursor);
        while (cursor.pfn < end) {
                unsigned shift, parent_shift, mask;
                uint64_t incr, entry_end, pe_start;
                struct amdgpu_bo *pt;

                r = amdgpu_vm_alloc_pts(params->adev, params->vm, &cursor);
                if (r)
                        return r;

                pt = cursor.entry->base.bo;

                /* The root level can't be a huge page */
                if (cursor.level == adev->vm_manager.root_level) {
                        if (!amdgpu_vm_pt_descendant(adev, &cursor))
                                return -ENOENT;
                        continue;
                }

                shift = amdgpu_vm_level_shift(adev, cursor.level);
                parent_shift = amdgpu_vm_level_shift(adev, cursor.level - 1);
                if (adev->asic_type < CHIP_VEGA10 &&
                    (flags & AMDGPU_PTE_VALID)) {
                        /* No huge page support before GMC v9 */
                        if (cursor.level != AMDGPU_VM_PTB) {
                                if (!amdgpu_vm_pt_descendant(adev, &cursor))
                                        return -ENOENT;
                                continue;
                        }
                } else if (frag < shift) {
                        /* We can't use this level when the fragment size is
                         * smaller than the address shift. Go to the next
                         * child entry and try again.
                         */
                        if (!amdgpu_vm_pt_descendant(adev, &cursor))
                                return -ENOENT;
                        continue;
                } else if (frag >= parent_shift &&
                           cursor.level - 1 != adev->vm_manager.root_level) {
                        /* If the fragment size is even larger than the parent
                         * shift we should go up one level and check it again
                         * unless one level up is the root level.
                         */
                        if (!amdgpu_vm_pt_ancestor(&cursor))
                                return -ENOENT;
                        continue;
                }

                /* Looks good so far, calculate parameters for the update */
                incr = (uint64_t)AMDGPU_GPU_PAGE_SIZE << shift;
                mask = amdgpu_vm_entries_mask(adev, cursor.level);
                pe_start = ((cursor.pfn >> shift) & mask) * 8;
                entry_end = (uint64_t)(mask + 1) << shift;
                entry_end += cursor.pfn & ~(entry_end - 1);
                entry_end = min(entry_end, end);

                do {
                        uint64_t upd_end = min(entry_end, frag_end);
                        unsigned nptes = (upd_end - frag_start) >> shift;

                        amdgpu_vm_update_flags(params, pt, cursor.level,
                                               pe_start, dst, nptes, incr,
                                               flags | AMDGPU_PTE_FRAG(frag));

                        pe_start += nptes * 8;
                        dst += (uint64_t)nptes * AMDGPU_GPU_PAGE_SIZE << shift;

                        frag_start = upd_end;
                        if (frag_start >= frag_end) {
                                /* figure out the next fragment */
                                amdgpu_vm_fragment(params, frag_start, end,
                                                   flags, &frag, &frag_end);
                                if (frag < shift)
                                        break;
                        }
                } while (frag_start < entry_end);

                if (amdgpu_vm_pt_descendant(adev, &cursor)) {
                        /* Free all child entries */
                        while (cursor.pfn < frag_start) {
                                amdgpu_vm_free_pts(adev, params->vm, &cursor);
                                amdgpu_vm_pt_next(adev, &cursor);
                        }

                } else if (frag >= shift) {
                        /* or just move on to the next on the same level. */
                        amdgpu_vm_pt_next(adev, &cursor);
                }
        }

        return 0;
}

/**
 * amdgpu_vm_bo_update_mapping - update a mapping in the vm page table
 *
 * @adev: amdgpu_device pointer
 * @exclusive: fence we need to sync to
 * @pages_addr: DMA addresses to use for mapping
 * @vm: requested vm
 * @start: start of mapped range
 * @last: last mapped entry
 * @flags: flags for the entries
 * @addr: addr to set the area to
 * @fence: optional resulting fence
 *
 * Fill in the page table entries between @start and @last.
 *
 * Returns:
 * 0 for success, -EINVAL for failure.
 */
static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
                                       struct dma_fence *exclusive,
                                       dma_addr_t *pages_addr,
                                       struct amdgpu_vm *vm,
                                       uint64_t start, uint64_t last,
                                       uint64_t flags, uint64_t addr,
                                       struct dma_fence **fence)
{
        struct amdgpu_vm_update_params params;
        void *owner = AMDGPU_FENCE_OWNER_VM;
        int r;

        memset(&params, 0, sizeof(params));
        params.adev = adev;
        params.vm = vm;
        params.pages_addr = pages_addr;

        /* sync to everything except eviction fences on unmapping */
        if (!(flags & AMDGPU_PTE_VALID))
                owner = AMDGPU_FENCE_OWNER_KFD;

        r = vm->update_funcs->prepare(&params, owner, exclusive);
        if (r)
                return r;

        r = amdgpu_vm_update_ptes(&params, start, last + 1, addr, flags);
        if (r)
                return r;

        return vm->update_funcs->commit(&params, fence);
}

1533/**
1534 * amdgpu_vm_bo_split_mapping - split a mapping into smaller chunks
1535 *
1536 * @adev: amdgpu_device pointer
1537 * @exclusive: fence we need to sync to
1538 * @pages_addr: DMA addresses to use for mapping
1539 * @vm: requested vm
1540 * @mapping: mapped range and flags to use for the update
1541 * @flags: HW flags for the mapping
1542 * @bo_adev: amdgpu_device pointer that bo actually been allocated
1543 * @nodes: array of drm_mm_nodes with the MC addresses
1544 * @fence: optional resulting fence
1545 *
1546 * Split the mapping into smaller chunks so that each update fits
1547 * into a SDMA IB.
1548 *
1549 * Returns:
1550 * 0 for success, -EINVAL for failure.
1551 */
1552static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
1553                                      struct dma_fence *exclusive,
1554                                      dma_addr_t *pages_addr,
1555                                      struct amdgpu_vm *vm,
1556                                      struct amdgpu_bo_va_mapping *mapping,
1557                                      uint64_t flags,
1558                                      struct amdgpu_device *bo_adev,
1559                                      struct drm_mm_node *nodes,
1560                                      struct dma_fence **fence)
1561{
1562        unsigned min_linear_pages = 1 << adev->vm_manager.fragment_size;
1563        uint64_t pfn, start = mapping->start;
1564        int r;
1565
1566        /* Normally only the READABLE and WRITEABLE bits of bo_va->flags
1567         * matter here, but to be safe we filter the flags first.
1568         */
1569        if (!(mapping->flags & AMDGPU_PTE_READABLE))
1570                flags &= ~AMDGPU_PTE_READABLE;
1571        if (!(mapping->flags & AMDGPU_PTE_WRITEABLE))
1572                flags &= ~AMDGPU_PTE_WRITEABLE;
1573
1574        flags &= ~AMDGPU_PTE_EXECUTABLE;
1575        flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;
1576
1577        flags &= ~AMDGPU_PTE_MTYPE_MASK;
1578        flags |= (mapping->flags & AMDGPU_PTE_MTYPE_MASK);
1579
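        /*
         * Added note: on Vega10 and newer, PRT (partially resident texture)
         * mappings get the PRT bit set and the VALID bit cleared, so accesses
         * to not-yet-resident pages can be handled gracefully by the HW
         * instead of raising a VM fault.
         */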
1580        if ((mapping->flags & AMDGPU_PTE_PRT) &&
1581            (adev->asic_type >= CHIP_VEGA10)) {
1582                flags |= AMDGPU_PTE_PRT;
1583                flags &= ~AMDGPU_PTE_VALID;
1584        }
1585
1586        trace_amdgpu_vm_bo_update(mapping);
1587
1588        pfn = mapping->offset >> PAGE_SHIFT;
1589        if (nodes) {
1590                while (pfn >= nodes->size) {
1591                        pfn -= nodes->size;
1592                        ++nodes;
1593                }
1594        }
1595
1596        do {
1597                dma_addr_t *dma_addr = NULL;
1598                uint64_t max_entries;
1599                uint64_t addr, last;
1600
1601                if (nodes) {
1602                        addr = nodes->start << PAGE_SHIFT;
1603                        max_entries = (nodes->size - pfn) *
1604                                AMDGPU_GPU_PAGES_IN_CPU_PAGE;
1605                } else {
1606                        addr = 0;
1607                        max_entries = S64_MAX;
1608                }
1609
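                /*
                 * Added note: for system pages, first count how many CPU
                 * pages starting at @pfn have contiguous DMA addresses. Runs
                 * shorter than min_linear_pages are mapped through the
                 * pages_addr array one page at a time; longer runs are mapped
                 * linearly so the HW can use bigger fragments.
                 */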
1610                if (pages_addr) {
1611                        uint64_t count;
1612
1613                        for (count = 1;
1614                             count < max_entries / AMDGPU_GPU_PAGES_IN_CPU_PAGE;
1615                             ++count) {
1616                                uint64_t idx = pfn + count;
1617
1618                                if (pages_addr[idx] !=
1619                                    (pages_addr[idx - 1] + PAGE_SIZE))
1620                                        break;
1621                        }
1622
1623                        if (count < min_linear_pages) {
1624                                addr = pfn << PAGE_SHIFT;
1625                                dma_addr = pages_addr;
1626                        } else {
1627                                addr = pages_addr[pfn];
1628                                max_entries = count * AMDGPU_GPU_PAGES_IN_CPU_PAGE;
1629                        }
1630
1631                } else if (flags & AMDGPU_PTE_VALID) {
1632                        addr += bo_adev->vm_manager.vram_base_offset;
1633                        addr += pfn << PAGE_SHIFT;
1634                }
1635
1636                last = min((uint64_t)mapping->last, start + max_entries - 1);
1637                r = amdgpu_vm_bo_update_mapping(adev, exclusive, dma_addr, vm,
1638                                                start, last, flags, addr,
1639                                                fence);
1640                if (r)
1641                        return r;
1642
1643                pfn += (last - start + 1) / AMDGPU_GPU_PAGES_IN_CPU_PAGE;
1644                if (nodes && nodes->size == pfn) {
1645                        pfn = 0;
1646                        ++nodes;
1647                }
1648                start = last + 1;
1649
1650        } while (unlikely(start != mapping->last + 1));
1651
1652        return 0;
1653}
1654
1655/**
1656 * amdgpu_vm_bo_update - update all BO mappings in the vm page table
1657 *
1658 * @adev: amdgpu_device pointer
1659 * @bo_va: requested BO and VM object
1660 * @clear: if true clear the entries
1661 *
1662 * Fill in the page table entries for @bo_va.
1663 *
1664 * Returns:
1665 * 0 for success, -EINVAL for failure.
1666 */
1667int amdgpu_vm_bo_update(struct amdgpu_device *adev,
1668                        struct amdgpu_bo_va *bo_va,
1669                        bool clear)
1670{
1671        struct amdgpu_bo *bo = bo_va->base.bo;
1672        struct amdgpu_vm *vm = bo_va->base.vm;
1673        struct amdgpu_bo_va_mapping *mapping;
1674        dma_addr_t *pages_addr = NULL;
1675        struct ttm_mem_reg *mem;
1676        struct drm_mm_node *nodes;
1677        struct dma_fence *exclusive, **last_update;
1678        uint64_t flags;
1679        struct amdgpu_device *bo_adev = adev;
1680        int r;
1681
1682        if (clear || !bo) {
1683                mem = NULL;
1684                nodes = NULL;
1685                exclusive = NULL;
1686        } else {
1687                struct ttm_dma_tt *ttm;
1688
1689                mem = &bo->tbo.mem;
1690                nodes = mem->mm_node;
1691                if (mem->mem_type == TTM_PL_TT) {
1692                        ttm = container_of(bo->tbo.ttm, struct ttm_dma_tt, ttm);
1693                        pages_addr = ttm->dma_address;
1694                }
1695                exclusive = reservation_object_get_excl(bo->tbo.resv);
1696        }
1697
1698        if (bo) {
1699                flags = amdgpu_ttm_tt_pte_flags(adev, bo->tbo.ttm, mem);
1700                bo_adev = amdgpu_ttm_adev(bo->tbo.bdev);
1701        } else {
1702                flags = 0x0;
1703        }
1704
1705        if (clear || (bo && bo->tbo.resv == vm->root.base.bo->tbo.resv))
1706                last_update = &vm->last_update;
1707        else
1708                last_update = &bo_va->last_pt_update;
1709
1710        if (!clear && bo_va->base.moved) {
1711                bo_va->base.moved = false;
1712                list_splice_init(&bo_va->valids, &bo_va->invalids);
1713
1714        } else if (bo_va->cleared != clear) {
1715                list_splice_init(&bo_va->valids, &bo_va->invalids);
1716        }
1717
1718        list_for_each_entry(mapping, &bo_va->invalids, list) {
1719                r = amdgpu_vm_bo_split_mapping(adev, exclusive, pages_addr, vm,
1720                                               mapping, flags, bo_adev, nodes,
1721                                               last_update);
1722                if (r)
1723                        return r;
1724        }
1725
1726        if (vm->use_cpu_for_update) {
1727                /* Flush HDP */
1728                mb();
1729                amdgpu_asic_flush_hdp(adev, NULL);
1730        }
1731
1732        /* If the BO is not in its preferred location add it back to
1733         * the evicted list so that it gets validated again on the
1734         * next command submission.
1735         */
1736        if (bo && bo->tbo.resv == vm->root.base.bo->tbo.resv) {
1737                uint32_t mem_type = bo->tbo.mem.mem_type;
1738
1739                if (!(bo->preferred_domains & amdgpu_mem_type_to_domain(mem_type)))
1740                        amdgpu_vm_bo_evicted(&bo_va->base);
1741                else
1742                        amdgpu_vm_bo_idle(&bo_va->base);
1743        } else {
1744                amdgpu_vm_bo_done(&bo_va->base);
1745        }
1746
1747        list_splice_init(&bo_va->invalids, &bo_va->valids);
1748        bo_va->cleared = clear;
1749
1750        if (trace_amdgpu_vm_bo_mapping_enabled()) {
1751                list_for_each_entry(mapping, &bo_va->valids, list)
1752                        trace_amdgpu_vm_bo_mapping(mapping);
1753        }
1754
1755        return 0;
1756}
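
/*
 * Illustrative sketch (not part of the original file): command submission
 * updates BOs through this function by walking the VM state lists, roughly
 * as amdgpu_vm_handle_moved() does below:
 *
 *      list_for_each_entry_safe(bo_va, tmp, &vm->moved, base.vm_status) {
 *              r = amdgpu_vm_bo_update(adev, bo_va, false);
 *              if (r)
 *                      return r;
 *      }
 *
 * @clear is only set when the BO's backing pages must not be referenced,
 * e.g. when its reservation object could not be locked.
 */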
1757
1758/**
1759 * amdgpu_vm_update_prt_state - update the global PRT state
1760 *
1761 * @adev: amdgpu_device pointer
1762 */
1763static void amdgpu_vm_update_prt_state(struct amdgpu_device *adev)
1764{
1765        unsigned long flags;
1766        bool enable;
1767
1768        spin_lock_irqsave(&adev->vm_manager.prt_lock, flags);
1769        enable = !!atomic_read(&adev->vm_manager.num_prt_users);
1770        adev->gmc.gmc_funcs->set_prt(adev, enable);
1771        spin_unlock_irqrestore(&adev->vm_manager.prt_lock, flags);
1772}
1773
1774/**
1775 * amdgpu_vm_prt_get - add a PRT user
1776 *
1777 * @adev: amdgpu_device pointer
1778 */
1779static void amdgpu_vm_prt_get(struct amdgpu_device *adev)
1780{
1781        if (!adev->gmc.gmc_funcs->set_prt)
1782                return;
1783
1784        if (atomic_inc_return(&adev->vm_manager.num_prt_users) == 1)
1785                amdgpu_vm_update_prt_state(adev);
1786}
1787
1788/**
1789 * amdgpu_vm_prt_put - drop a PRT user
1790 *
1791 * @adev: amdgpu_device pointer
1792 */
1793static void amdgpu_vm_prt_put(struct amdgpu_device *adev)
1794{
1795        if (atomic_dec_return(&adev->vm_manager.num_prt_users) == 0)
1796                amdgpu_vm_update_prt_state(adev);
1797}
1798
1799/**
1800 * amdgpu_vm_prt_cb - callback for updating the PRT status
1801 *
1802 * @fence: fence for the callback
1803 * @_cb: the callback function
1804 */
1805static void amdgpu_vm_prt_cb(struct dma_fence *fence, struct dma_fence_cb *_cb)
1806{
1807        struct amdgpu_prt_cb *cb = container_of(_cb, struct amdgpu_prt_cb, cb);
1808
1809        amdgpu_vm_prt_put(cb->adev);
1810        kfree(cb);
1811}
1812
1813/**
1814 * amdgpu_vm_add_prt_cb - add callback for updating the PRT status
1815 *
1816 * @adev: amdgpu_device pointer
1817 * @fence: fence for the callback
1818 */
1819static void amdgpu_vm_add_prt_cb(struct amdgpu_device *adev,
1820                                 struct dma_fence *fence)
1821{
1822        struct amdgpu_prt_cb *cb;
1823
1824        if (!adev->gmc.gmc_funcs->set_prt)
1825                return;
1826
1827        cb = kmalloc(sizeof(struct amdgpu_prt_cb), GFP_KERNEL);
1828        if (!cb) {
1829                /* Last resort when we are OOM */
1830                if (fence)
1831                        dma_fence_wait(fence, false);
1832
1833                amdgpu_vm_prt_put(adev);
1834        } else {
1835                cb->adev = adev;
1836                if (!fence || dma_fence_add_callback(fence, &cb->cb,
1837                                                     amdgpu_vm_prt_cb))
1838                        amdgpu_vm_prt_cb(fence, &cb->cb);
1839        }
1840}
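
/*
 * Illustrative sketch (not part of the original file): PRT users take a
 * reference up front and let the fence callback drop it again, the same
 * pattern amdgpu_vm_prt_fini() uses below:
 *
 *      amdgpu_vm_prt_get(adev);
 *      amdgpu_vm_add_prt_cb(adev, fence);
 *
 * Once num_prt_users drops to zero, amdgpu_vm_update_prt_state() switches
 * the feature off again.
 */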
1841
1842/**
1843 * amdgpu_vm_free_mapping - free a mapping
1844 *
1845 * @adev: amdgpu_device pointer
1846 * @vm: requested vm
1847 * @mapping: mapping to be freed
1848 * @fence: fence of the unmap operation
1849 *
1850 * Free a mapping and make sure we decrease the PRT usage count if applicable.
1851 */
1852static void amdgpu_vm_free_mapping(struct amdgpu_device *adev,
1853                                   struct amdgpu_vm *vm,
1854                                   struct amdgpu_bo_va_mapping *mapping,
1855                                   struct dma_fence *fence)
1856{
1857        if (mapping->flags & AMDGPU_PTE_PRT)
1858                amdgpu_vm_add_prt_cb(adev, fence);
1859        kfree(mapping);
1860}
1861
1862/**
1863 * amdgpu_vm_prt_fini - finish all prt mappings
1864 *
1865 * @adev: amdgpu_device pointer
1866 * @vm: requested vm
1867 *
1868 * Register a cleanup callback to disable PRT support after VM dies.
1869 */
1870static void amdgpu_vm_prt_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
1871{
1872        struct reservation_object *resv = vm->root.base.bo->tbo.resv;
1873        struct dma_fence *excl, **shared;
1874        unsigned i, shared_count;
1875        int r;
1876
1877        r = reservation_object_get_fences_rcu(resv, &excl,
1878                                              &shared_count, &shared);
1879        if (r) {
1880                /* Not enough memory to grab the fence list, as last resort
1881                 * block for all the fences to complete.
1882                 */
1883                reservation_object_wait_timeout_rcu(resv, true, false,
1884                                                    MAX_SCHEDULE_TIMEOUT);
1885                return;
1886        }
1887
1888        /* Add a callback for each fence in the reservation object */
1889        amdgpu_vm_prt_get(adev);
1890        amdgpu_vm_add_prt_cb(adev, excl);
1891
1892        for (i = 0; i < shared_count; ++i) {
1893                amdgpu_vm_prt_get(adev);
1894                amdgpu_vm_add_prt_cb(adev, shared[i]);
1895        }
1896
1897        kfree(shared);
1898}
1899
1900/**
1901 * amdgpu_vm_clear_freed - clear freed BOs in the PT
1902 *
1903 * @adev: amdgpu_device pointer
1904 * @vm: requested vm
1905 * @fence: optional resulting fence (unchanged if no work needed to be done
1906 * or if an error occurred)
1907 *
1908 * Make sure all freed BOs are cleared in the PT.
1909 * PTs have to be reserved and mutex must be locked!
1910 *
1911 * Returns:
1912 * 0 for success.
1913 *
1914 */
1915int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
1916                          struct amdgpu_vm *vm,
1917                          struct dma_fence **fence)
1918{
1919        struct amdgpu_bo_va_mapping *mapping;
1920        uint64_t init_pte_value = 0;
1921        struct dma_fence *f = NULL;
1922        int r;
1923
1924        while (!list_empty(&vm->freed)) {
1925                mapping = list_first_entry(&vm->freed,
1926                        struct amdgpu_bo_va_mapping, list);
1927                list_del(&mapping->list);
1928
1929                if (vm->pte_support_ats &&
1930                    mapping->start < AMDGPU_GMC_HOLE_START)
1931                        init_pte_value = AMDGPU_PTE_DEFAULT_ATC;
1932
1933                r = amdgpu_vm_bo_update_mapping(adev, NULL, NULL, vm,
1934                                                mapping->start, mapping->last,
1935                                                init_pte_value, 0, &f);
1936                amdgpu_vm_free_mapping(adev, vm, mapping, f);
1937                if (r) {
1938                        dma_fence_put(f);
1939                        return r;
1940                }
1941        }
1942
1943        if (fence && f) {
1944                dma_fence_put(*fence);
1945                *fence = f;
1946        } else {
1947                dma_fence_put(f);
1948        }
1949
1950        return 0;
1951
1952}
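
/*
 * Illustrative sketch (not part of the original file): mappings land on
 * &vm->freed through amdgpu_vm_bo_unmap()/amdgpu_vm_bo_rmv(); a caller then
 * flushes them out of the page tables like this:
 *
 *      struct dma_fence *fence = NULL;
 *
 *      r = amdgpu_vm_clear_freed(adev, vm, &fence);
 *      if (!r && fence)
 *              ... schedule further work against fence ...
 *      dma_fence_put(fence);
 */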
1953
1954/**
1955 * amdgpu_vm_handle_moved - handle moved BOs in the PT
1956 *
1957 * @adev: amdgpu_device pointer
1958 * @vm: requested vm
1959 *
1960 * Make sure all BOs which are moved are updated in the PTs.
1961 *
1962 * Returns:
1963 * 0 for success.
1964 *
1965 * PTs have to be reserved!
1966 */
1967int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
1968                           struct amdgpu_vm *vm)
1969{
1970        struct amdgpu_bo_va *bo_va, *tmp;
1971        struct reservation_object *resv;
1972        bool clear;
1973        int r;
1974
1975        list_for_each_entry_safe(bo_va, tmp, &vm->moved, base.vm_status) {
1976                /* Per VM BOs never need to be cleared in the page tables */
1977                r = amdgpu_vm_bo_update(adev, bo_va, false);
1978                if (r)
1979                        return r;
1980        }
1981
1982        spin_lock(&vm->invalidated_lock);
1983        while (!list_empty(&vm->invalidated)) {
1984                bo_va = list_first_entry(&vm->invalidated, struct amdgpu_bo_va,
1985                                         base.vm_status);
1986                resv = bo_va->base.bo->tbo.resv;
1987                spin_unlock(&vm->invalidated_lock);
1988
1989                /* Try to reserve the BO to avoid clearing its ptes */
1990                if (!amdgpu_vm_debug && reservation_object_trylock(resv))
1991                        clear = false;
1992                /* Somebody else is using the BO right now */
1993                else
1994                        clear = true;
1995
1996                r = amdgpu_vm_bo_update(adev, bo_va, clear);
1997                if (r)
1998                        return r;
1999
2000                if (!clear)
2001                        reservation_object_unlock(resv);
2002                spin_lock(&vm->invalidated_lock);
2003        }
2004        spin_unlock(&vm->invalidated_lock);
2005
2006        return 0;
2007}
2008
2009/**
2010 * amdgpu_vm_bo_add - add a bo to a specific vm
2011 *
2012 * @adev: amdgpu_device pointer
2013 * @vm: requested vm
2014 * @bo: amdgpu buffer object
2015 *
2016 * Add @bo into the requested vm and to the list of BOs
2017 * associated with the vm.
2018 *
2019 * Returns:
2020 * Newly added bo_va or NULL for failure
2021 *
2022 * Object has to be reserved!
2023 */
2024struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
2025                                      struct amdgpu_vm *vm,
2026                                      struct amdgpu_bo *bo)
2027{
2028        struct amdgpu_bo_va *bo_va;
2029
2030        bo_va = kzalloc(sizeof(struct amdgpu_bo_va), GFP_KERNEL);
2031        if (bo_va == NULL) {
2032                return NULL;
2033        }
2034        amdgpu_vm_bo_base_init(&bo_va->base, vm, bo);
2035
2036        bo_va->ref_count = 1;
2037        INIT_LIST_HEAD(&bo_va->valids);
2038        INIT_LIST_HEAD(&bo_va->invalids);
2039
2040        if (bo && amdgpu_xgmi_same_hive(adev, amdgpu_ttm_adev(bo->tbo.bdev)) &&
2041            (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM)) {
2042                bo_va->is_xgmi = true;
2043                mutex_lock(&adev->vm_manager.lock_pstate);
2044                /* Power up XGMI if it can be potentially used */
2045                if (++adev->vm_manager.xgmi_map_counter == 1)
2046                        amdgpu_xgmi_set_pstate(adev, 1);
2047                mutex_unlock(&adev->vm_manager.lock_pstate);
2048        }
2049
2050        return bo_va;
2051}
2052
2053
2054/**
2055 * amdgpu_vm_bo_insert_map - insert a new mapping
2056 *
2057 * @adev: amdgpu_device pointer
2058 * @bo_va: bo_va to store the address
2059 * @mapping: the mapping to insert
2060 *
2061 * Insert a new mapping into all structures.
2062 */
2063static void amdgpu_vm_bo_insert_map(struct amdgpu_device *adev,
2064                                    struct amdgpu_bo_va *bo_va,
2065                                    struct amdgpu_bo_va_mapping *mapping)
2066{
2067        struct amdgpu_vm *vm = bo_va->base.vm;
2068        struct amdgpu_bo *bo = bo_va->base.bo;
2069
2070        mapping->bo_va = bo_va;
2071        list_add(&mapping->list, &bo_va->invalids);
2072        amdgpu_vm_it_insert(mapping, &vm->va);
2073
2074        if (mapping->flags & AMDGPU_PTE_PRT)
2075                amdgpu_vm_prt_get(adev);
2076
2077        if (bo && bo->tbo.resv == vm->root.base.bo->tbo.resv &&
2078            !bo_va->base.moved) {
2079                list_move(&bo_va->base.vm_status, &vm->moved);
2080        }
2081        trace_amdgpu_vm_bo_map(bo_va, mapping);
2082}
2083
2084/**
2085 * amdgpu_vm_bo_map - map bo inside a vm
2086 *
2087 * @adev: amdgpu_device pointer
2088 * @bo_va: bo_va to store the address
2089 * @saddr: where to map the BO
2090 * @offset: requested offset in the BO
2091 * @size: BO size in bytes
2092 * @flags: attributes of pages (read/write/valid/etc.)
2093 *
2094 * Add a mapping of the BO at the specified addr into the VM.
2095 *
2096 * Returns:
2097 * 0 for success, error for failure.
2098 *
2099 * Object has to be reserved and unreserved outside!
2100 */
2101int amdgpu_vm_bo_map(struct amdgpu_device *adev,
2102                     struct amdgpu_bo_va *bo_va,
2103                     uint64_t saddr, uint64_t offset,
2104                     uint64_t size, uint64_t flags)
2105{
2106        struct amdgpu_bo_va_mapping *mapping, *tmp;
2107        struct amdgpu_bo *bo = bo_va->base.bo;
2108        struct amdgpu_vm *vm = bo_va->base.vm;
2109        uint64_t eaddr;
2110
2111        /* validate the parameters */
2112        if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK ||
2113            size == 0 || size & AMDGPU_GPU_PAGE_MASK)
2114                return -EINVAL;
2115
2116        /* make sure object fit at this offset */
2117        eaddr = saddr + size - 1;
2118        if (saddr >= eaddr ||
2119            (bo && offset + size > amdgpu_bo_size(bo)))
2120                return -EINVAL;
2121
2122        saddr /= AMDGPU_GPU_PAGE_SIZE;
2123        eaddr /= AMDGPU_GPU_PAGE_SIZE;
2124
2125        tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
2126        if (tmp) {
2127                /* bo and tmp overlap, invalid addr */
2128                dev_err(adev->dev, "bo %p va 0x%010Lx-0x%010Lx conflict with "
2129                        "0x%010Lx-0x%010Lx\n", bo, saddr, eaddr,
2130                        tmp->start, tmp->last + 1);
2131                return -EINVAL;
2132        }
2133
2134        mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
2135        if (!mapping)
2136                return -ENOMEM;
2137
2138        mapping->start = saddr;
2139        mapping->last = eaddr;
2140        mapping->offset = offset;
2141        mapping->flags = flags;
2142
2143        amdgpu_vm_bo_insert_map(adev, bo_va, mapping);
2144
2145        return 0;
2146}
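
/*
 * Illustrative sketch (not part of the original file, the VA and flags are
 * made up): @saddr, @offset and @size must all be GPU page aligned, e.g.
 * mapping a whole BO read/write:
 *
 *      r = amdgpu_vm_bo_map(adev, bo_va, 0x400000, 0, amdgpu_bo_size(bo),
 *                           AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE);
 *
 * -EINVAL here means the range is misaligned or overlaps an existing
 * mapping; amdgpu_vm_bo_replace_map() below handles the overlapping case.
 */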
2147
2148/**
2149 * amdgpu_vm_bo_replace_map - map bo inside a vm, replacing existing mappings
2150 *
2151 * @adev: amdgpu_device pointer
2152 * @bo_va: bo_va to store the address
2153 * @saddr: where to map the BO
2154 * @offset: requested offset in the BO
2155 * @size: BO size in bytes
2156 * @flags: attributes of pages (read/write/valid/etc.)
2157 *
2158 * Add a mapping of the BO at the specified addr into the VM. Replace existing
2159 * mappings as we do so.
2160 *
2161 * Returns:
2162 * 0 for success, error for failure.
2163 *
2164 * Object has to be reserved and unreserved outside!
2165 */
2166int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
2167                             struct amdgpu_bo_va *bo_va,
2168                             uint64_t saddr, uint64_t offset,
2169                             uint64_t size, uint64_t flags)
2170{
2171        struct amdgpu_bo_va_mapping *mapping;
2172        struct amdgpu_bo *bo = bo_va->base.bo;
2173        uint64_t eaddr;
2174        int r;
2175
2176        /* validate the parameters */
2177        if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK ||
2178            size == 0 || size & AMDGPU_GPU_PAGE_MASK)
2179                return -EINVAL;
2180
2181        /* make sure object fit at this offset */
2182        eaddr = saddr + size - 1;
2183        if (saddr >= eaddr ||
2184            (bo && offset + size > amdgpu_bo_size(bo)))
2185                return -EINVAL;
2186
2187        /* Allocate all the needed memory */
2188        mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
2189        if (!mapping)
2190                return -ENOMEM;
2191
2192        r = amdgpu_vm_bo_clear_mappings(adev, bo_va->base.vm, saddr, size);
2193        if (r) {
2194                kfree(mapping);
2195                return r;
2196        }
2197
2198        saddr /= AMDGPU_GPU_PAGE_SIZE;
2199        eaddr /= AMDGPU_GPU_PAGE_SIZE;
2200
2201        mapping->start = saddr;
2202        mapping->last = eaddr;
2203        mapping->offset = offset;
2204        mapping->flags = flags;
2205
2206        amdgpu_vm_bo_insert_map(adev, bo_va, mapping);
2207
2208        return 0;
2209}
2210
2211/**
2212 * amdgpu_vm_bo_unmap - remove bo mapping from vm
2213 *
2214 * @adev: amdgpu_device pointer
2215 * @bo_va: bo_va to remove the address from
2216 * @saddr: where the BO is mapped
2217 *
2218 * Remove a mapping of the BO at the specified addr from the VM.
2219 *
2220 * Returns:
2221 * 0 for success, error for failure.
2222 *
2223 * Object has to be reserved and unreserved outside!
2224 */
2225int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
2226                       struct amdgpu_bo_va *bo_va,
2227                       uint64_t saddr)
2228{
2229        struct amdgpu_bo_va_mapping *mapping;
2230        struct amdgpu_vm *vm = bo_va->base.vm;
2231        bool valid = true;
2232
2233        saddr /= AMDGPU_GPU_PAGE_SIZE;
2234
2235        list_for_each_entry(mapping, &bo_va->valids, list) {
2236                if (mapping->start == saddr)
2237                        break;
2238        }
2239
2240        if (&mapping->list == &bo_va->valids) {
2241                valid = false;
2242
2243                list_for_each_entry(mapping, &bo_va->invalids, list) {
2244                        if (mapping->start == saddr)
2245                                break;
2246                }
2247
2248                if (&mapping->list == &bo_va->invalids)
2249                        return -ENOENT;
2250        }
2251
2252        list_del(&mapping->list);
2253        amdgpu_vm_it_remove(mapping, &vm->va);
2254        mapping->bo_va = NULL;
2255        trace_amdgpu_vm_bo_unmap(bo_va, mapping);
2256
2257        if (valid)
2258                list_add(&mapping->list, &vm->freed);
2259        else
2260                amdgpu_vm_free_mapping(adev, vm, mapping,
2261                                       bo_va->last_pt_update);
2262
2263        return 0;
2264}
2265
2266/**
2267 * amdgpu_vm_bo_clear_mappings - remove all mappings in a specific range
2268 *
2269 * @adev: amdgpu_device pointer
2270 * @vm: VM structure to use
2271 * @saddr: start of the range
2272 * @size: size of the range
2273 *
2274 * Remove all mappings in a range, split them as appropriate.
2275 *
2276 * Returns:
2277 * 0 for success, error for failure.
2278 */
2279int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
2280                                struct amdgpu_vm *vm,
2281                                uint64_t saddr, uint64_t size)
2282{
2283        struct amdgpu_bo_va_mapping *before, *after, *tmp, *next;
2284        LIST_HEAD(removed);
2285        uint64_t eaddr;
2286
2287        eaddr = saddr + size - 1;
2288        saddr /= AMDGPU_GPU_PAGE_SIZE;
2289        eaddr /= AMDGPU_GPU_PAGE_SIZE;
2290
2291        /* Allocate all the needed memory */
2292        before = kzalloc(sizeof(*before), GFP_KERNEL);
2293        if (!before)
2294                return -ENOMEM;
2295        INIT_LIST_HEAD(&before->list);
2296
2297        after = kzalloc(sizeof(*after), GFP_KERNEL);
2298        if (!after) {
2299                kfree(before);
2300                return -ENOMEM;
2301        }
2302        INIT_LIST_HEAD(&after->list);
2303
2304        /* Now gather all removed mappings */
2305        tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
2306        while (tmp) {
2307                /* Remember mapping split at the start */
2308                if (tmp->start < saddr) {
2309                        before->start = tmp->start;
2310                        before->last = saddr - 1;
2311                        before->offset = tmp->offset;
2312                        before->flags = tmp->flags;
2313                        before->bo_va = tmp->bo_va;
2314                        list_add(&before->list, &tmp->bo_va->invalids);
2315                }
2316
2317                /* Remember mapping split at the end */
2318                if (tmp->last > eaddr) {
2319                        after->start = eaddr + 1;
2320                        after->last = tmp->last;
2321                        after->offset = tmp->offset;
2322                        after->offset += after->start - tmp->start;
2323                        after->flags = tmp->flags;
2324                        after->bo_va = tmp->bo_va;
2325                        list_add(&after->list, &tmp->bo_va->invalids);
2326                }
2327
2328                list_del(&tmp->list);
2329                list_add(&tmp->list, &removed);
2330
2331                tmp = amdgpu_vm_it_iter_next(tmp, saddr, eaddr);
2332        }
2333
2334        /* And free them up */
2335        list_for_each_entry_safe(tmp, next, &removed, list) {
2336                amdgpu_vm_it_remove(tmp, &vm->va);
2337                list_del(&tmp->list);
2338
2339                if (tmp->start < saddr)
2340                    tmp->start = saddr;
2341                if (tmp->last > eaddr)
2342                    tmp->last = eaddr;
2343
2344                tmp->bo_va = NULL;
2345                list_add(&tmp->list, &vm->freed);
2346                trace_amdgpu_vm_bo_unmap(NULL, tmp);
2347        }
2348
2349        /* Insert partial mapping before the range */
2350        if (!list_empty(&before->list)) {
2351                amdgpu_vm_it_insert(before, &vm->va);
2352                if (before->flags & AMDGPU_PTE_PRT)
2353                        amdgpu_vm_prt_get(adev);
2354        } else {
2355                kfree(before);
2356        }
2357
2358        /* Insert partial mapping after the range */
2359        if (!list_empty(&after->list)) {
2360                amdgpu_vm_it_insert(after, &vm->va);
2361                if (after->flags & AMDGPU_PTE_PRT)
2362                        amdgpu_vm_prt_get(adev);
2363        } else {
2364                kfree(after);
2365        }
2366
2367        return 0;
2368}
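
/*
 * Illustrative example (added for clarity): clearing [s..e] out of a mapping
 * covering [start..last] leaves up to two remainders:
 *
 *      before: [start .. s-1]  (same offset into the BO)
 *      after:  [e+1 .. last]   (offset advanced accordingly)
 *
 * The covered middle part is queued on &vm->freed so the next
 * amdgpu_vm_clear_freed() call invalidates its PTEs.
 */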
2369
2370/**
2371 * amdgpu_vm_bo_lookup_mapping - find mapping by address
2372 *
2373 * @vm: the requested VM
2374 * @addr: the address
2375 *
2376 * Find a mapping by its address.
2377 *
2378 * Returns:
2379 * The amdgpu_bo_va_mapping matching for addr or NULL
2380 *
2381 */
2382struct amdgpu_bo_va_mapping *amdgpu_vm_bo_lookup_mapping(struct amdgpu_vm *vm,
2383                                                         uint64_t addr)
2384{
2385        return amdgpu_vm_it_iter_first(&vm->va, addr, addr);
2386}
2387
2388/**
2389 * amdgpu_vm_bo_trace_cs - trace all reserved mappings
2390 *
2391 * @vm: the requested vm
2392 * @ticket: CS ticket
2393 *
2394 * Trace all mappings of BOs reserved during a command submission.
2395 */
2396void amdgpu_vm_bo_trace_cs(struct amdgpu_vm *vm, struct ww_acquire_ctx *ticket)
2397{
2398        struct amdgpu_bo_va_mapping *mapping;
2399
2400        if (!trace_amdgpu_vm_bo_cs_enabled())
2401                return;
2402
2403        for (mapping = amdgpu_vm_it_iter_first(&vm->va, 0, U64_MAX); mapping;
2404             mapping = amdgpu_vm_it_iter_next(mapping, 0, U64_MAX)) {
2405                if (mapping->bo_va && mapping->bo_va->base.bo) {
2406                        struct amdgpu_bo *bo;
2407
2408                        bo = mapping->bo_va->base.bo;
2409                        if (READ_ONCE(bo->tbo.resv->lock.ctx) != ticket)
2410                                continue;
2411                }
2412
2413                trace_amdgpu_vm_bo_cs(mapping);
2414        }
2415}
2416
2417/**
2418 * amdgpu_vm_bo_rmv - remove a bo from a specific vm
2419 *
2420 * @adev: amdgpu_device pointer
2421 * @bo_va: requested bo_va
2422 *
2423 * Remove @bo_va->bo from the requested vm.
2424 *
2425 * Object has to be reserved!
2426 */
2427void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
2428                      struct amdgpu_bo_va *bo_va)
2429{
2430        struct amdgpu_bo_va_mapping *mapping, *next;
2431        struct amdgpu_bo *bo = bo_va->base.bo;
2432        struct amdgpu_vm *vm = bo_va->base.vm;
2433        struct amdgpu_vm_bo_base **base;
2434
2435        if (bo) {
2436                if (bo->tbo.resv == vm->root.base.bo->tbo.resv)
2437                        vm->bulk_moveable = false;
2438
2439                for (base = &bo_va->base.bo->vm_bo; *base;
2440                     base = &(*base)->next) {
2441                        if (*base != &bo_va->base)
2442                                continue;
2443
2444                        *base = bo_va->base.next;
2445                        break;
2446                }
2447        }
2448
2449        spin_lock(&vm->invalidated_lock);
2450        list_del(&bo_va->base.vm_status);
2451        spin_unlock(&vm->invalidated_lock);
2452
2453        list_for_each_entry_safe(mapping, next, &bo_va->valids, list) {
2454                list_del(&mapping->list);
2455                amdgpu_vm_it_remove(mapping, &vm->va);
2456                mapping->bo_va = NULL;
2457                trace_amdgpu_vm_bo_unmap(bo_va, mapping);
2458                list_add(&mapping->list, &vm->freed);
2459        }
2460        list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) {
2461                list_del(&mapping->list);
2462                amdgpu_vm_it_remove(mapping, &vm->va);
2463                amdgpu_vm_free_mapping(adev, vm, mapping,
2464                                       bo_va->last_pt_update);
2465        }
2466
2467        dma_fence_put(bo_va->last_pt_update);
2468
2469        if (bo && bo_va->is_xgmi) {
2470                mutex_lock(&adev->vm_manager.lock_pstate);
2471                if (--adev->vm_manager.xgmi_map_counter == 0)
2472                        amdgpu_xgmi_set_pstate(adev, 0);
2473                mutex_unlock(&adev->vm_manager.lock_pstate);
2474        }
2475
2476        kfree(bo_va);
2477}
2478
2479/**
2480 * amdgpu_vm_bo_invalidate - mark the bo as invalid
2481 *
2482 * @adev: amdgpu_device pointer
2483 * @bo: amdgpu buffer object
2484 * @evicted: is the BO evicted
2485 *
2486 * Mark @bo as invalid.
2487 */
2488void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
2489                             struct amdgpu_bo *bo, bool evicted)
2490{
2491        struct amdgpu_vm_bo_base *bo_base;
2492
2493        /* shadow bo doesn't have bo base, its validation needs its parent */
2494        if (bo->parent && bo->parent->shadow == bo)
2495                bo = bo->parent;
2496
2497        for (bo_base = bo->vm_bo; bo_base; bo_base = bo_base->next) {
2498                struct amdgpu_vm *vm = bo_base->vm;
2499
2500                if (evicted && bo->tbo.resv == vm->root.base.bo->tbo.resv) {
2501                        amdgpu_vm_bo_evicted(bo_base);
2502                        continue;
2503                }
2504
2505                if (bo_base->moved)
2506                        continue;
2507                bo_base->moved = true;
2508
2509                if (bo->tbo.type == ttm_bo_type_kernel)
2510                        amdgpu_vm_bo_relocated(bo_base);
2511                else if (bo->tbo.resv == vm->root.base.bo->tbo.resv)
2512                        amdgpu_vm_bo_moved(bo_base);
2513                else
2514                        amdgpu_vm_bo_invalidated(bo_base);
2515        }
2516}
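
/*
 * Added note: this is the entry point of the per-BO state machine. Evicted
 * per-VM BOs go back to &vm->evicted, page table BOs (kernel type) to
 * &vm->relocated, moved per-VM BOs (sharing the root reservation) to
 * &vm->moved and independently reserved BOs to &vm->invalidated, where
 * amdgpu_vm_handle_moved() picks them up again.
 */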
2517
2518/**
2519 * amdgpu_vm_get_block_size - calculate VM page table size as power of two
2520 *
2521 * @vm_size: VM size
2522 *
2523 * Returns:
2524 * VM page table as power of two
2525 */
2526static uint32_t amdgpu_vm_get_block_size(uint64_t vm_size)
2527{
2528        /* Total bits covered by PD + PTs */
2529        unsigned bits = ilog2(vm_size) + 18;
2530
2531        /* Make sure the PD is 4K in size up to 8GB address space.
2532           Above that, split equally between PD and PTs */
2533        if (vm_size <= 8)
2534                return (bits - 9);
2535        else
2536                return ((bits + 3) / 2);
2537}
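
/*
 * Worked example (added for clarity): a 256 GB VM covers 2^26 GPU pages, so
 * bits = ilog2(256) + 18 = 26; since vm_size > 8 the bits are split about
 * equally, block_size = (26 + 3) / 2 = 14, leaving 12 bits for the PD. At
 * vm_size = 8 GB, bits = 21 and the PD keeps exactly 9 bits (one 4K page),
 * giving block_size = 12.
 */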
2538
2539/**
2540 * amdgpu_vm_adjust_size - adjust vm size, block size and fragment size
2541 *
2542 * @adev: amdgpu_device pointer
2543 * @min_vm_size: the minimum vm size in GB if it's set auto
2544 * @fragment_size_default: Default PTE fragment size
2545 * @max_level: max VMPT level
2546 * @max_bits: max address space size in bits
2547 *
2548 */
2549void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size,
2550                           uint32_t fragment_size_default, unsigned max_level,
2551                           unsigned max_bits)
2552{
2553        unsigned int max_size = 1 << (max_bits - 30);
2554        unsigned int vm_size;
2555        uint64_t tmp;
2556
2557        /* adjust vm size first */
2558        if (amdgpu_vm_size != -1) {
2559                vm_size = amdgpu_vm_size;
2560                if (vm_size > max_size) {
2561                        dev_warn(adev->dev, "VM size (%d) too large, max is %u GB\n",
2562                                 amdgpu_vm_size, max_size);
2563                        vm_size = max_size;
2564                }
2565        } else {
2566                struct sysinfo si;
2567                unsigned int phys_ram_gb;
2568
2569                /* Optimal VM size depends on the amount of physical
2570                 * RAM available. Underlying requirements and
2571                 * assumptions:
2572                 *
2573                 *  - Need to map system memory and VRAM from all GPUs
2574                 *     - VRAM from other GPUs not known here
2575                 *     - Assume VRAM <= system memory
2576                 *  - On GFX8 and older, VM space can be segmented for
2577                 *    different MTYPEs
2578                 *  - Need to allow room for fragmentation, guard pages etc.
2579                 *
2580                 * This adds up to a rough guess of system memory x3.
2581                 * Round up to power of two to maximize the available
2582                 * VM size with the given page table size.
2583                 */
2584                si_meminfo(&si);
2585                phys_ram_gb = ((uint64_t)si.totalram * si.mem_unit +
2586                               (1 << 30) - 1) >> 30;
2587                vm_size = roundup_pow_of_two(
2588                        min(max(phys_ram_gb * 3, min_vm_size), max_size));
2589        }
2590
2591        adev->vm_manager.max_pfn = (uint64_t)vm_size << 18;
2592
2593        tmp = roundup_pow_of_two(adev->vm_manager.max_pfn);
2594        if (amdgpu_vm_block_size != -1)
2595                tmp >>= amdgpu_vm_block_size - 9;
2596        tmp = DIV_ROUND_UP(fls64(tmp) - 1, 9) - 1;
2597        adev->vm_manager.num_level = min(max_level, (unsigned)tmp);
2598        switch (adev->vm_manager.num_level) {
2599        case 3:
2600                adev->vm_manager.root_level = AMDGPU_VM_PDB2;
2601                break;
2602        case 2:
2603                adev->vm_manager.root_level = AMDGPU_VM_PDB1;
2604                break;
2605        case 1:
2606                adev->vm_manager.root_level = AMDGPU_VM_PDB0;
2607                break;
2608        default:
2609                dev_err(adev->dev, "VMPT only supports 2~4+1 levels\n");
2610        }
2611        /* block size depends on vm size and hw setup */
2612        if (amdgpu_vm_block_size != -1)
2613                adev->vm_manager.block_size =
2614                        min((unsigned)amdgpu_vm_block_size, max_bits
2615                            - AMDGPU_GPU_PAGE_SHIFT
2616                            - 9 * adev->vm_manager.num_level);
2617        else if (adev->vm_manager.num_level > 1)
2618                adev->vm_manager.block_size = 9;
2619        else
2620                adev->vm_manager.block_size = amdgpu_vm_get_block_size(tmp);
2621
2622        if (amdgpu_vm_fragment_size == -1)
2623                adev->vm_manager.fragment_size = fragment_size_default;
2624        else
2625                adev->vm_manager.fragment_size = amdgpu_vm_fragment_size;
2626
2627        DRM_INFO("vm size is %u GB, %u levels, block size is %u-bit, fragment size is %u-bit\n",
2628                 vm_size, adev->vm_manager.num_level + 1,
2629                 adev->vm_manager.block_size,
2630                 adev->vm_manager.fragment_size);
2631}
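
/*
 * Worked example (added for clarity, assuming amdgpu_vm_size and
 * amdgpu_vm_block_size are left at their -1 defaults): taking the 48-bit
 * maximum vm_size = 262144 GB, max_pfn = 262144 << 18 = 2^36 pages and
 * tmp = DIV_ROUND_UP(36, 9) - 1 = 3. That selects a setup rooted at
 * AMDGPU_VM_PDB2 with a block size of 9 bits, i.e. 4 levels in total.
 */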
2632
2633/**
2634 * amdgpu_vm_wait_idle - wait for the VM to become idle
2635 *
2636 * @vm: VM object to wait for
2637 * @timeout: timeout to wait for VM to become idle
2638 */
2639long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout)
2640{
2641        return reservation_object_wait_timeout_rcu(vm->root.base.bo->tbo.resv,
2642                                                   true, true, timeout);
2643}
2644
2645/**
2646 * amdgpu_vm_init - initialize a vm instance
2647 *
2648 * @adev: amdgpu_device pointer
2649 * @vm: requested vm
2650 * @vm_context: Indicates whether it is a GFX or Compute context
2651 * @pasid: Process address space identifier
2652 *
2653 * Init @vm fields.
2654 *
2655 * Returns:
2656 * 0 for success, error for failure.
2657 */
2658int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
2659                   int vm_context, unsigned int pasid)
2660{
2661        struct amdgpu_bo_param bp;
2662        struct amdgpu_bo *root;
2663        int r, i;
2664
2665        vm->va = RB_ROOT_CACHED;
2666        for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
2667                vm->reserved_vmid[i] = NULL;
2668        INIT_LIST_HEAD(&vm->evicted);
2669        INIT_LIST_HEAD(&vm->relocated);
2670        INIT_LIST_HEAD(&vm->moved);
2671        INIT_LIST_HEAD(&vm->idle);
2672        INIT_LIST_HEAD(&vm->invalidated);
2673        spin_lock_init(&vm->invalidated_lock);
2674        INIT_LIST_HEAD(&vm->freed);
2675
2676        /* create scheduler entity for page table updates */
2677        r = drm_sched_entity_init(&vm->entity, adev->vm_manager.vm_pte_rqs,
2678                                  adev->vm_manager.vm_pte_num_rqs, NULL);
2679        if (r)
2680                return r;
2681
2682        vm->pte_support_ats = false;
2683
2684        if (vm_context == AMDGPU_VM_CONTEXT_COMPUTE) {
2685                vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
2686                                                AMDGPU_VM_USE_CPU_FOR_COMPUTE);
2687
2688                if (adev->asic_type == CHIP_RAVEN)
2689                        vm->pte_support_ats = true;
2690        } else {
2691                vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
2692                                                AMDGPU_VM_USE_CPU_FOR_GFX);
2693        }
2694        DRM_DEBUG_DRIVER("VM update mode is %s\n",
2695                         vm->use_cpu_for_update ? "CPU" : "SDMA");
2696        WARN_ONCE((vm->use_cpu_for_update && !amdgpu_gmc_vram_full_visible(&adev->gmc)),
2697                  "CPU update of VM recommended only for large BAR system\n");
2698
2699        if (vm->use_cpu_for_update)
2700                vm->update_funcs = &amdgpu_vm_cpu_funcs;
2701        else
2702                vm->update_funcs = &amdgpu_vm_sdma_funcs;
2703        vm->last_update = NULL;
2704
2705        amdgpu_vm_bo_param(adev, vm, adev->vm_manager.root_level, &bp);
2706        if (vm_context == AMDGPU_VM_CONTEXT_COMPUTE)
2707                bp.flags &= ~AMDGPU_GEM_CREATE_SHADOW;
2708        r = amdgpu_bo_create(adev, &bp, &root);
2709        if (r)
2710                goto error_free_sched_entity;
2711
2712        r = amdgpu_bo_reserve(root, true);
2713        if (r)
2714                goto error_free_root;
2715
2716        r = reservation_object_reserve_shared(root->tbo.resv, 1);
2717        if (r)
2718                goto error_unreserve;
2719
2720        amdgpu_vm_bo_base_init(&vm->root.base, vm, root);
2721
2722        r = amdgpu_vm_clear_bo(adev, vm, root);
2723        if (r)
2724                goto error_unreserve;
2725
2726        amdgpu_bo_unreserve(vm->root.base.bo);
2727
2728        if (pasid) {
2729                unsigned long flags;
2730
2731                spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
2732                r = idr_alloc(&adev->vm_manager.pasid_idr, vm, pasid, pasid + 1,
2733                              GFP_ATOMIC);
2734                spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
2735                if (r < 0)
2736                        goto error_free_root;
2737
2738                vm->pasid = pasid;
2739        }
2740
2741        INIT_KFIFO(vm->faults);
2742
2743        return 0;
2744
2745error_unreserve:
2746        amdgpu_bo_unreserve(vm->root.base.bo);
2747
2748error_free_root:
2749        amdgpu_bo_unref(&vm->root.base.bo->shadow);
2750        amdgpu_bo_unref(&vm->root.base.bo);
2751        vm->root.base.bo = NULL;
2752
2753error_free_sched_entity:
2754        drm_sched_entity_destroy(&vm->entity);
2755
2756        return r;
2757}
2758
2759/**
2760 * amdgpu_vm_check_clean_reserved - check if a VM is clean
2761 *
2762 * @adev: amdgpu_device pointer
2763 * @vm: the VM to check
2764 *
2765 * Check all entries of the root PD. If any subsequent PDs are allocated,
2766 * it means page tables are being created and filled in, so the VM is not
2767 * clean
2768 *
2769 * Returns:
2770 *      0 if this VM is clean
2771 */
2772static int amdgpu_vm_check_clean_reserved(struct amdgpu_device *adev,
2773        struct amdgpu_vm *vm)
2774{
2775        enum amdgpu_vm_level root = adev->vm_manager.root_level;
2776        unsigned int entries = amdgpu_vm_num_entries(adev, root);
2777        unsigned int i = 0;
2778
2779        if (!(vm->root.entries))
2780                return 0;
2781
2782        for (i = 0; i < entries; i++) {
2783                if (vm->root.entries[i].base.bo)
2784                        return -EINVAL;
2785        }
2786
2787        return 0;
2788}
2789
2790/**
2791 * amdgpu_vm_make_compute - Turn a GFX VM into a compute VM
2792 *
2793 * @adev: amdgpu_device pointer
2794 * @vm: requested vm
 * @pasid: pasid to use
2795 *
2796 * This only works on GFX VMs that don't have any BOs added and no
2797 * page tables allocated yet.
2798 *
2799 * Changes the following VM parameters:
2800 * - use_cpu_for_update
2801 * - pte_supports_ats
2802 * - pasid (old PASID is released, because compute manages its own PASIDs)
2803 *
2804 * Reinitializes the page directory to reflect the changed ATS
2805 * setting.
2806 *
2807 * Returns:
2808 * 0 for success, -errno for errors.
2809 */
2810int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm, unsigned int pasid)
2811{
2812        bool pte_support_ats = (adev->asic_type == CHIP_RAVEN);
2813        int r;
2814
2815        r = amdgpu_bo_reserve(vm->root.base.bo, true);
2816        if (r)
2817                return r;
2818
2819        /* Sanity checks */
2820        r = amdgpu_vm_check_clean_reserved(adev, vm);
2821        if (r)
2822                goto unreserve_bo;
2823
2824        if (pasid) {
2825                unsigned long flags;
2826
2827                spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
2828                r = idr_alloc(&adev->vm_manager.pasid_idr, vm, pasid, pasid + 1,
2829                              GFP_ATOMIC);
2830                spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
2831
2832                if (r == -ENOSPC)
2833                        goto unreserve_bo;
2834                r = 0;
2835        }
2836
2837        /* Check if PD needs to be reinitialized and do it before
2838         * changing any other state, in case it fails.
2839         */
2840        if (pte_support_ats != vm->pte_support_ats) {
2841                vm->pte_support_ats = pte_support_ats;
2842                r = amdgpu_vm_clear_bo(adev, vm, vm->root.base.bo);
2843                if (r)
2844                        goto free_idr;
2845        }
2846
2847        /* Update VM state */
2848        vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
2849                                    AMDGPU_VM_USE_CPU_FOR_COMPUTE);
2850        DRM_DEBUG_DRIVER("VM update mode is %s\n",
2851                         vm->use_cpu_for_update ? "CPU" : "SDMA");
2852        WARN_ONCE((vm->use_cpu_for_update && !amdgpu_gmc_vram_full_visible(&adev->gmc)),
2853                  "CPU update of VM recommended only for large BAR system\n");
2854
2855        if (vm->pasid) {
2856                unsigned long flags;
2857
2858                spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
2859                idr_remove(&adev->vm_manager.pasid_idr, vm->pasid);
2860                spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
2861
2862                /* Free the original amdgpu allocated pasid
2863                 * Will be replaced with kfd allocated pasid
2864                 */
2865                amdgpu_pasid_free(vm->pasid);
2866                vm->pasid = 0;
2867        }
2868
2869        /* Free the shadow bo for compute VM */
2870        amdgpu_bo_unref(&vm->root.base.bo->shadow);
2871
2872        if (pasid)
2873                vm->pasid = pasid;
2874
2875        goto unreserve_bo;
2876
2877free_idr:
2878        if (pasid) {
2879                unsigned long flags;
2880
2881                spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
2882                idr_remove(&adev->vm_manager.pasid_idr, pasid);
2883                spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
2884        }
2885unreserve_bo:
2886        amdgpu_bo_unreserve(vm->root.base.bo);
2887        return r;
2888}
2889
2890/**
2891 * amdgpu_vm_release_compute - release a compute vm
2892 * @adev: amdgpu_device pointer
2893 * @vm: a vm turned into compute vm by calling amdgpu_vm_make_compute
2894 *
2895 * This is the counterpart of amdgpu_vm_make_compute. It decouples the
2896 * compute pasid from the vm. Compute should stop using the vm after this call.
2897 */
2898void amdgpu_vm_release_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm)
2899{
2900        if (vm->pasid) {
2901                unsigned long flags;
2902
2903                spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
2904                idr_remove(&adev->vm_manager.pasid_idr, vm->pasid);
2905                spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
2906        }
2907        vm->pasid = 0;
2908}
2909
2910/**
2911 * amdgpu_vm_fini - tear down a vm instance
2912 *
2913 * @adev: amdgpu_device pointer
2914 * @vm: requested vm
2915 *
2916 * Tear down @vm.
2917 * Unbind the VM and remove all bos from the vm bo list
2918 */
2919void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
2920{
2921        struct amdgpu_bo_va_mapping *mapping, *tmp;
2922        bool prt_fini_needed = !!adev->gmc.gmc_funcs->set_prt;
2923        struct amdgpu_bo *root;
2924        int i, r;
2925
2926        amdgpu_amdkfd_gpuvm_destroy_cb(adev, vm);
2927
2928        if (vm->pasid) {
2929                unsigned long flags;
2930
2931                spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
2932                idr_remove(&adev->vm_manager.pasid_idr, vm->pasid);
2933                spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
2934        }
2935
2936        drm_sched_entity_destroy(&vm->entity);
2937
2938        if (!RB_EMPTY_ROOT(&vm->va.rb_root)) {
2939                dev_err(adev->dev, "still active bo inside vm\n");
2940        }
2941        rbtree_postorder_for_each_entry_safe(mapping, tmp,
2942                                             &vm->va.rb_root, rb) {
2943                /* Don't remove the mapping here, we don't want to trigger a
2944                 * rebalance and the tree is about to be destroyed anyway.
2945                 */
2946                list_del(&mapping->list);
2947                kfree(mapping);
2948        }
2949        list_for_each_entry_safe(mapping, tmp, &vm->freed, list) {
2950                if (mapping->flags & AMDGPU_PTE_PRT && prt_fini_needed) {
2951                        amdgpu_vm_prt_fini(adev, vm);
2952                        prt_fini_needed = false;
2953                }
2954
2955                list_del(&mapping->list);
2956                amdgpu_vm_free_mapping(adev, vm, mapping, NULL);
2957        }
2958
2959        root = amdgpu_bo_ref(vm->root.base.bo);
2960        r = amdgpu_bo_reserve(root, true);
2961        if (r) {
2962                dev_err(adev->dev, "Leaking page tables because BO reservation failed\n");
2963        } else {
2964                amdgpu_vm_free_pts(adev, vm, NULL);
2965                amdgpu_bo_unreserve(root);
2966        }
2967        amdgpu_bo_unref(&root);
2968        WARN_ON(vm->root.base.bo);
2969        dma_fence_put(vm->last_update);
2970        for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
2971                amdgpu_vmid_free_reserved(adev, vm, i);
2972}
2973
2974/**
2975 * amdgpu_vm_manager_init - init the VM manager
2976 *
2977 * @adev: amdgpu_device pointer
2978 *
2979 * Initialize the VM manager structures
2980 */
2981void amdgpu_vm_manager_init(struct amdgpu_device *adev)
2982{
2983        unsigned i;
2984
2985        amdgpu_vmid_mgr_init(adev);
2986
2987        adev->vm_manager.fence_context =
2988                dma_fence_context_alloc(AMDGPU_MAX_RINGS);
2989        for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
2990                adev->vm_manager.seqno[i] = 0;
2991
2992        spin_lock_init(&adev->vm_manager.prt_lock);
2993        atomic_set(&adev->vm_manager.num_prt_users, 0);
2994
2995        /* If not overridden by the user, by default, only in large BAR systems
2996         * Compute VM tables will be updated by CPU
2997         */
2998#ifdef CONFIG_X86_64
2999        if (amdgpu_vm_update_mode == -1) {
3000                if (amdgpu_gmc_vram_full_visible(&adev->gmc))
3001                        adev->vm_manager.vm_update_mode =
3002                                AMDGPU_VM_USE_CPU_FOR_COMPUTE;
3003                else
3004                        adev->vm_manager.vm_update_mode = 0;
3005        } else
3006                adev->vm_manager.vm_update_mode = amdgpu_vm_update_mode;
3007#else
3008        adev->vm_manager.vm_update_mode = 0;
3009#endif
3010
3011        idr_init(&adev->vm_manager.pasid_idr);
3012        spin_lock_init(&adev->vm_manager.pasid_lock);
3013
3014        adev->vm_manager.xgmi_map_counter = 0;
3015        mutex_init(&adev->vm_manager.lock_pstate);
3016}
3017
3018/**
3019 * amdgpu_vm_manager_fini - cleanup VM manager
3020 *
3021 * @adev: amdgpu_device pointer
3022 *
3023 * Cleanup the VM manager and free resources.
3024 */
3025void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
3026{
3027        WARN_ON(!idr_is_empty(&adev->vm_manager.pasid_idr));
3028        idr_destroy(&adev->vm_manager.pasid_idr);
3029
3030        amdgpu_vmid_mgr_fini(adev);
3031}
3032
3033/**
3034 * amdgpu_vm_ioctl - Manages VMID reservation for vm hubs.
3035 *
3036 * @dev: drm device pointer
3037 * @data: drm_amdgpu_vm
3038 * @filp: drm file pointer
3039 *
3040 * Returns:
3041 * 0 for success, -errno for errors.
3042 */
3043int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
3044{
3045        union drm_amdgpu_vm *args = data;
3046        struct amdgpu_device *adev = dev->dev_private;
3047        struct amdgpu_fpriv *fpriv = filp->driver_priv;
3048        int r;
3049
3050        switch (args->in.op) {
3051        case AMDGPU_VM_OP_RESERVE_VMID:
3052                /* currently, we only need to reserve VMIDs from the gfxhub */
3053                r = amdgpu_vmid_alloc_reserved(adev, &fpriv->vm, AMDGPU_GFXHUB);
3054                if (r)
3055                        return r;
3056                break;
3057        case AMDGPU_VM_OP_UNRESERVE_VMID:
3058                amdgpu_vmid_free_reserved(adev, &fpriv->vm, AMDGPU_GFXHUB);
3059                break;
3060        default:
3061                return -EINVAL;
3062        }
3063
3064        return 0;
3065}
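
/*
 * Illustrative userspace sketch (assumption: libdrm's drmCommandWriteRead
 * wrapper; not part of this file): reserving a VMID for the calling process
 * looks roughly like
 *
 *      union drm_amdgpu_vm args = {};
 *
 *      args.in.op = AMDGPU_VM_OP_RESERVE_VMID;
 *      r = drmCommandWriteRead(fd, DRM_AMDGPU_VM, &args, sizeof(args));
 */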
3066
3067/**
3068 * amdgpu_vm_get_task_info - Extracts task info for a PASID.
3069 *
3070 * @adev: amdgpu device pointer
3071 * @pasid: PASID identifier for VM
3072 * @task_info: task_info to fill.
3073 */
3074void amdgpu_vm_get_task_info(struct amdgpu_device *adev, unsigned int pasid,
3075                         struct amdgpu_task_info *task_info)
3076{
3077        struct amdgpu_vm *vm;
3078        unsigned long flags;
3079
3080        spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
3081
3082        vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
3083        if (vm)
3084                *task_info = vm->task_info;
3085
3086        spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
3087}
3088
3089/**
3090 * amdgpu_vm_set_task_info - Sets VMs task info.
3091 *
3092 * @vm: vm for which to set the info
3093 */
3094void amdgpu_vm_set_task_info(struct amdgpu_vm *vm)
3095{
3096        if (!vm->task_info.pid) {
3097                vm->task_info.pid = current->pid;
3098                get_task_comm(vm->task_info.task_name, current);
3099
3100                if (current->group_leader->mm == current->mm) {
3101                        vm->task_info.tgid = current->group_leader->pid;
3102                        get_task_comm(vm->task_info.process_name, current->group_leader);
3103                }
3104        }
3105}
3106