linux/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 *    Dave Airlie
 */
#include <linux/list.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include <drm/drm_cache.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"

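/**
 * amdgpu_ttm_bo_destroy - TTM destroy callback for amdgpu BOs
 *
 * @tbo: TTM BO being destroyed
 *
 * Called by TTM when the last reference to the BO is dropped: drops any
 * kernel mapping, releases the GEM object and the parent reference,
 * unlinks the BO from the device's shadow list and frees the metadata
 * and the BO itself.
 */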
static void amdgpu_ttm_bo_destroy(struct ttm_buffer_object *tbo)
{
        struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
        struct amdgpu_bo *bo;

        bo = container_of(tbo, struct amdgpu_bo, tbo);

        amdgpu_bo_kunmap(bo);

        drm_gem_object_release(&bo->gem_base);
        amdgpu_bo_unref(&bo->parent);
        if (!list_empty(&bo->shadow_list)) {
                mutex_lock(&adev->shadow_list_lock);
                list_del_init(&bo->shadow_list);
                mutex_unlock(&adev->shadow_list_lock);
        }
        kfree(bo->metadata);
        kfree(bo);
}

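/**
 * amdgpu_ttm_bo_is_amdgpu_bo - check whether a TTM BO is an amdgpu BO
 *
 * @bo: TTM BO to check
 *
 * Identifies amdgpu BOs by their destroy callback, so that TTM BOs
 * belonging to other drivers are never cast to struct amdgpu_bo.
 *
 * Returns true if the BO belongs to this driver.
 */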
bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo)
{
        return bo->destroy == &amdgpu_ttm_bo_destroy;
}

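/**
 * amdgpu_ttm_placement_init - build a TTM placement list for a domain mask
 *
 * @adev: amdgpu device object
 * @placement: placement to fill in
 * @places: array backing the placement entries
 * @domain: mask of AMDGPU_GEM_DOMAIN_* bits to allow
 * @flags: AMDGPU_GEM_CREATE_* flags that refine the placement
 *
 * Translates a domain mask and creation flags into concrete TTM placement
 * entries, and falls back to a system placement if no domain bit is set.
 */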
static void amdgpu_ttm_placement_init(struct amdgpu_device *adev,
                                      struct ttm_placement *placement,
                                      struct ttm_place *places,
                                      u32 domain, u64 flags)
{
        u32 c = 0;

        if (domain & AMDGPU_GEM_DOMAIN_VRAM) {
                unsigned visible_pfn = adev->mc.visible_vram_size >> PAGE_SHIFT;

                places[c].fpfn = 0;
                places[c].lpfn = 0;
                places[c].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
                        TTM_PL_FLAG_VRAM;

                if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
                        places[c].lpfn = visible_pfn;
                else
                        places[c].flags |= TTM_PL_FLAG_TOPDOWN;

                if (flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)
                        places[c].flags |= TTM_PL_FLAG_CONTIGUOUS;
                c++;
        }

        if (domain & AMDGPU_GEM_DOMAIN_GTT) {
                places[c].fpfn = 0;
                if (flags & AMDGPU_GEM_CREATE_SHADOW)
                        places[c].lpfn = adev->mc.gart_size >> PAGE_SHIFT;
                else
                        places[c].lpfn = 0;
                places[c].flags = TTM_PL_FLAG_TT;
                if (flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
                        places[c].flags |= TTM_PL_FLAG_WC |
                                TTM_PL_FLAG_UNCACHED;
                else
                        places[c].flags |= TTM_PL_FLAG_CACHED;
                c++;
        }

        if (domain & AMDGPU_GEM_DOMAIN_CPU) {
                places[c].fpfn = 0;
                places[c].lpfn = 0;
                places[c].flags = TTM_PL_FLAG_SYSTEM;
                if (flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
                        places[c].flags |= TTM_PL_FLAG_WC |
                                TTM_PL_FLAG_UNCACHED;
                else
                        places[c].flags |= TTM_PL_FLAG_CACHED;
                c++;
        }

        if (domain & AMDGPU_GEM_DOMAIN_GDS) {
                places[c].fpfn = 0;
                places[c].lpfn = 0;
                places[c].flags = TTM_PL_FLAG_UNCACHED | AMDGPU_PL_FLAG_GDS;
                c++;
        }

        if (domain & AMDGPU_GEM_DOMAIN_GWS) {
                places[c].fpfn = 0;
                places[c].lpfn = 0;
                places[c].flags = TTM_PL_FLAG_UNCACHED | AMDGPU_PL_FLAG_GWS;
                c++;
        }

        if (domain & AMDGPU_GEM_DOMAIN_OA) {
                places[c].fpfn = 0;
                places[c].lpfn = 0;
                places[c].flags = TTM_PL_FLAG_UNCACHED | AMDGPU_PL_FLAG_OA;
                c++;
        }

        if (!c) {
                places[c].fpfn = 0;
                places[c].lpfn = 0;
                places[c].flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
                c++;
        }

        placement->num_placement = c;
        placement->placement = places;

        placement->num_busy_placement = c;
        placement->busy_placement = places;
}

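/**
 * amdgpu_ttm_placement_from_domain - set a BO's placement from a domain mask
 *
 * @abo: BO whose placement is updated
 * @domain: mask of AMDGPU_GEM_DOMAIN_* bits to allow
 *
 * Rebuilds @abo's embedded placement list for @domain using the BO's
 * creation flags.
 */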
void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
{
        struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);

        amdgpu_ttm_placement_init(adev, &abo->placement, abo->placements,
                                  domain, abo->flags);
}

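/**
 * amdgpu_fill_placement_to_bo - copy an external placement into a BO
 *
 * @bo: BO to update
 * @placement: placement to copy
 *
 * Copies a caller-provided placement into the BO's embedded arrays so
 * that it stays valid for the BO's lifetime.
 */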
static void amdgpu_fill_placement_to_bo(struct amdgpu_bo *bo,
                                        struct ttm_placement *placement)
{
        BUG_ON(placement->num_placement > (AMDGPU_GEM_DOMAIN_MAX + 1));

        memcpy(bo->placements, placement->placement,
               placement->num_placement * sizeof(struct ttm_place));
        bo->placement.num_placement = placement->num_placement;
        bo->placement.num_busy_placement = placement->num_busy_placement;
        bo->placement.placement = bo->placements;
        bo->placement.busy_placement = bo->placements;
}

/**
 * amdgpu_bo_create_reserved - create reserved BO for kernel use
 *
 * @adev: amdgpu device object
 * @size: size for the new BO
 * @align: alignment for the new BO
 * @domain: where to place it
 * @bo_ptr: resulting BO
 * @gpu_addr: GPU addr of the pinned BO
 * @cpu_addr: optional CPU address mapping
 *
 * Allocates and pins a BO for kernel internal use, and returns it still
 * reserved.
 *
 * Returns 0 on success, negative error code otherwise.
 */
int amdgpu_bo_create_reserved(struct amdgpu_device *adev,
                              unsigned long size, int align,
                              u32 domain, struct amdgpu_bo **bo_ptr,
                              u64 *gpu_addr, void **cpu_addr)
{
        bool free = false;
        int r;

        if (!*bo_ptr) {
                r = amdgpu_bo_create(adev, size, align, true, domain,
                                     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
                                     AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
                                     NULL, NULL, 0, bo_ptr);
                if (r) {
                        dev_err(adev->dev, "(%d) failed to allocate kernel bo\n",
                                r);
                        return r;
                }
                free = true;
        }

        r = amdgpu_bo_reserve(*bo_ptr, false);
        if (r) {
                dev_err(adev->dev, "(%d) failed to reserve kernel bo\n", r);
                goto error_free;
        }

        r = amdgpu_bo_pin(*bo_ptr, domain, gpu_addr);
        if (r) {
                dev_err(adev->dev, "(%d) kernel bo pin failed\n", r);
                goto error_unreserve;
        }

        if (cpu_addr) {
                r = amdgpu_bo_kmap(*bo_ptr, cpu_addr);
                if (r) {
                        dev_err(adev->dev, "(%d) kernel bo map failed\n", r);
                        goto error_unreserve;
                }
        }

        return 0;

error_unreserve:
        amdgpu_bo_unreserve(*bo_ptr);

error_free:
        if (free)
                amdgpu_bo_unref(bo_ptr);

        return r;
}

/**
 * amdgpu_bo_create_kernel - create BO for kernel use
 *
 * @adev: amdgpu device object
 * @size: size for the new BO
 * @align: alignment for the new BO
 * @domain: where to place it
 * @bo_ptr: resulting BO
 * @gpu_addr: GPU addr of the pinned BO
 * @cpu_addr: optional CPU address mapping
 *
 * Allocates and pins a BO for kernel internal use.
 *
 * Returns 0 on success, negative error code otherwise.
 */
int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
                            unsigned long size, int align,
                            u32 domain, struct amdgpu_bo **bo_ptr,
                            u64 *gpu_addr, void **cpu_addr)
{
        int r;

        r = amdgpu_bo_create_reserved(adev, size, align, domain, bo_ptr,
                                      gpu_addr, cpu_addr);

        if (r)
                return r;

        amdgpu_bo_unreserve(*bo_ptr);

        return 0;
}
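
/*
 * Example (illustrative sketch, not part of the original file): the
 * typical pairing of amdgpu_bo_create_kernel() and amdgpu_bo_free_kernel()
 * for a page-sized, CPU-visible scratch buffer. The variable names are
 * hypothetical.
 *
 *	struct amdgpu_bo *scratch_bo = NULL;
 *	u64 scratch_gpu_addr;
 *	void *scratch_cpu_ptr;
 *	int r;
 *
 *	r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
 *				    AMDGPU_GEM_DOMAIN_GTT, &scratch_bo,
 *				    &scratch_gpu_addr, &scratch_cpu_ptr);
 *	if (r)
 *		return r;
 *
 *	... use scratch_cpu_ptr / scratch_gpu_addr here ...
 *
 *	amdgpu_bo_free_kernel(&scratch_bo, &scratch_gpu_addr,
 *			      &scratch_cpu_ptr);
 */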

/**
 * amdgpu_bo_free_kernel - free BO for kernel use
 *
 * @bo: amdgpu BO to free
 * @gpu_addr: pointer to the GPU address to clear, can be NULL
 * @cpu_addr: pointer to the CPU mapping to unmap and clear, can be NULL
 *
 * Unmaps and unpins a BO for kernel internal use.
 */
void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr,
                           void **cpu_addr)
{
        if (*bo == NULL)
                return;

        if (likely(amdgpu_bo_reserve(*bo, true) == 0)) {
                if (cpu_addr)
                        amdgpu_bo_kunmap(*bo);

                amdgpu_bo_unpin(*bo);
                amdgpu_bo_unreserve(*bo);
        }
        amdgpu_bo_unref(bo);

        if (gpu_addr)
                *gpu_addr = 0;

        if (cpu_addr)
                *cpu_addr = NULL;
}

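/**
 * amdgpu_bo_create_restricted - create a BO with a caller-supplied placement
 *
 * @adev: amdgpu device object
 * @size: size for the new BO
 * @byte_align: alignment for the new BO
 * @kernel: true if the BO is for kernel internal use
 * @domain: mask of AMDGPU_GEM_DOMAIN_* bits to allow
 * @flags: AMDGPU_GEM_CREATE_* flags
 * @sg: optional sg table for sg BOs
 * @placement: placement to use instead of one derived from @domain
 * @resv: optional reservation object to share with other BOs
 * @init_value: fill value if AMDGPU_GEM_CREATE_VRAM_CLEARED is set
 * @bo_ptr: resulting BO
 *
 * Low-level BO allocation that takes a fully specified placement from the
 * caller; amdgpu_bo_create() is the convenience wrapper around it.
 *
 * Returns 0 on success, negative error code otherwise.
 */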
int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
                                unsigned long size, int byte_align,
                                bool kernel, u32 domain, u64 flags,
                                struct sg_table *sg,
                                struct ttm_placement *placement,
                                struct reservation_object *resv,
                                uint64_t init_value,
                                struct amdgpu_bo **bo_ptr)
{
        struct amdgpu_bo *bo;
        enum ttm_bo_type type;
        unsigned long page_align;
        u64 initial_bytes_moved, bytes_moved;
        size_t acc_size;
        int r;

        page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT;
        size = ALIGN(size, PAGE_SIZE);

        if (kernel) {
                type = ttm_bo_type_kernel;
        } else if (sg) {
                type = ttm_bo_type_sg;
        } else {
                type = ttm_bo_type_device;
        }
        *bo_ptr = NULL;

        acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size,
                                       sizeof(struct amdgpu_bo));

        bo = kzalloc(sizeof(struct amdgpu_bo), GFP_KERNEL);
        if (bo == NULL)
                return -ENOMEM;
        r = drm_gem_object_init(adev->ddev, &bo->gem_base, size);
        if (unlikely(r)) {
                kfree(bo);
                return r;
        }
        INIT_LIST_HEAD(&bo->shadow_list);
        INIT_LIST_HEAD(&bo->va);
        bo->preferred_domains = domain & (AMDGPU_GEM_DOMAIN_VRAM |
                                         AMDGPU_GEM_DOMAIN_GTT |
                                         AMDGPU_GEM_DOMAIN_CPU |
                                         AMDGPU_GEM_DOMAIN_GDS |
                                         AMDGPU_GEM_DOMAIN_GWS |
                                         AMDGPU_GEM_DOMAIN_OA);
        bo->allowed_domains = bo->preferred_domains;
        if (!kernel && bo->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
                bo->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;

        bo->flags = flags;

#ifdef CONFIG_X86_32
        /* XXX: Write-combined CPU mappings of GTT seem broken on 32-bit
         * See https://bugs.freedesktop.org/show_bug.cgi?id=84627
         */
        bo->flags &= ~AMDGPU_GEM_CREATE_CPU_GTT_USWC;
#elif defined(CONFIG_X86) && !defined(CONFIG_X86_PAT)
        /* Don't try to enable write-combining when it can't work, or things
         * may be slow
         * See https://bugs.freedesktop.org/show_bug.cgi?id=88758
         */

#ifndef CONFIG_COMPILE_TEST
#warning Please enable CONFIG_MTRR and CONFIG_X86_PAT for better performance \
         thanks to write-combining
#endif

        if (bo->flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
                DRM_INFO_ONCE("Please enable CONFIG_MTRR and CONFIG_X86_PAT for "
                              "better performance thanks to write-combining\n");
        bo->flags &= ~AMDGPU_GEM_CREATE_CPU_GTT_USWC;
#else
        /* For architectures that don't support WC memory,
         * mask out the WC flag from the BO
         */
        if (!drm_arch_can_wc_memory())
                bo->flags &= ~AMDGPU_GEM_CREATE_CPU_GTT_USWC;
#endif

        amdgpu_fill_placement_to_bo(bo, placement);
        /* Kernel allocations are uninterruptible */

        initial_bytes_moved = atomic64_read(&adev->num_bytes_moved);
        r = ttm_bo_init_reserved(&adev->mman.bdev, &bo->tbo, size, type,
                                 &bo->placement, page_align, !kernel, NULL,
                                 acc_size, sg, resv, &amdgpu_ttm_bo_destroy);
        bytes_moved = atomic64_read(&adev->num_bytes_moved) -
                      initial_bytes_moved;
        if (adev->mc.visible_vram_size < adev->mc.real_vram_size &&
            bo->tbo.mem.mem_type == TTM_PL_VRAM &&
            bo->tbo.mem.start < adev->mc.visible_vram_size >> PAGE_SHIFT)
                amdgpu_cs_report_moved_bytes(adev, bytes_moved, bytes_moved);
        else
                amdgpu_cs_report_moved_bytes(adev, bytes_moved, 0);

        if (unlikely(r != 0))
                return r;

        if (kernel)
                bo->tbo.priority = 1;

        if (flags & AMDGPU_GEM_CREATE_VRAM_CLEARED &&
            bo->tbo.mem.placement & TTM_PL_FLAG_VRAM) {
                struct dma_fence *fence;

                r = amdgpu_fill_buffer(bo, init_value, bo->tbo.resv, &fence);
                if (unlikely(r))
                        goto fail_unreserve;

                amdgpu_bo_fence(bo, fence, false);
                dma_fence_put(bo->tbo.moving);
                bo->tbo.moving = dma_fence_get(fence);
                dma_fence_put(fence);
        }
        if (!resv)
                amdgpu_bo_unreserve(bo);
        *bo_ptr = bo;

        trace_amdgpu_bo_create(bo);

        /* Treat CPU_ACCESS_REQUIRED only as a hint if given by UMD */
        if (type == ttm_bo_type_device)
                bo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;

        return 0;

fail_unreserve:
        if (!resv)
                ww_mutex_unlock(&bo->tbo.resv->lock);
        amdgpu_bo_unref(&bo);
        return r;
}

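/**
 * amdgpu_bo_create_shadow - create a GTT shadow copy of a BO
 *
 * @adev: amdgpu device object
 * @size: size of the shadow BO
 * @byte_align: alignment for the shadow BO
 * @bo: BO to shadow
 *
 * Allocates a GTT BO that mirrors @bo so its contents can be restored
 * after a GPU reset, and links it into the device's shadow list.
 *
 * Returns 0 on success, negative error code otherwise.
 */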
static int amdgpu_bo_create_shadow(struct amdgpu_device *adev,
                                   unsigned long size, int byte_align,
                                   struct amdgpu_bo *bo)
{
        struct ttm_placement placement = {0};
        struct ttm_place placements[AMDGPU_GEM_DOMAIN_MAX + 1];
        int r;

        if (bo->shadow)
                return 0;

        memset(&placements, 0, sizeof(placements));
        amdgpu_ttm_placement_init(adev, &placement, placements,
                                  AMDGPU_GEM_DOMAIN_GTT,
                                  AMDGPU_GEM_CREATE_CPU_GTT_USWC |
                                  AMDGPU_GEM_CREATE_SHADOW);

        r = amdgpu_bo_create_restricted(adev, size, byte_align, true,
                                        AMDGPU_GEM_DOMAIN_GTT,
                                        AMDGPU_GEM_CREATE_CPU_GTT_USWC |
                                        AMDGPU_GEM_CREATE_SHADOW,
                                        NULL, &placement,
                                        bo->tbo.resv,
                                        0,
                                        &bo->shadow);
        if (!r) {
                bo->shadow->parent = amdgpu_bo_ref(bo);
                mutex_lock(&adev->shadow_list_lock);
                list_add_tail(&bo->shadow_list, &adev->shadow_list);
                mutex_unlock(&adev->shadow_list_lock);
        }

        return r;
}

/* init_value will only take effect when flags contains
 * AMDGPU_GEM_CREATE_VRAM_CLEARED.
 */
int amdgpu_bo_create(struct amdgpu_device *adev,
                     unsigned long size, int byte_align,
                     bool kernel, u32 domain, u64 flags,
                     struct sg_table *sg,
                     struct reservation_object *resv,
                     uint64_t init_value,
                     struct amdgpu_bo **bo_ptr)
{
        struct ttm_placement placement = {0};
        struct ttm_place placements[AMDGPU_GEM_DOMAIN_MAX + 1];
        uint64_t parent_flags = flags & ~AMDGPU_GEM_CREATE_SHADOW;
        int r;

        memset(&placements, 0, sizeof(placements));
        amdgpu_ttm_placement_init(adev, &placement, placements,
                                  domain, parent_flags);

        r = amdgpu_bo_create_restricted(adev, size, byte_align, kernel, domain,
                                        parent_flags, sg, &placement, resv,
                                        init_value, bo_ptr);
        if (r)
                return r;

        if ((flags & AMDGPU_GEM_CREATE_SHADOW) && amdgpu_need_backup(adev)) {
                if (!resv)
                        WARN_ON(reservation_object_lock((*bo_ptr)->tbo.resv,
                                                        NULL));

                r = amdgpu_bo_create_shadow(adev, size, byte_align, (*bo_ptr));

                if (!resv)
                        reservation_object_unlock((*bo_ptr)->tbo.resv);

                if (r)
                        amdgpu_bo_unref(bo_ptr);
        }

        return r;
}

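/**
 * amdgpu_bo_backup_to_shadow - copy a BO's contents to its shadow
 *
 * @adev: amdgpu device object
 * @ring: ring used for the copy
 * @bo: BO to back up; must have a shadow and be reserved
 * @resv: reservation object to sync with before copying
 * @fence: returned fence for the copy
 * @direct: submit directly to the ring instead of through the scheduler
 *
 * Returns 0 on success, negative error code otherwise.
 */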
int amdgpu_bo_backup_to_shadow(struct amdgpu_device *adev,
                               struct amdgpu_ring *ring,
                               struct amdgpu_bo *bo,
                               struct reservation_object *resv,
                               struct dma_fence **fence,
                               bool direct)
{
        struct amdgpu_bo *shadow = bo->shadow;
        uint64_t bo_addr, shadow_addr;
        int r;

        if (!shadow)
                return -EINVAL;

        bo_addr = amdgpu_bo_gpu_offset(bo);
        shadow_addr = amdgpu_bo_gpu_offset(bo->shadow);

        r = reservation_object_reserve_shared(bo->tbo.resv);
        if (r)
                goto err;

        r = amdgpu_copy_buffer(ring, bo_addr, shadow_addr,
                               amdgpu_bo_size(bo), resv, fence,
                               direct, false);
        if (!r)
                amdgpu_bo_fence(bo, *fence, true);

err:
        return r;
}

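/**
 * amdgpu_bo_validate - make a BO resident again
 *
 * @bo: BO to validate; must be reserved
 *
 * Re-validates an unpinned BO into its preferred domains, falling back to
 * the allowed domains if the preferred ones are out of memory.
 *
 * Returns 0 on success, negative error code otherwise.
 */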
int amdgpu_bo_validate(struct amdgpu_bo *bo)
{
        uint32_t domain;
        int r;

        if (bo->pin_count)
                return 0;

        domain = bo->preferred_domains;

retry:
        amdgpu_ttm_placement_from_domain(bo, domain);
        r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
        if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) {
                domain = bo->allowed_domains;
                goto retry;
        }

        return r;
}

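/**
 * amdgpu_bo_restore_from_shadow - restore a BO's contents from its shadow
 *
 * @adev: amdgpu device object
 * @ring: ring used for the copy
 * @bo: BO to restore; must have a shadow and be reserved
 * @resv: reservation object to sync with before copying
 * @fence: returned fence for the copy
 * @direct: submit directly to the ring instead of through the scheduler
 *
 * Returns 0 on success, negative error code otherwise.
 */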
int amdgpu_bo_restore_from_shadow(struct amdgpu_device *adev,
                                  struct amdgpu_ring *ring,
                                  struct amdgpu_bo *bo,
                                  struct reservation_object *resv,
                                  struct dma_fence **fence,
                                  bool direct)
{
        struct amdgpu_bo *shadow = bo->shadow;
        uint64_t bo_addr, shadow_addr;
        int r;

        if (!shadow)
                return -EINVAL;

        bo_addr = amdgpu_bo_gpu_offset(bo);
        shadow_addr = amdgpu_bo_gpu_offset(bo->shadow);

        r = reservation_object_reserve_shared(bo->tbo.resv);
        if (r)
                goto err;

        r = amdgpu_copy_buffer(ring, shadow_addr, bo_addr,
                               amdgpu_bo_size(bo), resv, fence,
                               direct, false);
        if (!r)
                amdgpu_bo_fence(bo, *fence, true);

err:
        return r;
}

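/**
 * amdgpu_bo_kmap - map a BO into kernel address space
 *
 * @bo: BO to map; must be reserved
 * @ptr: optional returned kernel virtual address
 *
 * Waits for any pending exclusive fence, then maps the whole BO through
 * TTM. The mapping is cached in the BO, so repeated calls are cheap.
 *
 * Returns 0 on success, negative error code otherwise.
 */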
int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr)
{
        void *kptr;
        long r;

        if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
                return -EPERM;

        kptr = amdgpu_bo_kptr(bo);
        if (kptr) {
                if (ptr)
                        *ptr = kptr;
                return 0;
        }

        r = reservation_object_wait_timeout_rcu(bo->tbo.resv, false, false,
                                                MAX_SCHEDULE_TIMEOUT);
        if (r < 0)
                return r;

        r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
        if (r)
                return r;

        if (ptr)
                *ptr = amdgpu_bo_kptr(bo);

        return 0;
}

void *amdgpu_bo_kptr(struct amdgpu_bo *bo)
{
        bool is_iomem;

        return ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
}

void amdgpu_bo_kunmap(struct amdgpu_bo *bo)
{
        if (bo->kmap.bo)
                ttm_bo_kunmap(&bo->kmap);
}

struct amdgpu_bo *amdgpu_bo_ref(struct amdgpu_bo *bo)
{
        if (bo == NULL)
                return NULL;

        ttm_bo_reference(&bo->tbo);
        return bo;
}

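/**
 * amdgpu_bo_unref - drop a reference to a BO
 *
 * @bo: pointer to the BO; cleared to NULL once the reference is dropped
 */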
void amdgpu_bo_unref(struct amdgpu_bo **bo)
{
        struct ttm_buffer_object *tbo;

        if ((*bo) == NULL)
                return;

        tbo = &((*bo)->tbo);
        ttm_bo_unref(&tbo);
        if (tbo == NULL)
                *bo = NULL;
}

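/**
 * amdgpu_bo_pin_restricted - pin a BO within a given address range
 *
 * @bo: BO to pin; must be reserved
 * @domain: domain to pin the BO into
 * @min_offset: minimum GPU offset of the pinned BO
 * @max_offset: maximum GPU offset of the pinned BO, 0 for no limit
 * @gpu_addr: optional returned GPU address of the pinned BO
 *
 * Pins the BO so it cannot be evicted, validating it into @domain within
 * [@min_offset, @max_offset]. Pinning is refcounted: repeated calls just
 * increase the pin count, but the domain must match the existing pin.
 *
 * Returns 0 on success, negative error code otherwise.
 */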
int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
                             u64 min_offset, u64 max_offset,
                             u64 *gpu_addr)
{
        struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
        int r, i;
        unsigned fpfn, lpfn;

        if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm))
                return -EPERM;

        if (WARN_ON_ONCE(min_offset > max_offset))
                return -EINVAL;

        /* A shared bo cannot be migrated to VRAM */
        if (bo->prime_shared_count && (domain == AMDGPU_GEM_DOMAIN_VRAM))
                return -EINVAL;

        if (bo->pin_count) {
                uint32_t mem_type = bo->tbo.mem.mem_type;

                if (domain != amdgpu_mem_type_to_domain(mem_type))
                        return -EINVAL;

                bo->pin_count++;
                if (gpu_addr)
                        *gpu_addr = amdgpu_bo_gpu_offset(bo);

                if (max_offset != 0) {
                        u64 domain_start = bo->tbo.bdev->man[mem_type].gpu_offset;
                        WARN_ON_ONCE(max_offset <
                                     (amdgpu_bo_gpu_offset(bo) - domain_start));
                }

                return 0;
        }

        bo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
        amdgpu_ttm_placement_from_domain(bo, domain);
        for (i = 0; i < bo->placement.num_placement; i++) {
                /* force to pin into visible video ram */
                if ((bo->placements[i].flags & TTM_PL_FLAG_VRAM) &&
                    !(bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS) &&
                    (!max_offset || max_offset >
                     adev->mc.visible_vram_size)) {
                        if (WARN_ON_ONCE(min_offset >
                                         adev->mc.visible_vram_size))
                                return -EINVAL;
                        fpfn = min_offset >> PAGE_SHIFT;
                        lpfn = adev->mc.visible_vram_size >> PAGE_SHIFT;
                } else {
                        fpfn = min_offset >> PAGE_SHIFT;
                        lpfn = max_offset >> PAGE_SHIFT;
                }
                if (fpfn > bo->placements[i].fpfn)
                        bo->placements[i].fpfn = fpfn;
                if (!bo->placements[i].lpfn ||
                    (lpfn && lpfn < bo->placements[i].lpfn))
                        bo->placements[i].lpfn = lpfn;
                bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
        }

        r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
        if (unlikely(r)) {
                dev_err(adev->dev, "%p pin failed\n", bo);
                goto error;
        }

        bo->pin_count = 1;
        if (gpu_addr != NULL) {
                r = amdgpu_ttm_bind(&bo->tbo, &bo->tbo.mem);
                if (unlikely(r)) {
                        dev_err(adev->dev, "%p bind failed\n", bo);
                        goto error;
                }
                *gpu_addr = amdgpu_bo_gpu_offset(bo);
        }
        if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
                adev->vram_pin_size += amdgpu_bo_size(bo);
                if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
                        adev->invisible_pin_size += amdgpu_bo_size(bo);
        } else if (domain == AMDGPU_GEM_DOMAIN_GTT) {
                adev->gart_pin_size += amdgpu_bo_size(bo);
        }

error:
        return r;
}

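/**
 * amdgpu_bo_pin - pin a BO anywhere within a domain
 *
 * @bo: BO to pin; must be reserved
 * @domain: domain to pin the BO into
 * @gpu_addr: optional returned GPU address of the pinned BO
 *
 * Returns 0 on success, negative error code otherwise.
 */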
int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain, u64 *gpu_addr)
{
        return amdgpu_bo_pin_restricted(bo, domain, 0, 0, gpu_addr);
}

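/**
 * amdgpu_bo_unpin - unpin a BO
 *
 * @bo: BO to unpin; must be reserved
 *
 * Decreases the pin count; once it reaches zero the BO becomes evictable
 * again and the pinned-size accounting is updated.
 *
 * Returns 0 on success, negative error code otherwise.
 */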
int amdgpu_bo_unpin(struct amdgpu_bo *bo)
{
        struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
        int r, i;

        if (!bo->pin_count) {
                dev_warn(adev->dev, "%p unpin not necessary\n", bo);
                return 0;
        }
        bo->pin_count--;
        if (bo->pin_count)
                return 0;
        for (i = 0; i < bo->placement.num_placement; i++) {
                bo->placements[i].lpfn = 0;
                bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
        }
        r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
        if (unlikely(r)) {
                dev_err(adev->dev, "%p validate failed for unpin\n", bo);
                goto error;
        }

        if (bo->tbo.mem.mem_type == TTM_PL_VRAM) {
                adev->vram_pin_size -= amdgpu_bo_size(bo);
                if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
                        adev->invisible_pin_size -= amdgpu_bo_size(bo);
        } else if (bo->tbo.mem.mem_type == TTM_PL_TT) {
                adev->gart_pin_size -= amdgpu_bo_size(bo);
        }

error:
        return r;
}

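/**
 * amdgpu_bo_evict_vram - evict all BOs from VRAM
 *
 * @adev: amdgpu device object
 *
 * Used during suspend and driver teardown to move VRAM contents to
 * evictable placements.
 *
 * Returns 0 on success, negative error code otherwise.
 */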
int amdgpu_bo_evict_vram(struct amdgpu_device *adev)
{
        /* late 2.6.33 fix IGP hibernate - we need pm ops to do this correct */
        if (0 && (adev->flags & AMD_IS_APU)) {
                /* Useless to evict on IGP chips */
                return 0;
        }
        return ttm_bo_evict_mm(&adev->mman.bdev, TTM_PL_VRAM);
}

static const char *amdgpu_vram_names[] = {
        "UNKNOWN",
        "GDDR1",
        "DDR2",
        "GDDR3",
        "GDDR4",
        "GDDR5",
        "HBM",
        "DDR3"
};

int amdgpu_bo_init(struct amdgpu_device *adev)
{
        /* reserve PAT memory space to WC for VRAM */
        arch_io_reserve_memtype_wc(adev->mc.aper_base,
                                   adev->mc.aper_size);

        /* Add an MTRR for the VRAM */
        adev->mc.vram_mtrr = arch_phys_wc_add(adev->mc.aper_base,
                                              adev->mc.aper_size);
        DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n",
                adev->mc.mc_vram_size >> 20,
                (unsigned long long)adev->mc.aper_size >> 20);
        DRM_INFO("RAM width %dbits %s\n",
                 adev->mc.vram_width, amdgpu_vram_names[adev->mc.vram_type]);
        return amdgpu_ttm_init(adev);
}

void amdgpu_bo_fini(struct amdgpu_device *adev)
{
        amdgpu_ttm_fini(adev);
        arch_phys_wc_del(adev->mc.vram_mtrr);
        arch_io_free_memtype_wc(adev->mc.aper_base, adev->mc.aper_size);
}

int amdgpu_bo_fbdev_mmap(struct amdgpu_bo *bo,
                         struct vm_area_struct *vma)
{
        return ttm_fbdev_mmap(vma, &bo->tbo);
}

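/**
 * amdgpu_bo_set_tiling_flags - set tiling flags on a BO
 *
 * @bo: BO to update
 * @tiling_flags: new AMDGPU_TILING_* flags to store
 *
 * Returns 0 on success, -EINVAL if the TILE_SPLIT value is out of range
 * for the chip family.
 */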
int amdgpu_bo_set_tiling_flags(struct amdgpu_bo *bo, u64 tiling_flags)
{
        struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);

        if (adev->family <= AMDGPU_FAMILY_CZ &&
            AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT) > 6)
                return -EINVAL;

        bo->tiling_flags = tiling_flags;
        return 0;
}

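/**
 * amdgpu_bo_get_tiling_flags - read back a BO's tiling flags
 *
 * @bo: BO to query; must be reserved
 * @tiling_flags: optional returned flags
 */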
void amdgpu_bo_get_tiling_flags(struct amdgpu_bo *bo, u64 *tiling_flags)
{
        lockdep_assert_held(&bo->tbo.resv->lock.base);

        if (tiling_flags)
                *tiling_flags = bo->tiling_flags;
}

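/**
 * amdgpu_bo_set_metadata - attach opaque metadata to a BO
 *
 * @bo: BO to update
 * @metadata: metadata to copy, or NULL together with @metadata_size == 0
 *	to clear the stored metadata
 * @metadata_size: size of @metadata in bytes
 * @flags: metadata flags stored alongside the data
 *
 * Returns 0 on success, negative error code otherwise.
 */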
int amdgpu_bo_set_metadata(struct amdgpu_bo *bo, void *metadata,
                           uint32_t metadata_size, uint64_t flags)
{
        void *buffer;

        if (!metadata_size) {
                if (bo->metadata_size) {
                        kfree(bo->metadata);
                        bo->metadata = NULL;
                        bo->metadata_size = 0;
                }
                return 0;
        }

        if (metadata == NULL)
                return -EINVAL;

        buffer = kmemdup(metadata, metadata_size, GFP_KERNEL);
        if (buffer == NULL)
                return -ENOMEM;

        kfree(bo->metadata);
        bo->metadata_flags = flags;
        bo->metadata = buffer;
        bo->metadata_size = metadata_size;

        return 0;
}

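/**
 * amdgpu_bo_get_metadata - read back a BO's metadata
 *
 * @bo: BO to query
 * @buffer: optional buffer the metadata is copied into
 * @buffer_size: size of @buffer in bytes; must fit the stored metadata
 * @metadata_size: optional returned size of the stored metadata
 * @flags: optional returned metadata flags
 *
 * Returns 0 on success, -EINVAL if neither @buffer nor @metadata_size is
 * provided or if @buffer is too small.
 */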
int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
                           size_t buffer_size, uint32_t *metadata_size,
                           uint64_t *flags)
{
        if (!buffer && !metadata_size)
                return -EINVAL;

        if (buffer) {
                if (buffer_size < bo->metadata_size)
                        return -EINVAL;

                if (bo->metadata_size)
                        memcpy(buffer, bo->metadata, bo->metadata_size);
        }

        if (metadata_size)
                *metadata_size = bo->metadata_size;
        if (flags)
                *flags = bo->metadata_flags;

        return 0;
}

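/**
 * amdgpu_bo_move_notify - TTM notification that a BO is about to move
 *
 * @bo: TTM BO being moved
 * @evict: true if the move is an eviction
 * @new_mem: new placement, or NULL when the backing store is destroyed
 *
 * Invalidates the BO's VM mappings, drops any kernel mapping and updates
 * the eviction and move statistics.
 */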
void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
                           bool evict,
                           struct ttm_mem_reg *new_mem)
{
        struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
        struct amdgpu_bo *abo;
        struct ttm_mem_reg *old_mem = &bo->mem;

        if (!amdgpu_ttm_bo_is_amdgpu_bo(bo))
                return;

        abo = container_of(bo, struct amdgpu_bo, tbo);
        amdgpu_vm_bo_invalidate(adev, abo);

        amdgpu_bo_kunmap(abo);

        /* remember the eviction */
        if (evict)
                atomic64_inc(&adev->num_evictions);

        /* update statistics */
        if (!new_mem)
                return;

        /* move_notify is called before move happens */
        trace_amdgpu_ttm_bo_move(abo, new_mem->mem_type, old_mem->mem_type);
}

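/**
 * amdgpu_bo_fault_reserve_notify - handle a CPU fault on an amdgpu BO
 *
 * @bo: TTM BO that faulted
 *
 * Called by TTM with the BO reserved. If the faulting page lies outside
 * CPU-visible VRAM, tries to move the BO into the visible window, with
 * GTT as the only busy fallback, so the fault can be served.
 *
 * Returns 0 on success, negative error code otherwise.
 */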
int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
{
        struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
        struct amdgpu_bo *abo;
        unsigned long offset, size;
        int r;

        if (!amdgpu_ttm_bo_is_amdgpu_bo(bo))
                return 0;

        abo = container_of(bo, struct amdgpu_bo, tbo);

        /* Remember that this BO was accessed by the CPU */
        abo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;

        if (bo->mem.mem_type != TTM_PL_VRAM)
                return 0;

        size = bo->mem.num_pages << PAGE_SHIFT;
        offset = bo->mem.start << PAGE_SHIFT;
        if ((offset + size) <= adev->mc.visible_vram_size)
                return 0;

        /* Can't move a pinned BO to visible VRAM */
        if (abo->pin_count > 0)
                return -EINVAL;

        /* hurrah, the memory is not visible! */
        atomic64_inc(&adev->num_vram_cpu_page_faults);
        amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM |
                                         AMDGPU_GEM_DOMAIN_GTT);

        /* Avoid costly evictions; only set GTT as a busy placement */
        abo->placement.num_busy_placement = 1;
        abo->placement.busy_placement = &abo->placements[1];

        r = ttm_bo_validate(bo, &abo->placement, false, false);
        if (unlikely(r != 0))
                return r;

        offset = bo->mem.start << PAGE_SHIFT;
        /* this should never happen */
        if (bo->mem.mem_type == TTM_PL_VRAM &&
            (offset + size) > adev->mc.visible_vram_size)
                return -EINVAL;

        return 0;
}

/**
 * amdgpu_bo_fence - add fence to buffer object
 *
 * @bo: buffer object in question
 * @fence: fence to add
 * @shared: true if fence should be added shared
 */
void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
                     bool shared)
{
        struct reservation_object *resv = bo->tbo.resv;

        if (shared)
                reservation_object_add_shared_fence(resv, fence);
        else
                reservation_object_add_excl_fence(resv, fence);
}

/**
 * amdgpu_bo_gpu_offset - return GPU offset of bo
 * @bo: amdgpu object for which we query the offset
 *
 * Returns current GPU offset of the object.
 *
 * Note: object should either be pinned or reserved when calling this
 * function; the WARN_ON_ONCE checks below catch misuse for debugging.
 */
u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo)
{
        WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_SYSTEM);
        WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_TT &&
                     !amdgpu_ttm_is_bound(bo->tbo.ttm));
        WARN_ON_ONCE(!ww_mutex_is_locked(&bo->tbo.resv->lock) &&
                     !bo->pin_count);
        WARN_ON_ONCE(bo->tbo.mem.start == AMDGPU_BO_INVALID_OFFSET);
        WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_VRAM &&
                     !(bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS));

        return bo->tbo.offset;
}