linux/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
   1/*
   2 * Copyright 2009 Jerome Glisse.
   3 * All Rights Reserved.
   4 *
   5 * Permission is hereby granted, free of charge, to any person obtaining a
   6 * copy of this software and associated documentation files (the
   7 * "Software"), to deal in the Software without restriction, including
   8 * without limitation the rights to use, copy, modify, merge, publish,
   9 * distribute, sub license, and/or sell copies of the Software, and to
  10 * permit persons to whom the Software is furnished to do so, subject to
  11 * the following conditions:
  12 *
  13 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  14 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  15 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
  16 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
  17 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
  18 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
  19 * USE OR OTHER DEALINGS IN THE SOFTWARE.
  20 *
  21 * The above copyright notice and this permission notice (including the
  22 * next paragraph) shall be included in all copies or substantial portions
  23 * of the Software.
  24 *
  25 */
  26/*
  27 * Authors:
  28 *    Jerome Glisse <glisse@freedesktop.org>
  29 *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
  30 *    Dave Airlie
  31 */
  32
  33#include <linux/dma-mapping.h>
  34#include <linux/iommu.h>
  35#include <linux/hmm.h>
  36#include <linux/pagemap.h>
  37#include <linux/sched/task.h>
  38#include <linux/sched/mm.h>
  39#include <linux/seq_file.h>
  40#include <linux/slab.h>
  41#include <linux/swap.h>
  42#include <linux/swiotlb.h>
  43#include <linux/dma-buf.h>
  44#include <linux/sizes.h>
  45
  46#include <drm/ttm/ttm_bo_api.h>
  47#include <drm/ttm/ttm_bo_driver.h>
  48#include <drm/ttm/ttm_placement.h>
  49#include <drm/ttm/ttm_module.h>
  50
  51#include <drm/drm_debugfs.h>
  52#include <drm/amdgpu_drm.h>
  53
  54#include "amdgpu.h"
  55#include "amdgpu_object.h"
  56#include "amdgpu_trace.h"
  57#include "amdgpu_amdkfd.h"
  58#include "amdgpu_sdma.h"
  59#include "amdgpu_ras.h"
  60#include "amdgpu_atomfirmware.h"
  61#include "bif/bif_4_1_d.h"
  62
  63#define AMDGPU_TTM_VRAM_MAX_DW_READ     (size_t)128
  64
  65static int amdgpu_ttm_backend_bind(struct ttm_bo_device *bdev,
  66                                   struct ttm_tt *ttm,
  67                                   struct ttm_resource *bo_mem);
  68static void amdgpu_ttm_backend_unbind(struct ttm_bo_device *bdev,
  69                                      struct ttm_tt *ttm);
  70
  71static int amdgpu_ttm_init_on_chip(struct amdgpu_device *adev,
  72                                    unsigned int type,
  73                                    uint64_t size_in_page)
  74{
  75        return ttm_range_man_init(&adev->mman.bdev, type,
  76                                  false, size_in_page);
  77}
  78
  79/**
  80 * amdgpu_evict_flags - Compute placement flags
  81 *
  82 * @bo: The buffer object to evict
  83 * @placement: Possible destination(s) for evicted BO
  84 *
  85 * Fill in placement data when ttm_bo_evict() is called
  86 */
  87static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
  88                                struct ttm_placement *placement)
  89{
  90        struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
  91        struct amdgpu_bo *abo;
  92        static const struct ttm_place placements = {
  93                .fpfn = 0,
  94                .lpfn = 0,
  95                .mem_type = TTM_PL_SYSTEM,
  96                .flags = 0
  97        };
  98
  99        /* Don't handle scatter gather BOs */
 100        if (bo->type == ttm_bo_type_sg) {
 101                placement->num_placement = 0;
 102                placement->num_busy_placement = 0;
 103                return;
 104        }
 105
 106        /* Object isn't an AMDGPU object so ignore */
 107        if (!amdgpu_bo_is_amdgpu_bo(bo)) {
 108                placement->placement = &placements;
 109                placement->busy_placement = &placements;
 110                placement->num_placement = 1;
 111                placement->num_busy_placement = 1;
 112                return;
 113        }
 114
 115        abo = ttm_to_amdgpu_bo(bo);
 116        switch (bo->mem.mem_type) {
 117        case AMDGPU_PL_GDS:
 118        case AMDGPU_PL_GWS:
 119        case AMDGPU_PL_OA:
 120                placement->num_placement = 0;
 121                placement->num_busy_placement = 0;
 122                return;
 123
 124        case TTM_PL_VRAM:
 125                if (!adev->mman.buffer_funcs_enabled) {
 126                        /* Move to system memory */
 127                        amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
 128                } else if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
 129                           !(abo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) &&
 130                           amdgpu_bo_in_cpu_visible_vram(abo)) {
 131
 132                        /* Try evicting to the CPU inaccessible part of VRAM
 133                         * first, but only set GTT as busy placement, so this
 134                         * BO will be evicted to GTT rather than causing other
 135                         * BOs to be evicted from VRAM
 136                         */
 137                        amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM |
 138                                                         AMDGPU_GEM_DOMAIN_GTT);
 139                        abo->placements[0].fpfn = adev->gmc.visible_vram_size >> PAGE_SHIFT;
 140                        abo->placements[0].lpfn = 0;
 141                        abo->placement.busy_placement = &abo->placements[1];
 142                        abo->placement.num_busy_placement = 1;
 143                } else {
 144                        /* Move to GTT memory */
 145                        amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT);
 146                }
 147                break;
 148        case TTM_PL_TT:
 149        default:
 150                amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
 151                break;
 152        }
 153        *placement = abo->placement;
 154}
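
/*
 * Illustration of the eviction trick above (a sketch of the resulting
 * placement list, not extra driver logic): after
 * amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM |
 * AMDGPU_GEM_DOMAIN_GTT) the list is roughly
 *
 *	placements[0] = VRAM, with fpfn raised above visible_vram_size
 *	placements[1] = GTT
 *
 * Pointing busy_placement at &placements[1] alone means a contended BO
 * falls back to GTT instead of forcing other BOs out of VRAM.
 */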
 155
 156/**
 157 * amdgpu_verify_access - Verify access for a mmap call
 158 *
 159 * @bo: The buffer object to map
 160 * @filp: The file pointer from the process performing the mmap
 161 *
 162 * This is called by ttm_bo_mmap() to verify whether a process
 163 * has the right to mmap a BO into its address space.
 164 */
 165static int amdgpu_verify_access(struct ttm_buffer_object *bo, struct file *filp)
 166{
 167        struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
 168
 169        /*
 170         * Don't verify access for KFD BOs. They don't have a GEM
 171         * object associated with them.
 172         */
 173        if (abo->kfd_bo)
 174                return 0;
 175
 176        if (amdgpu_ttm_tt_get_usermm(bo->ttm))
 177                return -EPERM;
 178        return drm_vma_node_verify_access(&abo->tbo.base.vma_node,
 179                                          filp->private_data);
 180}
 181
 182/**
 183 * amdgpu_mm_node_addr - Compute the GPU relative offset of a GTT buffer.
 184 *
 185 * @bo: The bo to assign the memory to.
 186 * @mm_node: Memory manager node for drm allocator.
 187 * @mem: The region where the bo resides.
 188 *
 189 */
 190static uint64_t amdgpu_mm_node_addr(struct ttm_buffer_object *bo,
 191                                    struct drm_mm_node *mm_node,
 192                                    struct ttm_resource *mem)
 193{
 194        uint64_t addr = 0;
 195
 196        if (mm_node->start != AMDGPU_BO_INVALID_OFFSET) {
 197                addr = mm_node->start << PAGE_SHIFT;
 198                addr += amdgpu_ttm_domain_start(amdgpu_ttm_adev(bo->bdev),
 199                                                mem->mem_type);
 200        }
 201        return addr;
 202}
 203
 204/**
 205 * amdgpu_find_mm_node - Helper function that finds the drm_mm_node corresponding
 206 * to @offset and adjusts the offset to be relative to the returned drm_mm_node.
 207 *
 208 * @mem: The region where the bo resides.
 209 * @offset: The offset that drm_mm_node is used for finding.
 210 *
 211 */
 212static struct drm_mm_node *amdgpu_find_mm_node(struct ttm_resource *mem,
 213                                               uint64_t *offset)
 214{
 215        struct drm_mm_node *mm_node = mem->mm_node;
 216
 217        while (*offset >= (mm_node->size << PAGE_SHIFT)) {
 218                *offset -= (mm_node->size << PAGE_SHIFT);
 219                ++mm_node;
 220        }
 221        return mm_node;
 222}
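
/*
 * Minimal usage sketch for the helper above (illustrative only; assumes
 * @mem is backed by an array of drm_mm nodes, as VRAM resources are here):
 *
 *	uint64_t offset = byte_offset_into_bo;
 *	struct drm_mm_node *node = amdgpu_find_mm_node(mem, &offset);
 *	uint64_t phys = (node->start << PAGE_SHIFT) + offset;
 *
 * On return @offset is the remaining offset inside the returned node, which
 * is how amdgpu_ttm_io_mem_pfn() and amdgpu_ttm_access_memory() below
 * consume it.
 */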
 223
 224/**
 225 * amdgpu_ttm_map_buffer - Map memory into the GART windows
 226 * @bo: buffer object to map
 227 * @mem: memory object to map
 228 * @mm_node: drm_mm node object to map
 229 * @num_pages: number of pages to map
 230 * @offset: offset into @mm_node where to start
 231 * @window: which GART window to use
 232 * @ring: DMA ring to use for the copy
 233 * @tmz: if we should set up a TMZ enabled mapping
 234 * @addr: resulting address inside the MC address space
 235 *
 236 * Setup one of the GART windows to access a specific piece of memory or return
 237 * the physical address for local memory.
 238 */
 239static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo,
 240                                 struct ttm_resource *mem,
 241                                 struct drm_mm_node *mm_node,
 242                                 unsigned num_pages, uint64_t offset,
 243                                 unsigned window, struct amdgpu_ring *ring,
 244                                 bool tmz, uint64_t *addr)
 245{
 246        struct amdgpu_device *adev = ring->adev;
 247        struct amdgpu_job *job;
 248        unsigned num_dw, num_bytes;
 249        struct dma_fence *fence;
 250        uint64_t src_addr, dst_addr;
 251        void *cpu_addr;
 252        uint64_t flags;
 253        unsigned int i;
 254        int r;
 255
 256        BUG_ON(adev->mman.buffer_funcs->copy_max_bytes <
 257               AMDGPU_GTT_MAX_TRANSFER_SIZE * 8);
 258
 259        /* Map only what can't be accessed directly */
 260        if (!tmz && mem->start != AMDGPU_BO_INVALID_OFFSET) {
 261                *addr = amdgpu_mm_node_addr(bo, mm_node, mem) + offset;
 262                return 0;
 263        }
 264
 265        *addr = adev->gmc.gart_start;
 266        *addr += (u64)window * AMDGPU_GTT_MAX_TRANSFER_SIZE *
 267                AMDGPU_GPU_PAGE_SIZE;
 268        *addr += offset & ~PAGE_MASK;
 269
 270        num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8);
 271        num_bytes = num_pages * 8;
 272
 273        r = amdgpu_job_alloc_with_ib(adev, num_dw * 4 + num_bytes,
 274                                     AMDGPU_IB_POOL_DELAYED, &job);
 275        if (r)
 276                return r;
 277
 278        src_addr = num_dw * 4;
 279        src_addr += job->ibs[0].gpu_addr;
 280
 281        dst_addr = amdgpu_bo_gpu_offset(adev->gart.bo);
 282        dst_addr += window * AMDGPU_GTT_MAX_TRANSFER_SIZE * 8;
 283        amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr,
 284                                dst_addr, num_bytes, false);
 285
 286        amdgpu_ring_pad_ib(ring, &job->ibs[0]);
 287        WARN_ON(job->ibs[0].length_dw > num_dw);
 288
 289        flags = amdgpu_ttm_tt_pte_flags(adev, bo->ttm, mem);
 290        if (tmz)
 291                flags |= AMDGPU_PTE_TMZ;
 292
 293        cpu_addr = &job->ibs[0].ptr[num_dw];
 294
 295        if (mem->mem_type == TTM_PL_TT) {
 296                dma_addr_t *dma_address;
 297
 298                dma_address = &bo->ttm->dma_address[offset >> PAGE_SHIFT];
 299                r = amdgpu_gart_map(adev, 0, num_pages, dma_address, flags,
 300                                    cpu_addr);
 301                if (r)
 302                        goto error_free;
 303        } else {
 304                dma_addr_t dma_address;
 305
 306                dma_address = (mm_node->start << PAGE_SHIFT) + offset;
 307                dma_address += adev->vm_manager.vram_base_offset;
 308
 309                for (i = 0; i < num_pages; ++i) {
 310                        r = amdgpu_gart_map(adev, i << PAGE_SHIFT, 1,
 311                                            &dma_address, flags, cpu_addr);
 312                        if (r)
 313                                goto error_free;
 314
 315                        dma_address += PAGE_SIZE;
 316                }
 317        }
 318
 319        r = amdgpu_job_submit(job, &adev->mman.entity,
 320                              AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
 321        if (r)
 322                goto error_free;
 323
 324        dma_fence_put(fence);
 325
 326        return r;
 327
 328error_free:
 329        amdgpu_job_free(job);
 330        return r;
 331}
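
/*
 * Worked example of the window addressing above (illustrative only):
 * window 1 starts at
 *
 *	gart_start + 1 * AMDGPU_GTT_MAX_TRANSFER_SIZE * AMDGPU_GPU_PAGE_SIZE
 *
 * and the sub-page bits of @offset (offset & ~PAGE_MASK) are added on top,
 * so the GART entries stay page aligned while the returned address still
 * points at the exact byte requested.
 */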
 332
 333/**
 334 * amdgpu_ttm_copy_mem_to_mem - Helper function for copy
 335 * @adev: amdgpu device
 336 * @src: buffer/address where to read from
 337 * @dst: buffer/address where to write to
 338 * @size: number of bytes to copy
 339 * @tmz: if a secure copy should be used
 340 * @resv: resv object to sync to
 341 * @f: Returns the last fence if multiple jobs are submitted.
 342 *
 343 * The function copies @size bytes from {src->mem + src->offset} to
 344 * {dst->mem + dst->offset}. src->bo and dst->bo could be same BO for a
 345 * move and different for a BO to BO copy.
 346 *
 347 */
 348int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
 349                               const struct amdgpu_copy_mem *src,
 350                               const struct amdgpu_copy_mem *dst,
 351                               uint64_t size, bool tmz,
 352                               struct dma_resv *resv,
 353                               struct dma_fence **f)
 354{
 355        const uint32_t GTT_MAX_BYTES = (AMDGPU_GTT_MAX_TRANSFER_SIZE *
 356                                        AMDGPU_GPU_PAGE_SIZE);
 357
 358        uint64_t src_node_size, dst_node_size, src_offset, dst_offset;
 359        struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
 360        struct drm_mm_node *src_mm, *dst_mm;
 361        struct dma_fence *fence = NULL;
 362        int r = 0;
 363
 364        if (!adev->mman.buffer_funcs_enabled) {
 365                DRM_ERROR("Trying to move memory with ring turned off.\n");
 366                return -EINVAL;
 367        }
 368
 369        src_offset = src->offset;
 370        if (src->mem->mm_node) {
 371                src_mm = amdgpu_find_mm_node(src->mem, &src_offset);
 372                src_node_size = (src_mm->size << PAGE_SHIFT) - src_offset;
 373        } else {
 374                src_mm = NULL;
 375                src_node_size = ULLONG_MAX;
 376        }
 377
 378        dst_offset = dst->offset;
 379        if (dst->mem->mm_node) {
 380                dst_mm = amdgpu_find_mm_node(dst->mem, &dst_offset);
 381                dst_node_size = (dst_mm->size << PAGE_SHIFT) - dst_offset;
 382        } else {
 383                dst_mm = NULL;
 384                dst_node_size = ULLONG_MAX;
 385        }
 386
 387        mutex_lock(&adev->mman.gtt_window_lock);
 388
 389        while (size) {
 390                uint32_t src_page_offset = src_offset & ~PAGE_MASK;
 391                uint32_t dst_page_offset = dst_offset & ~PAGE_MASK;
 392                struct dma_fence *next;
 393                uint32_t cur_size;
 394                uint64_t from, to;
 395
 396                /* Copy size cannot exceed GTT_MAX_BYTES. So if src or dst
 397                 * begins at an offset, then adjust the size accordingly
 398                 */
 399                cur_size = max(src_page_offset, dst_page_offset);
 400                cur_size = min(min3(src_node_size, dst_node_size, size),
 401                               (uint64_t)(GTT_MAX_BYTES - cur_size));
 402
 403                /* Map src to window 0 and dst to window 1. */
 404                r = amdgpu_ttm_map_buffer(src->bo, src->mem, src_mm,
 405                                          PFN_UP(cur_size + src_page_offset),
 406                                          src_offset, 0, ring, tmz, &from);
 407                if (r)
 408                        goto error;
 409
 410                r = amdgpu_ttm_map_buffer(dst->bo, dst->mem, dst_mm,
 411                                          PFN_UP(cur_size + dst_page_offset),
 412                                          dst_offset, 1, ring, tmz, &to);
 413                if (r)
 414                        goto error;
 415
 416                r = amdgpu_copy_buffer(ring, from, to, cur_size,
 417                                       resv, &next, false, true, tmz);
 418                if (r)
 419                        goto error;
 420
 421                dma_fence_put(fence);
 422                fence = next;
 423
 424                size -= cur_size;
 425                if (!size)
 426                        break;
 427
 428                src_node_size -= cur_size;
 429                if (!src_node_size) {
 430                        ++src_mm;
 431                        src_node_size = src_mm->size << PAGE_SHIFT;
 432                        src_offset = 0;
 433                } else {
 434                        src_offset += cur_size;
 435                }
 436
 437                dst_node_size -= cur_size;
 438                if (!dst_node_size) {
 439                        ++dst_mm;
 440                        dst_node_size = dst_mm->size << PAGE_SHIFT;
 441                        dst_offset = 0;
 442                } else {
 443                        dst_offset += cur_size;
 444                }
 445        }
 446error:
 447        mutex_unlock(&adev->mman.gtt_window_lock);
 448        if (f)
 449                *f = dma_fence_get(fence);
 450        dma_fence_put(fence);
 451        return r;
 452}
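
/*
 * Chunking sketch for the loop above, with made-up numbers: if GTT_MAX_BYTES
 * were 2 MiB, src started 0x400 bytes into a page and dst was page aligned,
 * one iteration would pick
 *
 *	cur_size = max(0x400, 0) = 0x400
 *	cur_size = min(min3(src_node_size, dst_node_size, size),
 *		       2 MiB - 0x400)
 *
 * so a single pass never needs more than one GART window per side and never
 * crosses a drm_mm node boundary.
 */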
 453
 454/*
 455 * amdgpu_move_blit - Copy an entire buffer to another buffer
 456 *
 457 * This is a helper called by amdgpu_bo_move() to help move buffers to and
 458 * from VRAM.
 459 */
 460static int amdgpu_move_blit(struct ttm_buffer_object *bo,
 461                            bool evict,
 462                            struct ttm_resource *new_mem,
 463                            struct ttm_resource *old_mem)
 464{
 465        struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
 466        struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
 467        struct amdgpu_copy_mem src, dst;
 468        struct dma_fence *fence = NULL;
 469        int r;
 470
 471        src.bo = bo;
 472        dst.bo = bo;
 473        src.mem = old_mem;
 474        dst.mem = new_mem;
 475        src.offset = 0;
 476        dst.offset = 0;
 477
 478        r = amdgpu_ttm_copy_mem_to_mem(adev, &src, &dst,
 479                                       new_mem->num_pages << PAGE_SHIFT,
 480                                       amdgpu_bo_encrypted(abo),
 481                                       bo->base.resv, &fence);
 482        if (r)
 483                goto error;
 484
 485        /* clear the space being freed */
 486        if (old_mem->mem_type == TTM_PL_VRAM &&
 487            (abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE)) {
 488                struct dma_fence *wipe_fence = NULL;
 489
 490                r = amdgpu_fill_buffer(ttm_to_amdgpu_bo(bo), AMDGPU_POISON,
 491                                       NULL, &wipe_fence);
 492                if (r) {
 493                        goto error;
 494                } else if (wipe_fence) {
 495                        dma_fence_put(fence);
 496                        fence = wipe_fence;
 497                }
 498        }
 499
 500        /* Always block for VM page tables before committing the new location */
 501        if (bo->type == ttm_bo_type_kernel)
 502                r = ttm_bo_move_accel_cleanup(bo, fence, true, false, new_mem);
 503        else
 504                r = ttm_bo_move_accel_cleanup(bo, fence, evict, true, new_mem);
 505        dma_fence_put(fence);
 506        return r;
 507
 508error:
 509        if (fence)
 510                dma_fence_wait(fence, false);
 511        dma_fence_put(fence);
 512        return r;
 513}
 514
 515/*
 516 * amdgpu_mem_visible - Check that memory can be accessed by ttm_bo_move_memcpy
 517 *
 518 * Called by amdgpu_bo_move()
 519 */
 520static bool amdgpu_mem_visible(struct amdgpu_device *adev,
 521                               struct ttm_resource *mem)
 522{
 523        struct drm_mm_node *nodes = mem->mm_node;
 524
 525        if (mem->mem_type == TTM_PL_SYSTEM ||
 526            mem->mem_type == TTM_PL_TT)
 527                return true;
 528        if (mem->mem_type != TTM_PL_VRAM)
 529                return false;
 530
 531        /* ttm_resource_ioremap only supports contiguous memory */
 532        if (nodes->size != mem->num_pages)
 533                return false;
 534
 535        return ((nodes->start + nodes->size) << PAGE_SHIFT)
 536                <= adev->gmc.visible_vram_size;
 537}
 538
 539/*
 540 * amdgpu_bo_move - Move a buffer object to a new memory location
 541 *
 542 * Called by ttm_bo_handle_move_mem()
 543 */
 544static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
 545                          struct ttm_operation_ctx *ctx,
 546                          struct ttm_resource *new_mem,
 547                          struct ttm_place *hop)
 548{
 549        struct amdgpu_device *adev;
 550        struct amdgpu_bo *abo;
 551        struct ttm_resource *old_mem = &bo->mem;
 552        int r;
 553
 554        if (new_mem->mem_type == TTM_PL_TT) {
 555                r = amdgpu_ttm_backend_bind(bo->bdev, bo->ttm, new_mem);
 556                if (r)
 557                        return r;
 558        }
 559
 560        /* Can't move a pinned BO */
 561        abo = ttm_to_amdgpu_bo(bo);
 562        if (WARN_ON_ONCE(abo->tbo.pin_count > 0))
 563                return -EINVAL;
 564
 565        adev = amdgpu_ttm_adev(bo->bdev);
 566
 567        if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
 568                ttm_bo_move_null(bo, new_mem);
 569                goto out;
 570        }
 571        if (old_mem->mem_type == TTM_PL_SYSTEM &&
 572            new_mem->mem_type == TTM_PL_TT) {
 573                ttm_bo_move_null(bo, new_mem);
 574                goto out;
 575        }
 576        if (old_mem->mem_type == TTM_PL_TT &&
 577            new_mem->mem_type == TTM_PL_SYSTEM) {
 578                r = ttm_bo_wait_ctx(bo, ctx);
 579                if (r)
 580                        return r;
 581
 582                amdgpu_ttm_backend_unbind(bo->bdev, bo->ttm);
 583                ttm_resource_free(bo, &bo->mem);
 584                ttm_bo_assign_mem(bo, new_mem);
 585                goto out;
 586        }
 587
 588        if (old_mem->mem_type == AMDGPU_PL_GDS ||
 589            old_mem->mem_type == AMDGPU_PL_GWS ||
 590            old_mem->mem_type == AMDGPU_PL_OA ||
 591            new_mem->mem_type == AMDGPU_PL_GDS ||
 592            new_mem->mem_type == AMDGPU_PL_GWS ||
 593            new_mem->mem_type == AMDGPU_PL_OA) {
 594                /* Nothing to save here */
 595                ttm_bo_move_null(bo, new_mem);
 596                goto out;
 597        }
 598
 599        if (adev->mman.buffer_funcs_enabled) {
 600                if (((old_mem->mem_type == TTM_PL_SYSTEM &&
 601                      new_mem->mem_type == TTM_PL_VRAM) ||
 602                     (old_mem->mem_type == TTM_PL_VRAM &&
 603                      new_mem->mem_type == TTM_PL_SYSTEM))) {
 604                        hop->fpfn = 0;
 605                        hop->lpfn = 0;
 606                        hop->mem_type = TTM_PL_TT;
 607                        hop->flags = 0;
 608                        return -EMULTIHOP;
 609                }
 610
 611                r = amdgpu_move_blit(bo, evict, new_mem, old_mem);
 612        } else {
 613                r = -ENODEV;
 614        }
 615
 616        if (r) {
 617                /* Check that all memory is CPU accessible */
 618                if (!amdgpu_mem_visible(adev, old_mem) ||
 619                    !amdgpu_mem_visible(adev, new_mem)) {
 620                        pr_err("Move buffer fallback to memcpy unavailable\n");
 621                        return r;
 622                }
 623
 624                r = ttm_bo_move_memcpy(bo, ctx, new_mem);
 625                if (r)
 626                        return r;
 627        }
 628
 629        if (bo->type == ttm_bo_type_device &&
 630            new_mem->mem_type == TTM_PL_VRAM &&
 631            old_mem->mem_type != TTM_PL_VRAM) {
 632                /* amdgpu_bo_fault_reserve_notify will re-set this if the CPU
 633                 * accesses the BO after it's moved.
 634                 */
 635                abo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
 636        }
 637
 638out:
 639        /* update statistics */
 640        atomic64_add((u64)bo->num_pages << PAGE_SHIFT, &adev->num_bytes_moved);
 641        amdgpu_bo_move_notify(bo, evict, new_mem);
 642        return 0;
 643}
 644
 645/*
 646 * amdgpu_ttm_io_mem_reserve - Reserve a block of memory during a fault
 647 *
 648 * Called by ttm_mem_io_reserve() ultimately via ttm_bo_vm_fault()
 649 */
 650static int amdgpu_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_resource *mem)
 651{
 652        struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
 653        struct drm_mm_node *mm_node = mem->mm_node;
 654        size_t bus_size = (size_t)mem->num_pages << PAGE_SHIFT;
 655
 656        switch (mem->mem_type) {
 657        case TTM_PL_SYSTEM:
 658                /* system memory */
 659                return 0;
 660        case TTM_PL_TT:
 661                break;
 662        case TTM_PL_VRAM:
 663                mem->bus.offset = mem->start << PAGE_SHIFT;
 664                /* check if it's visible */
 665                if ((mem->bus.offset + bus_size) > adev->gmc.visible_vram_size)
 666                        return -EINVAL;
 667                /* Only physically contiguous buffers apply. In a contiguous
 668                 * buffer, size of the first mm_node would match the number of
 669                 * pages in ttm_resource.
 670                 */
 671                if (adev->mman.aper_base_kaddr &&
 672                    (mm_node->size == mem->num_pages))
 673                        mem->bus.addr = (u8 *)adev->mman.aper_base_kaddr +
 674                                        mem->bus.offset;
 675
 676                mem->bus.offset += adev->gmc.aper_base;
 677                mem->bus.is_iomem = true;
 678                mem->bus.caching = ttm_write_combined;
 679                break;
 680        default:
 681                return -EINVAL;
 682        }
 683        return 0;
 684}
 685
 686static unsigned long amdgpu_ttm_io_mem_pfn(struct ttm_buffer_object *bo,
 687                                           unsigned long page_offset)
 688{
 689        struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
 690        uint64_t offset = (page_offset << PAGE_SHIFT);
 691        struct drm_mm_node *mm;
 692
 693        mm = amdgpu_find_mm_node(&bo->mem, &offset);
 694        offset += adev->gmc.aper_base;
 695        return mm->start + (offset >> PAGE_SHIFT);
 696}
 697
 698/**
 699 * amdgpu_ttm_domain_start - Returns GPU start address
 700 * @adev: amdgpu device object
 701 * @type: type of the memory
 702 *
 703 * Returns:
 704 * GPU start address of a memory domain
 705 */
 706
 707uint64_t amdgpu_ttm_domain_start(struct amdgpu_device *adev, uint32_t type)
 708{
 709        switch (type) {
 710        case TTM_PL_TT:
 711                return adev->gmc.gart_start;
 712        case TTM_PL_VRAM:
 713                return adev->gmc.vram_start;
 714        }
 715
 716        return 0;
 717}
 718
 719/*
 720 * TTM backend functions.
 721 */
 722struct amdgpu_ttm_tt {
 723        struct ttm_tt   ttm;
 724        struct drm_gem_object   *gobj;
 725        u64                     offset;
 726        uint64_t                userptr;
 727        struct task_struct      *usertask;
 728        uint32_t                userflags;
 729        bool                    bound;
 730#if IS_ENABLED(CONFIG_DRM_AMDGPU_USERPTR)
 731        struct hmm_range        *range;
 732#endif
 733};
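
/*
 * Note on the "(void *)ttm" casts used throughout this file: struct ttm_tt
 * is the first member of struct amdgpu_ttm_tt, so the cast is equivalent to
 * a container_of(), e.g. (hypothetical helper, shown only for clarity):
 *
 *	static inline struct amdgpu_ttm_tt *to_amdgpu_ttm_tt(struct ttm_tt *ttm)
 *	{
 *		return container_of(ttm, struct amdgpu_ttm_tt, ttm);
 *	}
 */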
 734
 735#ifdef CONFIG_DRM_AMDGPU_USERPTR
 736/*
 737 * amdgpu_ttm_tt_get_user_pages - get device accessible pages that back user
 738 * memory and start HMM tracking CPU page table update
 739 *
 740 * Calling function must call amdgpu_ttm_tt_get_user_pages_done() once and only
 741 * once afterwards to stop HMM tracking
 742 */
 743int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages)
 744{
 745        struct ttm_tt *ttm = bo->tbo.ttm;
 746        struct amdgpu_ttm_tt *gtt = (void *)ttm;
 747        unsigned long start = gtt->userptr;
 748        struct vm_area_struct *vma;
 749        struct hmm_range *range;
 750        unsigned long timeout;
 751        struct mm_struct *mm;
 752        unsigned long i;
 753        int r = 0;
 754
 755        mm = bo->notifier.mm;
 756        if (unlikely(!mm)) {
 757                DRM_DEBUG_DRIVER("BO is not registered?\n");
 758                return -EFAULT;
 759        }
 760
 761        /* Another get_user_pages is running at the same time?? */
 762        if (WARN_ON(gtt->range))
 763                return -EFAULT;
 764
 765        if (!mmget_not_zero(mm)) /* Happens during process shutdown */
 766                return -ESRCH;
 767
 768        range = kzalloc(sizeof(*range), GFP_KERNEL);
 769        if (unlikely(!range)) {
 770                r = -ENOMEM;
 771                goto out;
 772        }
 773        range->notifier = &bo->notifier;
 774        range->start = bo->notifier.interval_tree.start;
 775        range->end = bo->notifier.interval_tree.last + 1;
 776        range->default_flags = HMM_PFN_REQ_FAULT;
 777        if (!amdgpu_ttm_tt_is_readonly(ttm))
 778                range->default_flags |= HMM_PFN_REQ_WRITE;
 779
 780        range->hmm_pfns = kvmalloc_array(ttm->num_pages,
 781                                         sizeof(*range->hmm_pfns), GFP_KERNEL);
 782        if (unlikely(!range->hmm_pfns)) {
 783                r = -ENOMEM;
 784                goto out_free_ranges;
 785        }
 786
 787        mmap_read_lock(mm);
 788        vma = find_vma(mm, start);
 789        if (unlikely(!vma || start < vma->vm_start)) {
 790                r = -EFAULT;
 791                goto out_unlock;
 792        }
 793        if (unlikely((gtt->userflags & AMDGPU_GEM_USERPTR_ANONONLY) &&
 794                vma->vm_file)) {
 795                r = -EPERM;
 796                goto out_unlock;
 797        }
 798        mmap_read_unlock(mm);
 799        timeout = jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);
 800
 801retry:
 802        range->notifier_seq = mmu_interval_read_begin(&bo->notifier);
 803
 804        mmap_read_lock(mm);
 805        r = hmm_range_fault(range);
 806        mmap_read_unlock(mm);
 807        if (unlikely(r)) {
 808                /*
 809                 * FIXME: This timeout should encompass the retry from
 810                 * mmu_interval_read_retry() as well.
 811                 */
 812                if (r == -EBUSY && !time_after(jiffies, timeout))
 813                        goto retry;
 814                goto out_free_pfns;
 815        }
 816
 817        /*
 818         * Due to default_flags, all pages are HMM_PFN_VALID or
 819         * hmm_range_fault() fails. FIXME: The pages cannot be touched outside
 820         * the notifier_lock, and mmu_interval_read_retry() must be done first.
 821         */
 822        for (i = 0; i < ttm->num_pages; i++)
 823                pages[i] = hmm_pfn_to_page(range->hmm_pfns[i]);
 824
 825        gtt->range = range;
 826        mmput(mm);
 827
 828        return 0;
 829
 830out_unlock:
 831        mmap_read_unlock(mm);
 832out_free_pfns:
 833        kvfree(range->hmm_pfns);
 834out_free_ranges:
 835        kfree(range);
 836out:
 837        mmput(mm);
 838        return r;
 839}
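
/*
 * Pairing sketch for the HMM sequence above (illustrative; error unwinding
 * omitted). The pages are only trustworthy if the notifier sequence has not
 * advanced, so a caller does roughly:
 *
 *	r = amdgpu_ttm_tt_get_user_pages(bo, pages);
 *	if (r)
 *		return r;
 *	... populate / bind using pages ...
 *	if (!amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm))
 *		goto retry;	// CPU page tables changed, start over
 *
 * which is why the comment above requires exactly one _done() call per
 * successful get.
 */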
 840
 841/*
 842 * amdgpu_ttm_tt_get_user_pages_done - stop HMM tracking of CPU page table changes
 843 * Check if the pages backing this ttm range have been invalidated
 844 *
 845 * Returns: true if pages are still valid
 846 */
 847bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm)
 848{
 849        struct amdgpu_ttm_tt *gtt = (void *)ttm;
 850        bool r = false;
 851
 852        if (!gtt || !gtt->userptr)
 853                return false;
 854
 855        DRM_DEBUG_DRIVER("user_pages_done 0x%llx pages 0x%x\n",
 856                gtt->userptr, ttm->num_pages);
 857
 858        WARN_ONCE(!gtt->range || !gtt->range->hmm_pfns,
 859                "No user pages to check\n");
 860
 861        if (gtt->range) {
 862                /*
 863                 * FIXME: Must always hold notifier_lock for this, and must
 864                 * not ignore the return code.
 865                 */
 866                r = mmu_interval_read_retry(gtt->range->notifier,
 867                                         gtt->range->notifier_seq);
 868                kvfree(gtt->range->hmm_pfns);
 869                kfree(gtt->range);
 870                gtt->range = NULL;
 871        }
 872
 873        return !r;
 874}
 875#endif
 876
 877/*
 878 * amdgpu_ttm_tt_set_user_pages - Copy the page pointers in, or clear them.
 879 *
 880 * Called by amdgpu_cs_list_validate(). This creates the page list
 881 * that backs user memory and will ultimately be mapped into the device
 882 * address space.
 883 */
 884void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct page **pages)
 885{
 886        unsigned long i;
 887
 888        for (i = 0; i < ttm->num_pages; ++i)
 889                ttm->pages[i] = pages ? pages[i] : NULL;
 890}
 891
 892/*
 893 * amdgpu_ttm_tt_pin_userptr - prepare the sg table with the user pages
 894 *
 895 * Called by amdgpu_ttm_backend_bind()
 896 */
 897static int amdgpu_ttm_tt_pin_userptr(struct ttm_bo_device *bdev,
 898                                     struct ttm_tt *ttm)
 899{
 900        struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
 901        struct amdgpu_ttm_tt *gtt = (void *)ttm;
 902        int r;
 903
 904        int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
 905        enum dma_data_direction direction = write ?
 906                DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
 907
 908        /* Allocate an SG array and squash pages into it */
 909        r = sg_alloc_table_from_pages(ttm->sg, ttm->pages, ttm->num_pages, 0,
 910                                      ttm->num_pages << PAGE_SHIFT,
 911                                      GFP_KERNEL);
 912        if (r)
 913                goto release_sg;
 914
 915        /* Map SG to device */
 916        r = dma_map_sgtable(adev->dev, ttm->sg, direction, 0);
 917        if (r)
 918                goto release_sg;
 919
 920        /* convert SG to linear array of pages and dma addresses */
 921        drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
 922                                         gtt->ttm.dma_address, ttm->num_pages);
 923
 924        return 0;
 925
 926release_sg:
 927        kfree(ttm->sg);
 928        ttm->sg = NULL;
 929        return r;
 930}
 931
 932/*
 933 * amdgpu_ttm_tt_unpin_userptr - Unpin and unmap userptr pages
 934 */
 935static void amdgpu_ttm_tt_unpin_userptr(struct ttm_bo_device *bdev,
 936                                        struct ttm_tt *ttm)
 937{
 938        struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
 939        struct amdgpu_ttm_tt *gtt = (void *)ttm;
 940
 941        int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
 942        enum dma_data_direction direction = write ?
 943                DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
 944
 945        /* double check that we don't free the table twice */
 946        if (!ttm->sg->sgl)
 947                return;
 948
 949        /* unmap the pages mapped to the device */
 950        dma_unmap_sgtable(adev->dev, ttm->sg, direction, 0);
 951        sg_free_table(ttm->sg);
 952
 953#if IS_ENABLED(CONFIG_DRM_AMDGPU_USERPTR)
 954        if (gtt->range) {
 955                unsigned long i;
 956
 957                for (i = 0; i < ttm->num_pages; i++) {
 958                        if (ttm->pages[i] !=
 959                            hmm_pfn_to_page(gtt->range->hmm_pfns[i]))
 960                                break;
 961                }
 962
 963                WARN((i == ttm->num_pages), "Missing get_user_page_done\n");
 964        }
 965#endif
 966}
 967
 968static int amdgpu_ttm_gart_bind(struct amdgpu_device *adev,
 969                                struct ttm_buffer_object *tbo,
 970                                uint64_t flags)
 971{
 972        struct amdgpu_bo *abo = ttm_to_amdgpu_bo(tbo);
 973        struct ttm_tt *ttm = tbo->ttm;
 974        struct amdgpu_ttm_tt *gtt = (void *)ttm;
 975        int r;
 976
 977        if (amdgpu_bo_encrypted(abo))
 978                flags |= AMDGPU_PTE_TMZ;
 979
 980        if (abo->flags & AMDGPU_GEM_CREATE_CP_MQD_GFX9) {
 981                uint64_t page_idx = 1;
 982
 983                r = amdgpu_gart_bind(adev, gtt->offset, page_idx,
 984                                ttm->pages, gtt->ttm.dma_address, flags);
 985                if (r)
 986                        goto gart_bind_fail;
 987
 988                /* The memory type of the first page defaults to UC. Now
 989                 * modify the memory type to NC from the second page of
 990                 * the BO onward.
 991                 */
 992                flags &= ~AMDGPU_PTE_MTYPE_VG10_MASK;
 993                flags |= AMDGPU_PTE_MTYPE_VG10(AMDGPU_MTYPE_NC);
 994
 995                r = amdgpu_gart_bind(adev,
 996                                gtt->offset + (page_idx << PAGE_SHIFT),
 997                                ttm->num_pages - page_idx,
 998                                &ttm->pages[page_idx],
 999                                &(gtt->ttm.dma_address[page_idx]), flags);
1000        } else {
1001                r = amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages,
1002                                     ttm->pages, gtt->ttm.dma_address, flags);
1003        }
1004
1005gart_bind_fail:
1006        if (r)
1007                DRM_ERROR("failed to bind %u pages at 0x%08llX\n",
1008                          ttm->num_pages, gtt->offset);
1009
1010        return r;
1011}
1012
1013/*
1014 * amdgpu_ttm_backend_bind - Bind GTT memory
1015 *
1016 * Called by ttm_tt_bind() on behalf of ttm_bo_handle_move_mem().
1017 * This handles binding GTT memory to the device address space.
1018 */
1019static int amdgpu_ttm_backend_bind(struct ttm_bo_device *bdev,
1020                                   struct ttm_tt *ttm,
1021                                   struct ttm_resource *bo_mem)
1022{
1023        struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
1024        struct amdgpu_ttm_tt *gtt = (void*)ttm;
1025        uint64_t flags;
1026        int r = 0;
1027
1028        if (!bo_mem)
1029                return -EINVAL;
1030
1031        if (gtt->bound)
1032                return 0;
1033
1034        if (gtt->userptr) {
1035                r = amdgpu_ttm_tt_pin_userptr(bdev, ttm);
1036                if (r) {
1037                        DRM_ERROR("failed to pin userptr\n");
1038                        return r;
1039                }
1040        }
1041        if (!ttm->num_pages) {
1042                WARN(1, "nothing to bind %u pages for mreg %p back %p!\n",
1043                     ttm->num_pages, bo_mem, ttm);
1044        }
1045
1046        if (bo_mem->mem_type == AMDGPU_PL_GDS ||
1047            bo_mem->mem_type == AMDGPU_PL_GWS ||
1048            bo_mem->mem_type == AMDGPU_PL_OA)
1049                return -EINVAL;
1050
1051        if (!amdgpu_gtt_mgr_has_gart_addr(bo_mem)) {
1052                gtt->offset = AMDGPU_BO_INVALID_OFFSET;
1053                return 0;
1054        }
1055
1056        /* compute PTE flags relevant to this BO memory */
1057        flags = amdgpu_ttm_tt_pte_flags(adev, ttm, bo_mem);
1058
1059        /* bind pages into GART page tables */
1060        gtt->offset = (u64)bo_mem->start << PAGE_SHIFT;
1061        r = amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages,
1062                ttm->pages, gtt->ttm.dma_address, flags);
1063
1064        if (r)
1065                DRM_ERROR("failed to bind %u pages at 0x%08llX\n",
1066                          ttm->num_pages, gtt->offset);
1067        gtt->bound = true;
1068        return r;
1069}
1070
1071/*
1072 * amdgpu_ttm_alloc_gart - Make sure buffer object is accessible either
1073 * through AGP or GART aperture.
1074 *
1075 * If bo is accessible through AGP aperture, then use AGP aperture
1076 * to access bo; otherwise allocate logical space in GART aperture
1077 * and map bo to GART aperture.
1078 */
1079int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo)
1080{
1081        struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
1082        struct ttm_operation_ctx ctx = { false, false };
1083        struct amdgpu_ttm_tt *gtt = (void *)bo->ttm;
1084        struct ttm_resource tmp;
1085        struct ttm_placement placement;
1086        struct ttm_place placements;
1087        uint64_t addr, flags;
1088        int r;
1089
1090        if (bo->mem.start != AMDGPU_BO_INVALID_OFFSET)
1091                return 0;
1092
1093        addr = amdgpu_gmc_agp_addr(bo);
1094        if (addr != AMDGPU_BO_INVALID_OFFSET) {
1095                bo->mem.start = addr >> PAGE_SHIFT;
1096        } else {
1097
1098                /* allocate GART space */
1099                tmp = bo->mem;
1100                tmp.mm_node = NULL;
1101                placement.num_placement = 1;
1102                placement.placement = &placements;
1103                placement.num_busy_placement = 1;
1104                placement.busy_placement = &placements;
1105                placements.fpfn = 0;
1106                placements.lpfn = adev->gmc.gart_size >> PAGE_SHIFT;
1107                placements.mem_type = TTM_PL_TT;
1108                placements.flags = bo->mem.placement;
1109
1110                r = ttm_bo_mem_space(bo, &placement, &tmp, &ctx);
1111                if (unlikely(r))
1112                        return r;
1113
1114                /* compute PTE flags for this buffer object */
1115                flags = amdgpu_ttm_tt_pte_flags(adev, bo->ttm, &tmp);
1116
1117                /* Bind pages */
1118                gtt->offset = (u64)tmp.start << PAGE_SHIFT;
1119                r = amdgpu_ttm_gart_bind(adev, bo, flags);
1120                if (unlikely(r)) {
1121                        ttm_resource_free(bo, &tmp);
1122                        return r;
1123                }
1124
1125                ttm_resource_free(bo, &bo->mem);
1126                bo->mem = tmp;
1127        }
1128
1129        return 0;
1130}
1131
1132/*
1133 * amdgpu_ttm_recover_gart - Rebind GTT pages
1134 *
1135 * Called by amdgpu_gtt_mgr_recover() from amdgpu_device_reset() to
1136 * rebind GTT pages during a GPU reset.
1137 */
1138int amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo)
1139{
1140        struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
1141        uint64_t flags;
1142        int r;
1143
1144        if (!tbo->ttm)
1145                return 0;
1146
1147        flags = amdgpu_ttm_tt_pte_flags(adev, tbo->ttm, &tbo->mem);
1148        r = amdgpu_ttm_gart_bind(adev, tbo, flags);
1149
1150        return r;
1151}
1152
1153/*
1154 * amdgpu_ttm_backend_unbind - Unbind GTT mapped pages
1155 *
1156 * Called by ttm_tt_unbind() on behalf of ttm_bo_move_ttm() and
1157 * ttm_tt_destroy().
1158 */
1159static void amdgpu_ttm_backend_unbind(struct ttm_bo_device *bdev,
1160                                      struct ttm_tt *ttm)
1161{
1162        struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
1163        struct amdgpu_ttm_tt *gtt = (void *)ttm;
1164        int r;
1165
1166        if (!gtt->bound)
1167                return;
1168
1169        /* if the pages have userptr pinning then clear that first */
1170        if (gtt->userptr)
1171                amdgpu_ttm_tt_unpin_userptr(bdev, ttm);
1172
1173        if (gtt->offset == AMDGPU_BO_INVALID_OFFSET)
1174                return;
1175
1176        /* unbind shouldn't be done for GDS/GWS/OA in ttm_bo_clean_mm */
1177        r = amdgpu_gart_unbind(adev, gtt->offset, ttm->num_pages);
1178        if (r)
1179                DRM_ERROR("failed to unbind %u pages at 0x%08llX\n",
1180                          gtt->ttm.num_pages, gtt->offset);
1181        gtt->bound = false;
1182}
1183
1184static void amdgpu_ttm_backend_destroy(struct ttm_bo_device *bdev,
1185                                       struct ttm_tt *ttm)
1186{
1187        struct amdgpu_ttm_tt *gtt = (void *)ttm;
1188
1189        amdgpu_ttm_backend_unbind(bdev, ttm);
1190        ttm_tt_destroy_common(bdev, ttm);
1191        if (gtt->usertask)
1192                put_task_struct(gtt->usertask);
1193
1194        ttm_tt_fini(&gtt->ttm);
1195        kfree(gtt);
1196}
1197
1198/**
1199 * amdgpu_ttm_tt_create - Create a ttm_tt object for a given BO
1200 *
1201 * @bo: The buffer object to create a GTT ttm_tt object around
1202 * @page_flags: Page flags to be added to the ttm_tt object
1203 *
1204 * Called by ttm_tt_create().
1205 */
1206static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_buffer_object *bo,
1207                                           uint32_t page_flags)
1208{
1209        struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
1210        struct amdgpu_ttm_tt *gtt;
1211        enum ttm_caching caching;
1212
1213        gtt = kzalloc(sizeof(struct amdgpu_ttm_tt), GFP_KERNEL);
1214        if (gtt == NULL) {
1215                return NULL;
1216        }
1217        gtt->gobj = &bo->base;
1218
1219        if (abo->flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
1220                caching = ttm_write_combined;
1221        else
1222                caching = ttm_cached;
1223
1224        /* allocate space for the uninitialized page entries */
1225        if (ttm_sg_tt_init(&gtt->ttm, bo, page_flags, caching)) {
1226                kfree(gtt);
1227                return NULL;
1228        }
1229        return &gtt->ttm;
1230}
1231
1232/*
1233 * amdgpu_ttm_tt_populate - Map GTT pages visible to the device
1234 *
1235 * Map the pages of a ttm_tt object to an address space visible
1236 * to the underlying device.
1237 */
1238static int amdgpu_ttm_tt_populate(struct ttm_bo_device *bdev,
1239                                  struct ttm_tt *ttm,
1240                                  struct ttm_operation_ctx *ctx)
1241{
1242        struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
1243        struct amdgpu_ttm_tt *gtt = (void *)ttm;
1244
1245        /* user pages are bound by amdgpu_ttm_tt_pin_userptr() */
1246        if (gtt && gtt->userptr) {
1247                ttm->sg = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
1248                if (!ttm->sg)
1249                        return -ENOMEM;
1250
1251                ttm->page_flags |= TTM_PAGE_FLAG_SG;
1252                return 0;
1253        }
1254
1255        if (ttm->page_flags & TTM_PAGE_FLAG_SG) {
1256                if (!ttm->sg) {
1257                        struct dma_buf_attachment *attach;
1258                        struct sg_table *sgt;
1259
1260                        attach = gtt->gobj->import_attach;
1261                        sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
1262                        if (IS_ERR(sgt))
1263                                return PTR_ERR(sgt);
1264
1265                        ttm->sg = sgt;
1266                }
1267
1268                drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
1269                                                 gtt->ttm.dma_address,
1270                                                 ttm->num_pages);
1271                return 0;
1272        }
1273
1274        return ttm_pool_alloc(&adev->mman.bdev.pool, ttm, ctx);
1275}
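
/*
 * Summary of the three populate paths above:
 *   1. userptr BOs only get an empty sg_table here; the pages are filled in
 *      later by amdgpu_ttm_tt_pin_userptr() at bind time,
 *   2. imported dma-bufs map the exporter's sg_table and scatter it into the
 *      pages/dma_address arrays,
 *   3. everything else is allocated from the common TTM page pool.
 */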
1276
1277/*
1278 * amdgpu_ttm_tt_unpopulate - unmap GTT pages and unpopulate page arrays
1279 *
1280 * Unmaps pages of a ttm_tt object from the device address space and
1281 * unpopulates the page array backing it.
1282 */
1283static void amdgpu_ttm_tt_unpopulate(struct ttm_bo_device *bdev,
1284                                     struct ttm_tt *ttm)
1285{
1286        struct amdgpu_ttm_tt *gtt = (void *)ttm;
1287        struct amdgpu_device *adev;
1288
1289        if (gtt && gtt->userptr) {
1290                amdgpu_ttm_tt_set_user_pages(ttm, NULL);
1291                kfree(ttm->sg);
1292                ttm->page_flags &= ~TTM_PAGE_FLAG_SG;
1293                return;
1294        }
1295
1296        if (ttm->sg && gtt->gobj->import_attach) {
1297                struct dma_buf_attachment *attach;
1298
1299                attach = gtt->gobj->import_attach;
1300                dma_buf_unmap_attachment(attach, ttm->sg, DMA_BIDIRECTIONAL);
1301                ttm->sg = NULL;
1302                return;
1303        }
1304
1305        if (ttm->page_flags & TTM_PAGE_FLAG_SG)
1306                return;
1307
1308        adev = amdgpu_ttm_adev(bdev);
1309        return ttm_pool_free(&adev->mman.bdev.pool, ttm);
1310}
1311
1312/**
1313 * amdgpu_ttm_tt_set_userptr - Initialize userptr GTT ttm_tt for the current
1314 * task
1315 *
1316 * @bo: The ttm_buffer_object to bind this userptr to
1317 * @addr: The address in the current task's VM space to use
1318 * @flags: Requirements of userptr object.
1319 *
1320 * Called by amdgpu_gem_userptr_ioctl() to bind userptr pages
1321 * to the current task.
1322 */
1323int amdgpu_ttm_tt_set_userptr(struct ttm_buffer_object *bo,
1324                              uint64_t addr, uint32_t flags)
1325{
1326        struct amdgpu_ttm_tt *gtt;
1327
1328        if (!bo->ttm) {
1329                /* TODO: We want a separate TTM object type for userptrs */
1330                bo->ttm = amdgpu_ttm_tt_create(bo, 0);
1331                if (bo->ttm == NULL)
1332                        return -ENOMEM;
1333        }
1334
1335        gtt = (void *)bo->ttm;
1336        gtt->userptr = addr;
1337        gtt->userflags = flags;
1338
1339        if (gtt->usertask)
1340                put_task_struct(gtt->usertask);
1341        gtt->usertask = current->group_leader;
1342        get_task_struct(gtt->usertask);
1343
1344        return 0;
1345}
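
/*
 * Minimal caller sketch (illustrative; assumes the ioctl args carry an
 * address and flags, error unwinding omitted):
 *
 *	r = amdgpu_ttm_tt_set_userptr(&bo->tbo, args->addr, args->flags);
 *	if (r)
 *		return r;
 *
 * After this the ttm_tt is treated as a userptr, so populate/bind take the
 * HMM path above instead of the page pool.
 */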
1346
1347/*
1348 * amdgpu_ttm_tt_get_usermm - Return the mm_struct backing a userptr ttm_tt object
1349 */
1350struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm)
1351{
1352        struct amdgpu_ttm_tt *gtt = (void *)ttm;
1353
1354        if (gtt == NULL)
1355                return NULL;
1356
1357        if (gtt->usertask == NULL)
1358                return NULL;
1359
1360        return gtt->usertask->mm;
1361}
1362
1363/*
1364 * amdgpu_ttm_tt_affect_userptr - Determine if a ttm_tt object lies within an
1365 * address range for the current task.
1366 *
1367 */
1368bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
1369                                  unsigned long end)
1370{
1371        struct amdgpu_ttm_tt *gtt = (void *)ttm;
1372        unsigned long size;
1373
1374        if (gtt == NULL || !gtt->userptr)
1375                return false;
1376
1377        /* Return false if no part of the ttm_tt object lies within
1378         * the range
1379         */
1380        size = (unsigned long)gtt->ttm.num_pages * PAGE_SIZE;
1381        if (gtt->userptr > end || gtt->userptr + size <= start)
1382                return false;
1383
1384        return true;
1385}
1386
1387/*
1388 * amdgpu_ttm_tt_is_userptr - Are the pages backed by a userptr?
1389 */
1390bool amdgpu_ttm_tt_is_userptr(struct ttm_tt *ttm)
1391{
1392        struct amdgpu_ttm_tt *gtt = (void *)ttm;
1393
1394        if (gtt == NULL || !gtt->userptr)
1395                return false;
1396
1397        return true;
1398}
1399
1400/*
1401 * amdgpu_ttm_tt_is_readonly - Is the ttm_tt object read only?
1402 */
1403bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm)
1404{
1405        struct amdgpu_ttm_tt *gtt = (void *)ttm;
1406
1407        if (gtt == NULL)
1408                return false;
1409
1410        return !!(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
1411}
1412
1413/**
1414 * amdgpu_ttm_tt_pde_flags - Compute PDE flags for ttm_tt object
1415 *
1416 * @ttm: The ttm_tt object to compute the flags for
1417 * @mem: The memory resource backing this ttm_tt object
1418 *
1419 * Figure out the flags to use for a VM PDE (Page Directory Entry).
1420 */
1421uint64_t amdgpu_ttm_tt_pde_flags(struct ttm_tt *ttm, struct ttm_resource *mem)
1422{
1423        uint64_t flags = 0;
1424
1425        if (mem && mem->mem_type != TTM_PL_SYSTEM)
1426                flags |= AMDGPU_PTE_VALID;
1427
1428        if (mem && mem->mem_type == TTM_PL_TT) {
1429                flags |= AMDGPU_PTE_SYSTEM;
1430
1431                if (ttm->caching == ttm_cached)
1432                        flags |= AMDGPU_PTE_SNOOPED;
1433        }
1434
1435        return flags;
1436}
1437
1438/**
1439 * amdgpu_ttm_tt_pte_flags - Compute PTE flags for ttm_tt object
1440 *
1441 * @adev: amdgpu_device pointer
1442 * @ttm: The ttm_tt object to compute the flags for
1443 * @mem: The memory resource backing this ttm_tt object
1444 *
1445 * Figure out the flags to use for a VM PTE (Page Table Entry).
1446 */
1447uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
1448                                 struct ttm_resource *mem)
1449{
1450        uint64_t flags = amdgpu_ttm_tt_pde_flags(ttm, mem);
1451
1452        flags |= adev->gart.gart_pte_flags;
1453        flags |= AMDGPU_PTE_READABLE;
1454
1455        if (!amdgpu_ttm_tt_is_readonly(ttm))
1456                flags |= AMDGPU_PTE_WRITEABLE;
1457
1458        return flags;
1459}
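
/*
 * Worked example (flag names only, for illustration): a writable, cached
 * GTT BO ends up with roughly
 *
 *	AMDGPU_PTE_VALID | AMDGPU_PTE_SYSTEM | AMDGPU_PTE_SNOOPED
 *	| adev->gart.gart_pte_flags
 *	| AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE
 *
 * while a read-only userptr drops AMDGPU_PTE_WRITEABLE via
 * amdgpu_ttm_tt_is_readonly().
 */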
1460
1461/*
1462 * amdgpu_ttm_bo_eviction_valuable - Check to see if we can evict a buffer
1463 * object.
1464 *
1465 * Return true if eviction is sensible. Called by ttm_mem_evict_first() on
1466 * behalf of ttm_bo_mem_force_space() which tries to evict buffer objects until
1467 * it can find space for a new object and by ttm_bo_force_list_clean() which is
1468 * used to clean out a memory space.
1469 */
1470static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
1471                                            const struct ttm_place *place)
1472{
1473        unsigned long num_pages = bo->mem.num_pages;
1474        struct drm_mm_node *node = bo->mem.mm_node;
1475        struct dma_resv_list *flist;
1476        struct dma_fence *f;
1477        int i;
1478
1479        if (bo->type == ttm_bo_type_kernel &&
1480            !amdgpu_vm_evictable(ttm_to_amdgpu_bo(bo)))
1481                return false;
1482
1483        /* If bo is a KFD BO, check if the bo belongs to the current process.
1484         * If true, then return false as any KFD process needs all its BOs to
1485         * be resident to run successfully
1486         */
1487        flist = dma_resv_get_list(bo->base.resv);
1488        if (flist) {
1489                for (i = 0; i < flist->shared_count; ++i) {
1490                        f = rcu_dereference_protected(flist->shared[i],
1491                                dma_resv_held(bo->base.resv));
1492                        if (amdkfd_fence_check_mm(f, current->mm))
1493                                return false;
1494                }
1495        }
1496
1497        switch (bo->mem.mem_type) {
1498        case TTM_PL_TT:
1499                if (amdgpu_bo_is_amdgpu_bo(bo) &&
1500                    amdgpu_bo_encrypted(ttm_to_amdgpu_bo(bo)))
1501                        return false;
1502                return true;
1503
1504        case TTM_PL_VRAM:
1505                /* Check each drm MM node individually */
1506                while (num_pages) {
1507                        if (place->fpfn < (node->start + node->size) &&
1508                            !(place->lpfn && place->lpfn <= node->start))
1509                                return true;
1510
1511                        num_pages -= node->size;
1512                        ++node;
1513                }
1514                return false;
1515
1516        default:
1517                break;
1518        }
1519
1520        return ttm_bo_eviction_valuable(bo, place);
1521}
1522
1523/**
1524 * amdgpu_ttm_access_memory - Read or write memory that backs a buffer object.
1525 *
1526 * @bo:  The buffer object to read/write
1527 * @offset:  Offset into buffer object
1528 * @buf:  Secondary buffer to write/read from
1529 * @len: Length in bytes of access
1530 * @write:  true if writing
1531 *
1532 * This is used to access the VRAM that backs a buffer object via MMIO
1533 * for debugging purposes.
1534 */
1535static int amdgpu_ttm_access_memory(struct ttm_buffer_object *bo,
1536                                    unsigned long offset,
1537                                    void *buf, int len, int write)
1538{
1539        struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
1540        struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
1541        struct drm_mm_node *nodes;
1542        uint32_t value = 0;
1543        int ret = 0;
1544        uint64_t pos;
1545        unsigned long flags;
1546
1547        if (bo->mem.mem_type != TTM_PL_VRAM)
1548                return -EIO;
1549
1550        pos = offset;
1551        nodes = amdgpu_find_mm_node(&abo->tbo.mem, &pos);
1552        pos += (nodes->start << PAGE_SHIFT);
1553
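            /*
             * Bytes that do not form a full aligned dword are accessed through
             * the MM_INDEX/MM_DATA window (read-modify-write for writes), while
             * fully aligned dwords are handled in bulk by
             * amdgpu_device_vram_access().
             */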
1554        while (len && pos < adev->gmc.mc_vram_size) {
1555                uint64_t aligned_pos = pos & ~(uint64_t)3;
1556                uint64_t bytes = 4 - (pos & 3);
1557                uint32_t shift = (pos & 3) * 8;
1558                uint32_t mask = 0xffffffff << shift;
1559
1560                if (len < bytes) {
1561                        mask &= 0xffffffff >> (bytes - len) * 8;
1562                        bytes = len;
1563                }
1564
1565                if (mask != 0xffffffff) {
1566                        spin_lock_irqsave(&adev->mmio_idx_lock, flags);
1567                        WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)aligned_pos) | 0x80000000);
1568                        WREG32_NO_KIQ(mmMM_INDEX_HI, aligned_pos >> 31);
1569                        if (!write || mask != 0xffffffff)
1570                                value = RREG32_NO_KIQ(mmMM_DATA);
1571                        if (write) {
1572                                value &= ~mask;
1573                                value |= (*(uint32_t *)buf << shift) & mask;
1574                                WREG32_NO_KIQ(mmMM_DATA, value);
1575                        }
1576                        spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
1577                        if (!write) {
1578                                value = (value & mask) >> shift;
1579                                memcpy(buf, &value, bytes);
1580                        }
1581                } else {
1582                        bytes = (nodes->start + nodes->size) << PAGE_SHIFT;
1583                        bytes = min(bytes - pos, (uint64_t)len & ~0x3ull);
1584
1585                        amdgpu_device_vram_access(adev, pos, (uint32_t *)buf,
1586                                                  bytes, write);
1587                }
1588
1589                ret += bytes;
1590                buf = (uint8_t *)buf + bytes;
1591                pos += bytes;
1592                len -= bytes;
1593                if (pos >= (nodes->start + nodes->size) << PAGE_SHIFT) {
1594                        ++nodes;
1595                        pos = (nodes->start << PAGE_SHIFT);
1596                }
1597        }
1598
1599        return ret;
1600}
1601
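    /*
     * amdgpu_bo_delete_mem_notify - TTM callback invoked before a BO's backing
     * store is released; forward to the common move notifier with no new
     * placement so kernel mappings and GPU VM state get cleaned up.
     */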
1602static void
1603amdgpu_bo_delete_mem_notify(struct ttm_buffer_object *bo)
1604{
1605        amdgpu_bo_move_notify(bo, false, NULL);
1606}
1607
1608static struct ttm_bo_driver amdgpu_bo_driver = {
1609        .ttm_tt_create = &amdgpu_ttm_tt_create,
1610        .ttm_tt_populate = &amdgpu_ttm_tt_populate,
1611        .ttm_tt_unpopulate = &amdgpu_ttm_tt_unpopulate,
1612        .ttm_tt_destroy = &amdgpu_ttm_backend_destroy,
1613        .eviction_valuable = amdgpu_ttm_bo_eviction_valuable,
1614        .evict_flags = &amdgpu_evict_flags,
1615        .move = &amdgpu_bo_move,
1616        .verify_access = &amdgpu_verify_access,
1617        .delete_mem_notify = &amdgpu_bo_delete_mem_notify,
1618        .release_notify = &amdgpu_bo_release_notify,
1619        .io_mem_reserve = &amdgpu_ttm_io_mem_reserve,
1620        .io_mem_pfn = amdgpu_ttm_io_mem_pfn,
1621        .access_memory = &amdgpu_ttm_access_memory,
1622        .del_from_lru_notify = &amdgpu_vm_del_from_lru_notify
1623};
1624
1625/*
1626 * Firmware Reservation functions
1627 */
1628/**
1629 * amdgpu_ttm_fw_reserve_vram_fini - free fw reserved vram
1630 *
1631 * @adev: amdgpu_device pointer
1632 *
1633 * free fw reserved vram if it has been reserved.
1634 */
1635static void amdgpu_ttm_fw_reserve_vram_fini(struct amdgpu_device *adev)
1636{
1637        amdgpu_bo_free_kernel(&adev->mman.fw_vram_usage_reserved_bo,
1638                NULL, &adev->mman.fw_vram_usage_va);
1639}
1640
1641/**
1642 * amdgpu_ttm_fw_reserve_vram_init - create bo vram reservation from fw
1643 *
1644 * @adev: amdgpu_device pointer
1645 *
1646 * create bo vram reservation from fw.
1647 */
1648static int amdgpu_ttm_fw_reserve_vram_init(struct amdgpu_device *adev)
1649{
1650        uint64_t vram_size = adev->gmc.visible_vram_size;
1651
1652        adev->mman.fw_vram_usage_va = NULL;
1653        adev->mman.fw_vram_usage_reserved_bo = NULL;
1654
1655        if (adev->mman.fw_vram_usage_size == 0 ||
1656            adev->mman.fw_vram_usage_size > vram_size)
1657                return 0;
1658
1659        return amdgpu_bo_create_kernel_at(adev,
1660                                          adev->mman.fw_vram_usage_start_offset,
1661                                          adev->mman.fw_vram_usage_size,
1662                                          AMDGPU_GEM_DOMAIN_VRAM,
1663                                          &adev->mman.fw_vram_usage_reserved_bo,
1664                                          &adev->mman.fw_vram_usage_va);
1665}
1666
1667/*
1668 * Memory training reservation functions
1669 */
1670
1671/**
1672 * amdgpu_ttm_training_reserve_vram_fini - free memory training reserved vram
1673 *
1674 * @adev: amdgpu_device pointer
1675 *
1676 * free memory training reserved vram if it has been reserved.
1677 */
1678static int amdgpu_ttm_training_reserve_vram_fini(struct amdgpu_device *adev)
1679{
1680        struct psp_memory_training_context *ctx = &adev->psp.mem_train_ctx;
1681
1682        ctx->init = PSP_MEM_TRAIN_NOT_SUPPORT;
1683        amdgpu_bo_free_kernel(&ctx->c2p_bo, NULL, NULL);
1684        ctx->c2p_bo = NULL;
1685
1686        return 0;
1687}
1688
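    /*
     * amdgpu_ttm_training_data_block_init - set up the memory training layout
     *
     * The C2P training data block is placed just below the IP discovery TMR
     * near the top of VRAM, while the P2C block sits at the fixed GDDR6
     * training offset from the end of VRAM.
     */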
1689static void amdgpu_ttm_training_data_block_init(struct amdgpu_device *adev)
1690{
1691        struct psp_memory_training_context *ctx = &adev->psp.mem_train_ctx;
1692
1693        memset(ctx, 0, sizeof(*ctx));
1694
1695        ctx->c2p_train_data_offset =
1696                ALIGN((adev->gmc.mc_vram_size - adev->mman.discovery_tmr_size - SZ_1M), SZ_1M);
1697        ctx->p2c_train_data_offset =
1698                (adev->gmc.mc_vram_size - GDDR6_MEM_TRAINING_OFFSET);
1699        ctx->train_data_size =
1700                GDDR6_MEM_TRAINING_DATA_SIZE_IN_BYTES;
1701
1702        DRM_DEBUG("train_data_size:%llx,p2c_train_data_offset:%llx,c2p_train_data_offset:%llx.\n",
1703                        ctx->train_data_size,
1704                        ctx->p2c_train_data_offset,
1705                        ctx->c2p_train_data_offset);
1706}
1707
1708/*
1709 * reserve TMR memory at the top of VRAM which holds
1710 * IP Discovery data and is protected by PSP.
1711 */
1712static int amdgpu_ttm_reserve_tmr(struct amdgpu_device *adev)
1713{
1714        int ret;
1715        struct psp_memory_training_context *ctx = &adev->psp.mem_train_ctx;
1716        bool mem_train_support = false;
1717
1718        if (!amdgpu_sriov_vf(adev)) {
1719                ret = amdgpu_mem_train_support(adev);
1720                if (ret == 1)
1721                        mem_train_support = true;
1722                else if (ret == -1)
1723                        return -EINVAL;
1724                else
1725                        DRM_DEBUG("memory training is not supported!\n");
1726        }
1727
1728        /*
1729         * Query the reserved TMR size through atom firmwareinfo for Sienna_Cichlid and onwards
1730         * for all the use cases (IP discovery, G6 memory training, profiling, diagnostic data, etc.)
1731         *
1732         * Otherwise, fall back to the legacy approach of checking and reserving TMR blocks for IP
1733         * discovery data and G6 memory training data respectively.
1734         */
1735        adev->mman.discovery_tmr_size =
1736                amdgpu_atomfirmware_get_fw_reserved_fb_size(adev);
1737        if (!adev->mman.discovery_tmr_size)
1738                adev->mman.discovery_tmr_size = DISCOVERY_TMR_OFFSET;
1739
1740        if (mem_train_support) {
1741                /* reserve vram for mem train according to TMR location */
1742                amdgpu_ttm_training_data_block_init(adev);
1743                ret = amdgpu_bo_create_kernel_at(adev,
1744                                         ctx->c2p_train_data_offset,
1745                                         ctx->train_data_size,
1746                                         AMDGPU_GEM_DOMAIN_VRAM,
1747                                         &ctx->c2p_bo,
1748                                         NULL);
1749                if (ret) {
1750                        DRM_ERROR("alloc c2p_bo failed(%d)!\n", ret);
1751                        amdgpu_ttm_training_reserve_vram_fini(adev);
1752                        return ret;
1753                }
1754                ctx->init = PSP_MEM_TRAIN_RESERVE_SUCCESS;
1755        }
1756
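            /* reserve the TMR block at the top of VRAM for IP discovery data */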
1757        ret = amdgpu_bo_create_kernel_at(adev,
1758                                adev->gmc.real_vram_size - adev->mman.discovery_tmr_size,
1759                                adev->mman.discovery_tmr_size,
1760                                AMDGPU_GEM_DOMAIN_VRAM,
1761                                &adev->mman.discovery_memory,
1762                                NULL);
1763        if (ret) {
1764                DRM_ERROR("alloc tmr failed(%d)!\n", ret);
1765                amdgpu_bo_free_kernel(&adev->mman.discovery_memory, NULL, NULL);
1766                return ret;
1767        }
1768
1769        return 0;
1770}
1771
1772/*
1773 * amdgpu_ttm_init - Init the memory management (ttm) as well as various
1774 * gtt/vram related fields.
1775 *
1776 * This initializes all of the memory space pools that the TTM layer
1777 * will need such as the GTT space (system memory mapped to the device),
1778 * VRAM (on-board memory), and on-chip memories (GDS, GWS, OA) which
1779 * can be mapped per VMID.
1780 */
1781int amdgpu_ttm_init(struct amdgpu_device *adev)
1782{
1783        uint64_t gtt_size;
1784        int r;
1785        u64 vis_vram_limit;
1786
1787        mutex_init(&adev->mman.gtt_window_lock);
1788
1789        /* No other users of the address space, so set it to 0 */
1790        r = ttm_bo_device_init(&adev->mman.bdev, &amdgpu_bo_driver, adev->dev,
1791                               adev_to_drm(adev)->anon_inode->i_mapping,
1792                               adev_to_drm(adev)->vma_offset_manager,
1793                               adev->need_swiotlb,
1794                               dma_addressing_limited(adev->dev));
1795        if (r) {
1796                DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
1797                return r;
1798        }
1799        adev->mman.initialized = true;
1800
1801        /* Initialize VRAM pool with all of VRAM divided into pages */
1802        r = amdgpu_vram_mgr_init(adev);
1803        if (r) {
1804                DRM_ERROR("Failed initializing VRAM heap.\n");
1805                return r;
1806        }
1807
1808        /* Reduce size of CPU-visible VRAM if requested */
1809        vis_vram_limit = (u64)amdgpu_vis_vram_limit * 1024 * 1024;
1810        if (amdgpu_vis_vram_limit > 0 &&
1811            vis_vram_limit <= adev->gmc.visible_vram_size)
1812                adev->gmc.visible_vram_size = vis_vram_limit;
1813
1814        /* Change the size here instead of the init above so only lpfn is affected */
1815        amdgpu_ttm_set_buffer_funcs_status(adev, false);
1816#ifdef CONFIG_64BIT
1817        adev->mman.aper_base_kaddr = ioremap_wc(adev->gmc.aper_base,
1818                                                adev->gmc.visible_vram_size);
1819#endif
1820
1821        /*
1822         * The reserved VRAM for firmware must be pinned to the specified
1823         * place in VRAM, so reserve it early.
1824         */
1825        r = amdgpu_ttm_fw_reserve_vram_init(adev);
1826        if (r)
1827                return r;
1829
1830        /*
1831         * Only NAVI10 and onward ASICs support IP discovery.
1832         * If IP discovery is enabled, a block of memory should be
1833         * reserved for the IP discovery data.
1834         */
1835        if (adev->mman.discovery_bin) {
1836                r = amdgpu_ttm_reserve_tmr(adev);
1837                if (r)
1838                        return r;
1839        }
1840
1841        /* allocate memory as required for VGA
1842         * This is used for VGA emulation and pre-OS scanout buffers to
1843         * avoid display artifacts while transitioning between pre-OS
1844         * and driver.  */
1845        r = amdgpu_bo_create_kernel_at(adev, 0, adev->mman.stolen_vga_size,
1846                                       AMDGPU_GEM_DOMAIN_VRAM,
1847                                       &adev->mman.stolen_vga_memory,
1848                                       NULL);
1849        if (r)
1850                return r;
1851        r = amdgpu_bo_create_kernel_at(adev, adev->mman.stolen_vga_size,
1852                                       adev->mman.stolen_extended_size,
1853                                       AMDGPU_GEM_DOMAIN_VRAM,
1854                                       &adev->mman.stolen_extended_memory,
1855                                       NULL);
1856        if (r)
1857                return r;
1858
1859        DRM_INFO("amdgpu: %uM of VRAM memory ready\n",
1860                 (unsigned) (adev->gmc.real_vram_size / (1024 * 1024)));
1861
1862        /* Compute GTT size, either based on 3/4 of the RAM size
1863         * or whatever the user passed on module init */
1864        if (amdgpu_gtt_size == -1) {
1865                struct sysinfo si;
1866
1867                si_meminfo(&si);
1868                gtt_size = min(max((AMDGPU_DEFAULT_GTT_SIZE_MB << 20),
1869                               adev->gmc.mc_vram_size),
1870                               ((uint64_t)si.totalram * si.mem_unit * 3/4));
1871        } else {
1872                gtt_size = (uint64_t)amdgpu_gtt_size << 20;
1873        }
1874
1875        /* Initialize GTT memory pool */
1876        r = amdgpu_gtt_mgr_init(adev, gtt_size);
1877        if (r) {
1878                DRM_ERROR("Failed initializing GTT heap.\n");
1879                return r;
1880        }
1881        DRM_INFO("amdgpu: %uM of GTT memory ready.\n",
1882                 (unsigned)(gtt_size / (1024 * 1024)));
1883
1884        /* Initialize various on-chip memory pools */
1885        r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_GDS, adev->gds.gds_size);
1886        if (r) {
1887                DRM_ERROR("Failed initializing GDS heap.\n");
1888                return r;
1889        }
1890
1891        r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_GWS, adev->gds.gws_size);
1892        if (r) {
1893                DRM_ERROR("Failed initializing gws heap.\n");
1894                return r;
1895        }
1896
1897        r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_OA, adev->gds.oa_size);
1898        if (r) {
1899                DRM_ERROR("Failed initializing oa heap.\n");
1900                return r;
1901        }
1902
1903        return 0;
1904}
1905
1906/*
1907 * amdgpu_ttm_fini - De-initialize the TTM memory pools
1908 */
1909void amdgpu_ttm_fini(struct amdgpu_device *adev)
1910{
1911        if (!adev->mman.initialized)
1912                return;
1913
1914        amdgpu_ttm_training_reserve_vram_fini(adev);
1915        /* return the stolen vga memory back to VRAM */
1916        amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
1917        amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
1918        /* return the IP Discovery TMR memory back to VRAM */
1919        amdgpu_bo_free_kernel(&adev->mman.discovery_memory, NULL, NULL);
1920        amdgpu_ttm_fw_reserve_vram_fini(adev);
1921
1922        if (adev->mman.aper_base_kaddr)
1923                iounmap(adev->mman.aper_base_kaddr);
1924        adev->mman.aper_base_kaddr = NULL;
1925
1926        amdgpu_vram_mgr_fini(adev);
1927        amdgpu_gtt_mgr_fini(adev);
1928        ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_GDS);
1929        ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_GWS);
1930        ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_OA);
1931        ttm_bo_device_release(&adev->mman.bdev);
1932        adev->mman.initialized = false;
1933        DRM_INFO("amdgpu: ttm finalized\n");
1934}
1935
1936/**
1937 * amdgpu_ttm_set_buffer_funcs_status - enable/disable use of buffer functions
1938 *
1939 * @adev: amdgpu_device pointer
1940 * @enable: true when we can use buffer functions.
1941 *
1942 * Enable/disable use of buffer functions during suspend/resume. This should
1943 * only be called at bootup or when userspace isn't running.
1944 */
1945void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
1946{
1947        struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);
1948        uint64_t size;
1949        int r;
1950
1951        if (!adev->mman.initialized || amdgpu_in_reset(adev) ||
1952            adev->mman.buffer_funcs_enabled == enable)
1953                return;
1954
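            /*
             * Enabling creates the kernel-priority scheduler entity used for
             * buffer moves and clears on the buffer_funcs ring; disabling
             * destroys it and drops the VRAM manager's pending move fence.
             */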
1955        if (enable) {
1956                struct amdgpu_ring *ring;
1957                struct drm_gpu_scheduler *sched;
1958
1959                ring = adev->mman.buffer_funcs_ring;
1960                sched = &ring->sched;
1961                r = drm_sched_entity_init(&adev->mman.entity,
1962                                          DRM_SCHED_PRIORITY_KERNEL, &sched,
1963                                          1, NULL);
1964                if (r) {
1965                        DRM_ERROR("Failed setting up TTM BO move entity (%d)\n",
1966                                  r);
1967                        return;
1968                }
1969        } else {
1970                drm_sched_entity_destroy(&adev->mman.entity);
1971                dma_fence_put(man->move);
1972                man->move = NULL;
1973        }
1974
1975        /* this just adjusts TTM's idea of the VRAM size, which sets lpfn to the correct value */
1976        if (enable)
1977                size = adev->gmc.real_vram_size;
1978        else
1979                size = adev->gmc.visible_vram_size;
1980        man->size = size >> PAGE_SHIFT;
1981        adev->mman.buffer_funcs_enabled = enable;
1982}
1983
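    /*
     * amdgpu_ttm_fault - CPU page fault handler for amdgpu BO mappings
     *
     * Reserve the BO, give the driver a chance to move it somewhere CPU
     * accessible (see amdgpu_bo_fault_reserve_notify()), then let TTM insert
     * the page table entries.
     */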
1984static vm_fault_t amdgpu_ttm_fault(struct vm_fault *vmf)
1985{
1986        struct ttm_buffer_object *bo = vmf->vma->vm_private_data;
1987        vm_fault_t ret;
1988
1989        ret = ttm_bo_vm_reserve(bo, vmf);
1990        if (ret)
1991                return ret;
1992
1993        ret = amdgpu_bo_fault_reserve_notify(bo);
1994        if (ret)
1995                goto unlock;
1996
1997        ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
1998                                       TTM_BO_VM_NUM_PREFAULT, 1);
1999        if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
2000                return ret;
2001
2002unlock:
2003        dma_resv_unlock(bo->base.resv);
2004        return ret;
2005}
2006
2007static struct vm_operations_struct amdgpu_ttm_vm_ops = {
2008        .fault = amdgpu_ttm_fault,
2009        .open = ttm_bo_vm_open,
2010        .close = ttm_bo_vm_close,
2011        .access = ttm_bo_vm_access
2012};
2013
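    /*
     * amdgpu_mmap - mmap file operation for the amdgpu DRM device
     *
     * Let TTM set up the mapping and then install amdgpu's vm_ops so that
     * faults are routed through amdgpu_ttm_fault().
     */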
2014int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma)
2015{
2016        struct drm_file *file_priv = filp->private_data;
2017        struct amdgpu_device *adev = drm_to_adev(file_priv->minor->dev);
2018        int r;
2019
2020        r = ttm_bo_mmap(filp, vma, &adev->mman.bdev);
2021        if (unlikely(r != 0))
2022                return r;
2023
2024        vma->vm_ops = &amdgpu_ttm_vm_ops;
2025        return 0;
2026}
2027
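    /**
     * amdgpu_copy_buffer - schedule a GPU copy between two locations
     *
     * @ring: ring to submit the copy to
     * @src_offset: source address in the GPU address space
     * @dst_offset: destination address in the GPU address space
     * @byte_count: number of bytes to copy
     * @resv: reservation object to sync the copy with, may be NULL
     * @fence: returned fence signaling completion of the copy
     * @direct_submit: submit directly to the ring, bypassing the scheduler
     * @vm_needs_flush: flush the VM before the copy is executed
     * @tmz: treat the buffers as TMZ protected (encrypted)
     *
     * Split the copy into chunks of at most copy_max_bytes, emit them into a
     * single IB and submit it either directly or through the mman entity.
     *
     * Return: 0 on success, negative error code on failure.
     */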
2028int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
2029                       uint64_t dst_offset, uint32_t byte_count,
2030                       struct dma_resv *resv,
2031                       struct dma_fence **fence, bool direct_submit,
2032                       bool vm_needs_flush, bool tmz)
2033{
2034        enum amdgpu_ib_pool_type pool = direct_submit ? AMDGPU_IB_POOL_DIRECT :
2035                AMDGPU_IB_POOL_DELAYED;
2036        struct amdgpu_device *adev = ring->adev;
2037        struct amdgpu_job *job;
2038
2039        uint32_t max_bytes;
2040        unsigned num_loops, num_dw;
2041        unsigned i;
2042        int r;
2043
2044        if (direct_submit && !ring->sched.ready) {
2045                DRM_ERROR("Trying to move memory with ring turned off.\n");
2046                return -EINVAL;
2047        }
2048
2049        max_bytes = adev->mman.buffer_funcs->copy_max_bytes;
2050        num_loops = DIV_ROUND_UP(byte_count, max_bytes);
2051        num_dw = ALIGN(num_loops * adev->mman.buffer_funcs->copy_num_dw, 8);
2052
2053        r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, pool, &job);
2054        if (r)
2055                return r;
2056
2057        if (vm_needs_flush) {
2058                job->vm_pd_addr = amdgpu_gmc_pd_addr(adev->gart.bo);
2059                job->vm_needs_flush = true;
2060        }
2061        if (resv) {
2062                r = amdgpu_sync_resv(adev, &job->sync, resv,
2063                                     AMDGPU_SYNC_ALWAYS,
2064                                     AMDGPU_FENCE_OWNER_UNDEFINED);
2065                if (r) {
2066                        DRM_ERROR("sync failed (%d).\n", r);
2067                        goto error_free;
2068                }
2069        }
2070
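            /* emit one copy command per chunk of at most copy_max_bytes */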
2071        for (i = 0; i < num_loops; i++) {
2072                uint32_t cur_size_in_bytes = min(byte_count, max_bytes);
2073
2074                amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_offset,
2075                                        dst_offset, cur_size_in_bytes, tmz);
2076
2077                src_offset += cur_size_in_bytes;
2078                dst_offset += cur_size_in_bytes;
2079                byte_count -= cur_size_in_bytes;
2080        }
2081
2082        amdgpu_ring_pad_ib(ring, &job->ibs[0]);
2083        WARN_ON(job->ibs[0].length_dw > num_dw);
2084        if (direct_submit)
2085                r = amdgpu_job_submit_direct(job, ring, fence);
2086        else
2087                r = amdgpu_job_submit(job, &adev->mman.entity,
2088                                      AMDGPU_FENCE_OWNER_UNDEFINED, fence);
2089        if (r)
2090                goto error_free;
2091
2092        return r;
2093
2094error_free:
2095        amdgpu_job_free(job);
2096        DRM_ERROR("Error scheduling IBs (%d)\n", r);
2097        return r;
2098}
2099
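    /**
     * amdgpu_fill_buffer - fill a buffer object with a 32-bit pattern
     *
     * @bo: the buffer object to fill
     * @src_data: the 32-bit value to write
     * @resv: reservation object to sync the fill with, may be NULL
     * @fence: returned fence signaling completion of the fill
     *
     * Walk the drm_mm nodes backing the BO and emit fill commands of at most
     * fill_max_bytes each into a single IB submitted through the mman entity.
     *
     * Return: 0 on success, negative error code on failure.
     */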
2100int amdgpu_fill_buffer(struct amdgpu_bo *bo,
2101                       uint32_t src_data,
2102                       struct dma_resv *resv,
2103                       struct dma_fence **fence)
2104{
2105        struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
2106        uint32_t max_bytes = adev->mman.buffer_funcs->fill_max_bytes;
2107        struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
2108
2109        struct drm_mm_node *mm_node;
2110        unsigned long num_pages;
2111        unsigned int num_loops, num_dw;
2112
2113        struct amdgpu_job *job;
2114        int r;
2115
2116        if (!adev->mman.buffer_funcs_enabled) {
2117                DRM_ERROR("Trying to clear memory with ring turned off.\n");
2118                return -EINVAL;
2119        }
2120
2121        if (bo->tbo.mem.mem_type == TTM_PL_TT) {
2122                r = amdgpu_ttm_alloc_gart(&bo->tbo);
2123                if (r)
2124                        return r;
2125        }
2126
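            /* first pass over the nodes to count how many fill commands are needed */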
2127        num_pages = bo->tbo.num_pages;
2128        mm_node = bo->tbo.mem.mm_node;
2129        num_loops = 0;
2130        while (num_pages) {
2131                uint64_t byte_count = mm_node->size << PAGE_SHIFT;
2132
2133                num_loops += DIV_ROUND_UP_ULL(byte_count, max_bytes);
2134                num_pages -= mm_node->size;
2135                ++mm_node;
2136        }
2137        num_dw = num_loops * adev->mman.buffer_funcs->fill_num_dw;
2138
2139        /* for IB padding */
2140        num_dw += 64;
2141
2142        r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, AMDGPU_IB_POOL_DELAYED,
2143                                     &job);
2144        if (r)
2145                return r;
2146
2147        if (resv) {
2148                r = amdgpu_sync_resv(adev, &job->sync, resv,
2149                                     AMDGPU_SYNC_ALWAYS,
2150                                     AMDGPU_FENCE_OWNER_UNDEFINED);
2151                if (r) {
2152                        DRM_ERROR("sync failed (%d).\n", r);
2153                        goto error_free;
2154                }
2155        }
2156
2157        num_pages = bo->tbo.num_pages;
2158        mm_node = bo->tbo.mem.mm_node;
2159
2160        while (num_pages) {
2161                uint64_t byte_count = mm_node->size << PAGE_SHIFT;
2162                uint64_t dst_addr;
2163
2164                dst_addr = amdgpu_mm_node_addr(&bo->tbo, mm_node, &bo->tbo.mem);
2165                while (byte_count) {
2166                        uint32_t cur_size_in_bytes = min_t(uint64_t, byte_count,
2167                                                           max_bytes);
2168
2169                        amdgpu_emit_fill_buffer(adev, &job->ibs[0], src_data,
2170                                                dst_addr, cur_size_in_bytes);
2171
2172                        dst_addr += cur_size_in_bytes;
2173                        byte_count -= cur_size_in_bytes;
2174                }
2175
2176                num_pages -= mm_node->size;
2177                ++mm_node;
2178        }
2179
2180        amdgpu_ring_pad_ib(ring, &job->ibs[0]);
2181        WARN_ON(job->ibs[0].length_dw > num_dw);
2182        r = amdgpu_job_submit(job, &adev->mman.entity,
2183                              AMDGPU_FENCE_OWNER_UNDEFINED, fence);
2184        if (r)
2185                goto error_free;
2186
2187        return 0;
2188
2189error_free:
2190        amdgpu_job_free(job);
2191        return r;
2192}
2193
2194#if defined(CONFIG_DEBUG_FS)
2195
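    /*
     * amdgpu_mm_dump_table - debugfs dump of a single TTM memory heap
     *
     * The heap (VRAM, GTT, GDS, GWS or OA) is selected through the data field
     * of the drm_info_list entry and printed via the manager's debug() hook.
     */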
2196static int amdgpu_mm_dump_table(struct seq_file *m, void *data)
2197{
2198        struct drm_info_node *node = (struct drm_info_node *)m->private;
2199        unsigned ttm_pl = (uintptr_t)node->info_ent->data;
2200        struct drm_device *dev = node->minor->dev;
2201        struct amdgpu_device *adev = drm_to_adev(dev);
2202        struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev, ttm_pl);
2203        struct drm_printer p = drm_seq_file_printer(m);
2204
2205        man->func->debug(man, &p);
2206        return 0;
2207}
2208
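    /* amdgpu_ttm_pool_debugfs - debugfs dump of the TTM page pool state */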
2209static int amdgpu_ttm_pool_debugfs(struct seq_file *m, void *data)
2210{
2211        struct drm_info_node *node = (struct drm_info_node *)m->private;
2212        struct drm_device *dev = node->minor->dev;
2213        struct amdgpu_device *adev = drm_to_adev(dev);
2214
2215        return ttm_pool_debugfs(&adev->mman.bdev.pool, m);
2216}
2217
2218static const struct drm_info_list amdgpu_ttm_debugfs_list[] = {
2219        {"amdgpu_vram_mm", amdgpu_mm_dump_table, 0, (void *)TTM_PL_VRAM},
2220        {"amdgpu_gtt_mm", amdgpu_mm_dump_table, 0, (void *)TTM_PL_TT},
2221        {"amdgpu_gds_mm", amdgpu_mm_dump_table, 0, (void *)AMDGPU_PL_GDS},
2222        {"amdgpu_gws_mm", amdgpu_mm_dump_table, 0, (void *)AMDGPU_PL_GWS},
2223        {"amdgpu_oa_mm", amdgpu_mm_dump_table, 0, (void *)AMDGPU_PL_OA},
2224        {"ttm_page_pool", amdgpu_ttm_pool_debugfs, 0, NULL},
2225};
2226
2227/*
2228 * amdgpu_ttm_vram_read - Linear read access to VRAM
2229 *
2230 * Accesses VRAM via MMIO for debugging purposes.
2231 */
2232static ssize_t amdgpu_ttm_vram_read(struct file *f, char __user *buf,
2233                                    size_t size, loff_t *pos)
2234{
2235        struct amdgpu_device *adev = file_inode(f)->i_private;
2236        ssize_t result = 0;
2237
2238        if (size & 0x3 || *pos & 0x3)
2239                return -EINVAL;
2240
2241        if (*pos >= adev->gmc.mc_vram_size)
2242                return -ENXIO;
2243
2244        size = min(size, (size_t)(adev->gmc.mc_vram_size - *pos));
2245        while (size) {
2246                size_t bytes = min(size, AMDGPU_TTM_VRAM_MAX_DW_READ * 4);
2247                uint32_t value[AMDGPU_TTM_VRAM_MAX_DW_READ];
2248
2249                amdgpu_device_vram_access(adev, *pos, value, bytes, false);
2250                if (copy_to_user(buf, value, bytes))
2251                        return -EFAULT;
2252
2253                result += bytes;
2254                buf += bytes;
2255                *pos += bytes;
2256                size -= bytes;
2257        }
2258
2259        return result;
2260}
2261
2262/*
2263 * amdgpu_ttm_vram_write - Linear write access to VRAM
2264 *
2265 * Accesses VRAM via MMIO for debugging purposes.
2266 */
2267static ssize_t amdgpu_ttm_vram_write(struct file *f, const char __user *buf,
2268                                    size_t size, loff_t *pos)
2269{
2270        struct amdgpu_device *adev = file_inode(f)->i_private;
2271        ssize_t result = 0;
2272        int r;
2273
2274        if (size & 0x3 || *pos & 0x3)
2275                return -EINVAL;
2276
2277        if (*pos >= adev->gmc.mc_vram_size)
2278                return -ENXIO;
2279
2280        while (size) {
2281                unsigned long flags;
2282                uint32_t value;
2283
2284                if (*pos >= adev->gmc.mc_vram_size)
2285                        return result;
2286
2287                r = get_user(value, (uint32_t *)buf);
2288                if (r)
2289                        return r;
2290
2291                spin_lock_irqsave(&adev->mmio_idx_lock, flags);
2292                WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)*pos) | 0x80000000);
2293                WREG32_NO_KIQ(mmMM_INDEX_HI, *pos >> 31);
2294                WREG32_NO_KIQ(mmMM_DATA, value);
2295                spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
2296
2297                result += 4;
2298                buf += 4;
2299                *pos += 4;
2300                size -= 4;
2301        }
2302
2303        return result;
2304}
2305
2306static const struct file_operations amdgpu_ttm_vram_fops = {
2307        .owner = THIS_MODULE,
2308        .read = amdgpu_ttm_vram_read,
2309        .write = amdgpu_ttm_vram_write,
2310        .llseek = default_llseek,
2311};
2312
2313#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
2314
2315/*
2316 * amdgpu_ttm_gtt_read - Linear read access to GTT memory
2317 */
2318static ssize_t amdgpu_ttm_gtt_read(struct file *f, char __user *buf,
2319                                   size_t size, loff_t *pos)
2320{
2321        struct amdgpu_device *adev = file_inode(f)->i_private;
2322        ssize_t result = 0;
2323        int r;
2324
2325        while (size) {
2326                loff_t p = *pos / PAGE_SIZE;
2327                unsigned off = *pos & ~PAGE_MASK;
2328                size_t cur_size = min_t(size_t, size, PAGE_SIZE - off);
2329                struct page *page;
2330                void *ptr;
2331
2332                if (p >= adev->gart.num_cpu_pages)
2333                        return result;
2334
2335                page = adev->gart.pages[p];
2336                if (page) {
2337                        ptr = kmap(page);
2338                        ptr += off;
2339
2340                        r = copy_to_user(buf, ptr, cur_size);
2341                        kunmap(adev->gart.pages[p]);
2342                } else
2343                        r = clear_user(buf, cur_size);
2344
2345                if (r)
2346                        return -EFAULT;
2347
2348                result += cur_size;
2349                buf += cur_size;
2350                *pos += cur_size;
2351                size -= cur_size;
2352        }
2353
2354        return result;
2355}
2356
2357static const struct file_operations amdgpu_ttm_gtt_fops = {
2358        .owner = THIS_MODULE,
2359        .read = amdgpu_ttm_gtt_read,
2360        .llseek = default_llseek
2361};
2362
2363#endif
2364
2365/*
2366 * amdgpu_iomem_read - Virtual read access to GPU mapped memory
2367 *
2368 * This function is used to read memory that has been mapped to the
2369 * GPU and the known addresses are not physical addresses but instead
2370 * bus addresses (e.g., what you'd put in an IB or ring buffer).
2371 */
2372static ssize_t amdgpu_iomem_read(struct file *f, char __user *buf,
2373                                 size_t size, loff_t *pos)
2374{
2375        struct amdgpu_device *adev = file_inode(f)->i_private;
2376        struct iommu_domain *dom;
2377        ssize_t result = 0;
2378        int r;
2379
2380        /* retrieve the IOMMU domain if any for this device */
2381        dom = iommu_get_domain_for_dev(adev->dev);
2382
2383        while (size) {
2384                phys_addr_t addr = *pos & PAGE_MASK;
2385                loff_t off = *pos & ~PAGE_MASK;
2386                size_t bytes = PAGE_SIZE - off;
2387                unsigned long pfn;
2388                struct page *p;
2389                void *ptr;
2390
2391                bytes = bytes < size ? bytes : size;
2392
2393                /* Translate the bus address to a physical address.  If
2394                 * the domain is NULL it means there is no IOMMU active
2395                 * and the address translation is the identity
2396                 */
2397                addr = dom ? iommu_iova_to_phys(dom, addr) : addr;
2398
2399                pfn = addr >> PAGE_SHIFT;
2400                if (!pfn_valid(pfn))
2401                        return -EPERM;
2402
2403                p = pfn_to_page(pfn);
2404                if (p->mapping != adev->mman.bdev.dev_mapping)
2405                        return -EPERM;
2406
2407                ptr = kmap(p);
2408                r = copy_to_user(buf, ptr + off, bytes);
2409                kunmap(p);
2410                if (r)
2411                        return -EFAULT;
2412
2413                size -= bytes;
2414                *pos += bytes;
2415                result += bytes;
2416        }
2417
2418        return result;
2419}
2420
2421/*
2422 * amdgpu_iomem_write - Virtual write access to GPU mapped memory
2423 *
2424 * This function is used to write memory that has been mapped to the
2425 * GPU and the known addresses are not physical addresses but instead
2426 * bus addresses (e.g., what you'd put in an IB or ring buffer).
2427 */
2428static ssize_t amdgpu_iomem_write(struct file *f, const char __user *buf,
2429                                 size_t size, loff_t *pos)
2430{
2431        struct amdgpu_device *adev = file_inode(f)->i_private;
2432        struct iommu_domain *dom;
2433        ssize_t result = 0;
2434        int r;
2435
2436        dom = iommu_get_domain_for_dev(adev->dev);
2437
2438        while (size) {
2439                phys_addr_t addr = *pos & PAGE_MASK;
2440                loff_t off = *pos & ~PAGE_MASK;
2441                size_t bytes = PAGE_SIZE - off;
2442                unsigned long pfn;
2443                struct page *p;
2444                void *ptr;
2445
2446                bytes = bytes < size ? bytes : size;
2447
2448                addr = dom ? iommu_iova_to_phys(dom, addr) : addr;
2449
2450                pfn = addr >> PAGE_SHIFT;
2451                if (!pfn_valid(pfn))
2452                        return -EPERM;
2453
2454                p = pfn_to_page(pfn);
2455                if (p->mapping != adev->mman.bdev.dev_mapping)
2456                        return -EPERM;
2457
2458                ptr = kmap(p);
2459                r = copy_from_user(ptr + off, buf, bytes);
2460                kunmap(p);
2461                if (r)
2462                        return -EFAULT;
2463
2464                size -= bytes;
2465                *pos += bytes;
2466                result += bytes;
2467        }
2468
2469        return result;
2470}
2471
2472static const struct file_operations amdgpu_ttm_iomem_fops = {
2473        .owner = THIS_MODULE,
2474        .read = amdgpu_iomem_read,
2475        .write = amdgpu_iomem_write,
2476        .llseek = default_llseek
2477};
2478
2479static const struct {
2480        char *name;
2481        const struct file_operations *fops;
2482        int domain;
2483} ttm_debugfs_entries[] = {
2484        { "amdgpu_vram", &amdgpu_ttm_vram_fops, TTM_PL_VRAM },
2485#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
2486        { "amdgpu_gtt", &amdgpu_ttm_gtt_fops, TTM_PL_TT },
2487#endif
2488        { "amdgpu_iomem", &amdgpu_ttm_iomem_fops, TTM_PL_SYSTEM },
2489};
2490
2491#endif
2492
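    /*
     * amdgpu_ttm_debugfs_init - create the TTM debugfs files
     *
     * Register the raw memory access files from ttm_debugfs_entries (sizing
     * the VRAM and GTT files so tools can seek within them) and the memory
     * manager dump tables under the primary DRM minor's debugfs directory.
     */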
2493int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev)
2494{
2495#if defined(CONFIG_DEBUG_FS)
2496        unsigned count;
2497
2498        struct drm_minor *minor = adev_to_drm(adev)->primary;
2499        struct dentry *ent, *root = minor->debugfs_root;
2500
2501        for (count = 0; count < ARRAY_SIZE(ttm_debugfs_entries); count++) {
2502                ent = debugfs_create_file(
2503                                ttm_debugfs_entries[count].name,
2504                                S_IFREG | S_IRUGO, root,
2505                                adev,
2506                                ttm_debugfs_entries[count].fops);
2507                if (IS_ERR(ent))
2508                        return PTR_ERR(ent);
2509                if (ttm_debugfs_entries[count].domain == TTM_PL_VRAM)
2510                        i_size_write(ent->d_inode, adev->gmc.mc_vram_size);
2511                else if (ttm_debugfs_entries[count].domain == TTM_PL_TT)
2512                        i_size_write(ent->d_inode, adev->gmc.gart_size);
2513                adev->mman.debugfs_entries[count] = ent;
2514        }
2515
2516        count = ARRAY_SIZE(amdgpu_ttm_debugfs_list);
2517        return amdgpu_debugfs_add_files(adev, amdgpu_ttm_debugfs_list, count);
2518#else
2519        return 0;
2520#endif
2521}
2522