linux/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König
 */
#ifndef __AMDGPU_VM_H__
#define __AMDGPU_VM_H__

#include <linux/idr.h>
#include <linux/kfifo.h>
#include <linux/rbtree.h>
#include <drm/gpu_scheduler.h>
#include <drm/drm_file.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <linux/sched/mm.h>

#include "amdgpu_sync.h"
#include "amdgpu_ring.h"
#include "amdgpu_ids.h"

struct amdgpu_bo_va;
struct amdgpu_job;
struct amdgpu_bo_list_entry;

/*
 * GPUVM handling
 */

/* Maximum number of PTEs the hardware can write with one command */
#define AMDGPU_VM_MAX_UPDATE_SIZE       0x3FFFF

/* number of entries in page table */
#define AMDGPU_VM_PTE_COUNT(adev) (1 << (adev)->vm_manager.block_size)

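/*
 * Illustrative worked example, not part of the driver: with the common
 * block_size of 9, AMDGPU_VM_PTE_COUNT() yields 1 << 9 = 512 entries per
 * page table block. At 8 bytes per PTE and 4 KiB GPU pages, each block
 * therefore needs 4 KiB of storage and covers 2 MiB of address space.
 */
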
#define AMDGPU_PTE_VALID        (1ULL << 0)
#define AMDGPU_PTE_SYSTEM       (1ULL << 1)
#define AMDGPU_PTE_SNOOPED      (1ULL << 2)

/* VI only */
#define AMDGPU_PTE_EXECUTABLE   (1ULL << 4)

#define AMDGPU_PTE_READABLE     (1ULL << 5)
#define AMDGPU_PTE_WRITEABLE    (1ULL << 6)

#define AMDGPU_PTE_FRAG(x)      ((x & 0x1fULL) << 7)

/* TILED for VEGA10, reserved for older ASICs */
#define AMDGPU_PTE_PRT          (1ULL << 51)

/* PDE is handled as PTE for VEGA10 */
#define AMDGPU_PDE_PTE          (1ULL << 54)

#define AMDGPU_PTE_LOG          (1ULL << 55)

/* PTE is handled as PDE for VEGA10 (Translate Further) */
#define AMDGPU_PTE_TF           (1ULL << 56)

/* PDE Block Fragment Size for VEGA10 */
#define AMDGPU_PDE_BFS(a)       ((uint64_t)a << 59)


/* For GFX9 */
#define AMDGPU_PTE_MTYPE_VG10(a)        ((uint64_t)(a) << 57)
#define AMDGPU_PTE_MTYPE_VG10_MASK      AMDGPU_PTE_MTYPE_VG10(3ULL)

#define AMDGPU_MTYPE_NC 0
#define AMDGPU_MTYPE_CC 2

#define AMDGPU_PTE_DEFAULT_ATC  (AMDGPU_PTE_SYSTEM      \
                                | AMDGPU_PTE_SNOOPED    \
                                | AMDGPU_PTE_EXECUTABLE \
                                | AMDGPU_PTE_READABLE   \
                                | AMDGPU_PTE_WRITEABLE  \
                                | AMDGPU_PTE_MTYPE_VG10(AMDGPU_MTYPE_CC))

/* gfx10 */
#define AMDGPU_PTE_MTYPE_NV10(a)       ((uint64_t)(a) << 48)
#define AMDGPU_PTE_MTYPE_NV10_MASK     AMDGPU_PTE_MTYPE_NV10(7ULL)

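/*
 * Illustrative sketch only, not part of the driver: how the PTE bits above
 * are typically combined for a cacheable, snooped system-memory mapping on
 * GFX9. The real flag selection happens in amdgpu_vm_bo_update() and the
 * per-ASIC GMC callbacks; the mtype choice here is just an example.
 */
static inline uint64_t amdgpu_vm_example_system_pte_flags(void)
{
        return AMDGPU_PTE_VALID | AMDGPU_PTE_SYSTEM | AMDGPU_PTE_SNOOPED |
               AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE |
               AMDGPU_PTE_MTYPE_VG10(AMDGPU_MTYPE_NC);
}
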
/* How to program VM fault handling */
#define AMDGPU_VM_FAULT_STOP_NEVER      0
#define AMDGPU_VM_FAULT_STOP_FIRST      1
#define AMDGPU_VM_FAULT_STOP_ALWAYS     2

/* Reserve 4MB VRAM for page tables */
#define AMDGPU_VM_RESERVED_VRAM         (4ULL << 20)

/* max number of VMHUB */
#define AMDGPU_MAX_VMHUBS                       3
#define AMDGPU_GFXHUB_0                         0
#define AMDGPU_MMHUB_0                          1
#define AMDGPU_MMHUB_1                          2

/* hardcode that limit for now */
#define AMDGPU_VA_RESERVED_SIZE                 (1ULL << 20)

/* max number of VMIDs dedicated to a process */
#define AMDGPU_VM_MAX_RESERVED_VMID     1

#define AMDGPU_VM_CONTEXT_GFX 0
#define AMDGPU_VM_CONTEXT_COMPUTE 1

/* See vm_update_mode */
#define AMDGPU_VM_USE_CPU_FOR_GFX (1 << 0)
#define AMDGPU_VM_USE_CPU_FOR_COMPUTE (1 << 1)

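/*
 * Illustrative sketch only, not part of the driver: how the bits above are
 * typically tested to decide whether a VM should use CPU based page table
 * updates. Compare amdgpu_vm_init(), which performs the real check against
 * adev->vm_manager.vm_update_mode.
 */
static inline bool amdgpu_vm_example_use_cpu_updates(int vm_update_mode,
                                                     bool is_compute)
{
        return vm_update_mode & (is_compute ? AMDGPU_VM_USE_CPU_FOR_COMPUTE :
                                              AMDGPU_VM_USE_CPU_FOR_GFX);
}
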
/* VMPT level enumeration, the hierarchy is:
 * PDB2->PDB1->PDB0->PTB
 */
enum amdgpu_vm_level {
        AMDGPU_VM_PDB2,
        AMDGPU_VM_PDB1,
        AMDGPU_VM_PDB0,
        AMDGPU_VM_PTB
};

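/*
 * Illustrative worked example, not part of the driver: with the common
 * 4 KiB GPU page / 512-entry (9 bits per level) configuration, a 48-bit
 * virtual address is split as
 *
 *   47..39  PDB2 index
 *   38..30  PDB1 index
 *   29..21  PDB0 index
 *   20..12  PTB index
 *   11..0   page offset
 *
 * The actual number of levels and the root level are ASIC specific and are
 * kept in amdgpu_vm_manager.num_level and .root_level.
 */
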
/* base structure for tracking BO usage in a VM */
struct amdgpu_vm_bo_base {
        /* constant after initialization */
        struct amdgpu_vm                *vm;
        struct amdgpu_bo                *bo;

        /* protected by bo being reserved */
        struct amdgpu_vm_bo_base        *next;

        /* protected by spinlock */
        struct list_head                vm_status;

        /* protected by the BO being reserved */
        bool                            moved;
};

struct amdgpu_vm_pt {
        struct amdgpu_vm_bo_base        base;

        /* array of page tables, one for each directory entry */
        struct amdgpu_vm_pt             *entries;
};

/* provided by hw blocks that can write ptes, e.g., sdma */
struct amdgpu_vm_pte_funcs {
        /* number of dw to reserve per operation */
        unsigned        copy_pte_num_dw;

        /* copy pte entries from GART */
        void (*copy_pte)(struct amdgpu_ib *ib,
                         uint64_t pe, uint64_t src,
                         unsigned count);

        /* write pte one entry at a time with addr mapping */
        void (*write_pte)(struct amdgpu_ib *ib, uint64_t pe,
                          uint64_t value, unsigned count,
                          uint32_t incr);
        /* for linear pte/pde updates without addr mapping */
        void (*set_pte_pde)(struct amdgpu_ib *ib,
                            uint64_t pe,
                            uint64_t addr, unsigned count,
                            uint32_t incr, uint64_t flags);
};

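/* Identification of the task owning a VM, recorded at command submission
 * time (amdgpu_vm_set_task_info()) and reported back in GPU page fault
 * messages (amdgpu_vm_get_task_info()).
 */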
struct amdgpu_task_info {
        char    process_name[TASK_COMM_LEN];
        char    task_name[TASK_COMM_LEN];
        pid_t   pid;
        pid_t   tgid;
};

/**
 * struct amdgpu_vm_update_params
 *
 * Encapsulate some VM table update parameters to reduce
 * the number of function parameters
 *
 */
struct amdgpu_vm_update_params {

        /**
         * @adev: amdgpu device we do this update for
         */
        struct amdgpu_device *adev;

        /**
         * @vm: optional amdgpu_vm we do this update for
         */
        struct amdgpu_vm *vm;

        /**
         * @direct: if changes should be made directly
         */
        bool direct;

        /**
         * @pages_addr:
         *
         * DMA addresses to use for mapping
         */
        dma_addr_t *pages_addr;

        /**
         * @job: job to use for hw submission
         */
        struct amdgpu_job *job;

        /**
         * @num_dw_left: number of dw left for the IB
         */
        unsigned int num_dw_left;
};

struct amdgpu_vm_update_funcs {
        int (*map_table)(struct amdgpu_bo *bo);
        int (*prepare)(struct amdgpu_vm_update_params *p, void *owner,
                       struct dma_fence *exclusive);
        int (*update)(struct amdgpu_vm_update_params *p,
                      struct amdgpu_bo *bo, uint64_t pe, uint64_t addr,
                      unsigned count, uint32_t incr, uint64_t flags);
        int (*commit)(struct amdgpu_vm_update_params *p,
                      struct dma_fence **fence);
};
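
/*
 * Illustrative sketch only, not part of the driver: the order in which a
 * table update backend is driven. The real implementations are
 * amdgpu_vm_cpu_funcs and amdgpu_vm_sdma_funcs (declared further down);
 * error handling is omitted and the update arguments are placeholders.
 */
static inline int amdgpu_vm_example_run_update(const struct amdgpu_vm_update_funcs *funcs,
                                               struct amdgpu_vm_update_params *p,
                                               struct amdgpu_bo *table,
                                               struct dma_fence **fence)
{
        /* make the page table BO accessible to the backend */
        funcs->map_table(table);
        /* prepare the update, e.g. allocate a job and sync to fences */
        funcs->prepare(p, NULL, NULL);
        /* write one valid entry at the start of the table */
        funcs->update(p, table, 0, 0, 1, 0, AMDGPU_PTE_VALID);
        /* submit any pending work and hand back the resulting fence */
        return funcs->commit(p, fence);
}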

struct amdgpu_vm {
        /* tree of virtual addresses mapped */
        struct rb_root_cached   va;

        /* Lock to prevent eviction while we are updating page tables
         * use vm_eviction_lock/unlock(vm)
         */
        struct mutex            eviction_lock;
        bool                    evicting;
        unsigned int            saved_flags;

        /* BOs which need a validation */
        struct list_head        evicted;

        /* PT BOs which relocated and whose parent needs an update */
        struct list_head        relocated;

        /* per VM BOs moved, but not yet updated in the PT */
        struct list_head        moved;

        /* All BOs of this VM not currently in the state machine */
        struct list_head        idle;

        /* regular invalidated BOs, but not yet updated in the PT */
        struct list_head        invalidated;
        spinlock_t              invalidated_lock;

        /* BO mappings freed, but not yet updated in the PT */
        struct list_head        freed;

        /* contains the page directory */
        struct amdgpu_vm_pt     root;
        struct dma_fence        *last_update;

        /* Scheduler entities for page table updates */
        struct drm_sched_entity direct;
        struct drm_sched_entity delayed;

        /* Last submission to the scheduler entities */
        struct dma_fence        *last_direct;
        struct dma_fence        *last_delayed;

        unsigned int            pasid;
        /* dedicated to vm */
        struct amdgpu_vmid      *reserved_vmid[AMDGPU_MAX_VMHUBS];

        /* Flag to indicate if VM tables are updated by CPU or GPU (SDMA) */
        bool                                    use_cpu_for_update;

        /* Functions to use for VM table updates */
        const struct amdgpu_vm_update_funcs     *update_funcs;

        /* Flag to indicate ATS support from PTE for GFX9 */
        bool                    pte_support_ats;

        /* Up to 128 pending retry page faults */
        DECLARE_KFIFO(faults, u64, 128);

        /* Points to the KFD process VM info */
        struct amdkfd_process_info *process_info;

        /* List node in amdkfd_process_info.vm_list_head */
        struct list_head        vm_list_node;

        /* Valid while the PD is reserved or fenced */
        uint64_t                pd_phys_addr;

        /* Some basic info about the task */
        struct amdgpu_task_info task_info;

        /* Positions of this VM's BOs for bulk LRU moves */
        struct ttm_lru_bulk_move lru_bulk_move;
        /* marks whether the bulk move can be done */
        bool                    bulk_moveable;
        /* Flag to indicate if VM is used for compute */
        bool                    is_compute_context;
};

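/* Device wide GPUVM state, shared by all VMs of one amdgpu device */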
struct amdgpu_vm_manager {
        /* Handling of VMIDs */
        struct amdgpu_vmid_mgr                  id_mgr[AMDGPU_MAX_VMHUBS];

        /* Handling of VM fences */
        u64                                     fence_context;
        unsigned                                seqno[AMDGPU_MAX_RINGS];

        uint64_t                                max_pfn;
        uint32_t                                num_level;
        uint32_t                                block_size;
        uint32_t                                fragment_size;
        enum amdgpu_vm_level                    root_level;
        /* vram base address for page table entry */
        u64                                     vram_base_offset;
        /* vm pte handling */
        const struct amdgpu_vm_pte_funcs        *vm_pte_funcs;
        struct drm_gpu_scheduler                *vm_pte_scheds[AMDGPU_MAX_RINGS];
        unsigned                                vm_pte_num_scheds;
        struct amdgpu_ring                      *page_fault;

        /* partial resident texture handling */
        spinlock_t                              prt_lock;
        atomic_t                                num_prt_users;

        /* controls how VM page tables are updated for Graphics and Compute.
         * BIT0[= 0] Graphics updated by SDMA [= 1] by CPU
         * BIT1[= 0] Compute updated by SDMA [= 1] by CPU
         */
        int                                     vm_update_mode;

        /* PASID to VM mapping, will be used in interrupt context to
         * look up VM of a page fault
         */
        struct idr                              pasid_idr;
        spinlock_t                              pasid_lock;

        /* counter of mapped memory through xgmi */
        uint32_t                                xgmi_map_counter;
        struct mutex                            lock_pstate;
};

#define amdgpu_vm_copy_pte(adev, ib, pe, src, count) ((adev)->vm_manager.vm_pte_funcs->copy_pte((ib), (pe), (src), (count)))
#define amdgpu_vm_write_pte(adev, ib, pe, value, count, incr) ((adev)->vm_manager.vm_pte_funcs->write_pte((ib), (pe), (value), (count), (incr)))
#define amdgpu_vm_set_pte_pde(adev, ib, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->set_pte_pde((ib), (pe), (addr), (count), (incr), (flags)))
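
/*
 * Illustrative sketch only, not part of the driver: how the set_pte_pde
 * callback wrapped above is typically used by the SDMA update backend to
 * emit a linear PTE write into an indirect buffer. The addresses and counts
 * are made-up placeholders; amdgpu_vm_set_pte_pde(adev, ...) expands to the
 * same call with pte_funcs taken from adev->vm_manager.
 */
static inline void amdgpu_vm_example_write_linear(const struct amdgpu_vm_pte_funcs *pte_funcs,
                                                  struct amdgpu_ib *ib)
{
        /* write 16 PTEs starting at GPU address 0x1000, mapping a contiguous
         * region starting at 0x200000 and advancing by 4 KiB per entry
         */
        pte_funcs->set_pte_pde(ib, 0x1000, 0x200000, 16, 4096,
                               AMDGPU_PTE_VALID | AMDGPU_PTE_READABLE |
                               AMDGPU_PTE_WRITEABLE);
}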

extern const struct amdgpu_vm_update_funcs amdgpu_vm_cpu_funcs;
extern const struct amdgpu_vm_update_funcs amdgpu_vm_sdma_funcs;

void amdgpu_vm_manager_init(struct amdgpu_device *adev);
void amdgpu_vm_manager_fini(struct amdgpu_device *adev);

long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout);
int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
                   int vm_context, unsigned int pasid);
int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm, unsigned int pasid);
void amdgpu_vm_release_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm);
void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm);
void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
                         struct list_head *validated,
                         struct amdgpu_bo_list_entry *entry);
bool amdgpu_vm_ready(struct amdgpu_vm *vm);
int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
                              int (*callback)(void *p, struct amdgpu_bo *bo),
                              void *param);
int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_pipe_sync);
int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
                          struct amdgpu_vm *vm, bool direct);
int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
                          struct amdgpu_vm *vm,
                          struct dma_fence **fence);
int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
                           struct amdgpu_vm *vm);
int amdgpu_vm_bo_update(struct amdgpu_device *adev,
                        struct amdgpu_bo_va *bo_va,
                        bool clear);
bool amdgpu_vm_evictable(struct amdgpu_bo *bo);
void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
                             struct amdgpu_bo *bo, bool evicted);
uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr);
struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
                                       struct amdgpu_bo *bo);
struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
                                      struct amdgpu_vm *vm,
                                      struct amdgpu_bo *bo);
int amdgpu_vm_bo_map(struct amdgpu_device *adev,
                     struct amdgpu_bo_va *bo_va,
                     uint64_t addr, uint64_t offset,
                     uint64_t size, uint64_t flags);
int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
                             struct amdgpu_bo_va *bo_va,
                             uint64_t addr, uint64_t offset,
                             uint64_t size, uint64_t flags);
int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
                       struct amdgpu_bo_va *bo_va,
                       uint64_t addr);
int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
                                struct amdgpu_vm *vm,
                                uint64_t saddr, uint64_t size);
struct amdgpu_bo_va_mapping *amdgpu_vm_bo_lookup_mapping(struct amdgpu_vm *vm,
                                                         uint64_t addr);
void amdgpu_vm_bo_trace_cs(struct amdgpu_vm *vm, struct ww_acquire_ctx *ticket);
void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
                      struct amdgpu_bo_va *bo_va);
void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size,
                           uint32_t fragment_size_default, unsigned max_level,
                           unsigned max_bits);
int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring,
                                  struct amdgpu_job *job);
void amdgpu_vm_check_compute_bug(struct amdgpu_device *adev);

void amdgpu_vm_get_task_info(struct amdgpu_device *adev, unsigned int pasid,
                             struct amdgpu_task_info *task_info);
bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, unsigned int pasid,
                            uint64_t addr);

void amdgpu_vm_set_task_info(struct amdgpu_vm *vm);

void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev,
                                struct amdgpu_vm *vm);
void amdgpu_vm_del_from_lru_notify(struct ttm_buffer_object *bo);

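/*
 * Illustrative sketch only, not part of the driver: the usual life cycle of
 * a per-VM mapping using the functions declared above. Reservation, locking
 * and error paths are simplified, and the virtual address, offset and size
 * are made-up placeholders.
 */
static inline int amdgpu_vm_example_map_bo(struct amdgpu_device *adev,
                                           struct amdgpu_vm *vm,
                                           struct amdgpu_bo *bo)
{
        struct amdgpu_bo_va *bo_va;
        int r;

        /* start tracking the BO in this VM */
        bo_va = amdgpu_vm_bo_add(adev, vm, bo);
        if (!bo_va)
                return -ENOMEM;

        /* map 1 MiB of the BO at virtual address 0x100000 */
        r = amdgpu_vm_bo_map(adev, bo_va, 0x100000, 0, 0x100000,
                             AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE);
        if (r)
                return r;

        /* write the page table entries backing the new mapping */
        return amdgpu_vm_bo_update(adev, bo_va, false);
}
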
#endif