linux/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König
 */
#ifndef __AMDGPU_VM_H__
#define __AMDGPU_VM_H__

#include <linux/idr.h>
#include <linux/kfifo.h>
#include <linux/rbtree.h>
#include <drm/gpu_scheduler.h>
#include <drm/drm_file.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <linux/sched/mm.h>

#include "amdgpu_sync.h"
#include "amdgpu_ring.h"
#include "amdgpu_ids.h"

struct amdgpu_bo_va;
struct amdgpu_job;
struct amdgpu_bo_list_entry;

/*
 * GPUVM handling
 */

/* Maximum number of PTEs the hardware can write with one command */
#define AMDGPU_VM_MAX_UPDATE_SIZE       0x3FFFF

/* number of entries in page table */
#define AMDGPU_VM_PTE_COUNT(adev) (1 << (adev)->vm_manager.block_size)
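
/*
 * Worked example (illustrative, not part of the driver): with the common
 * block_size of 9, one page table block holds 1 << 9 = 512 entries.
 * Assuming the usual 4KB GPU page size, a fully populated PTB then
 * covers 512 * 4KB = 2MB of address space:
 *
 *      unsigned int count = AMDGPU_VM_PTE_COUNT(adev); // 512
 *      uint64_t covered = (uint64_t)count * 4096;      // 2MB
 */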

#define AMDGPU_PTE_VALID        (1ULL << 0)
#define AMDGPU_PTE_SYSTEM       (1ULL << 1)
#define AMDGPU_PTE_SNOOPED      (1ULL << 2)

/* RV+ */
#define AMDGPU_PTE_TMZ          (1ULL << 3)

/* VI only */
#define AMDGPU_PTE_EXECUTABLE   (1ULL << 4)

#define AMDGPU_PTE_READABLE     (1ULL << 5)
#define AMDGPU_PTE_WRITEABLE    (1ULL << 6)

#define AMDGPU_PTE_FRAG(x)      (((x) & 0x1fULL) << 7)
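
/*
 * Sketch of the fragment encoding (hedged; see the fragment handling in
 * amdgpu_vm.c): the field holds log2 of the fragment size in 4KB pages,
 * so AMDGPU_PTE_FRAG(4) marks a 4KB << 4 = 64KB fragment and
 * AMDGPU_PTE_FRAG(9) a 2MB one:
 *
 *      flags |= AMDGPU_PTE_FRAG(4);    // PTE is part of a 64KB fragment
 */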

/* TILED for VEGA10, reserved for older ASICs */
#define AMDGPU_PTE_PRT          (1ULL << 51)

/* PDE is handled as PTE for VEGA10 */
#define AMDGPU_PDE_PTE          (1ULL << 54)

#define AMDGPU_PTE_LOG          (1ULL << 55)

/* PTE is handled as PDE for VEGA10 (Translate Further) */
#define AMDGPU_PTE_TF           (1ULL << 56)

/* PDE Block Fragment Size for VEGA10 */
#define AMDGPU_PDE_BFS(a)       ((uint64_t)(a) << 59)

/* For GFX9 */
#define AMDGPU_PTE_MTYPE_VG10(a)        ((uint64_t)(a) << 57)
#define AMDGPU_PTE_MTYPE_VG10_MASK      AMDGPU_PTE_MTYPE_VG10(3ULL)

#define AMDGPU_MTYPE_NC 0
#define AMDGPU_MTYPE_CC 2

#define AMDGPU_PTE_DEFAULT_ATC  (AMDGPU_PTE_SYSTEM      \
                                | AMDGPU_PTE_SNOOPED    \
                                | AMDGPU_PTE_EXECUTABLE \
                                | AMDGPU_PTE_READABLE   \
                                | AMDGPU_PTE_WRITEABLE  \
                                | AMDGPU_PTE_MTYPE_VG10(AMDGPU_MTYPE_CC))

/* gfx10 */
#define AMDGPU_PTE_MTYPE_NV10(a)       ((uint64_t)(a) << 48)
#define AMDGPU_PTE_MTYPE_NV10_MASK     AMDGPU_PTE_MTYPE_NV10(7ULL)
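
/*
 * Putting the bits together -- a minimal sketch (not taken from the
 * driver) of a GFX9 PTE for a writable, snooped system page.  "dma_addr"
 * is a hypothetical, 4KB aligned DMA address; the address mask is an
 * assumption, the real encoding is done by the GMC code (gmc_v9_0.c):
 *
 *      uint64_t pte = dma_addr & 0x0000FFFFFFFFF000ULL;
 *
 *      pte |= AMDGPU_PTE_VALID | AMDGPU_PTE_SYSTEM | AMDGPU_PTE_SNOOPED |
 *             AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE |
 *             AMDGPU_PTE_MTYPE_VG10(AMDGPU_MTYPE_NC);
 */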

/* How to program VM fault handling */
#define AMDGPU_VM_FAULT_STOP_NEVER      0
#define AMDGPU_VM_FAULT_STOP_FIRST      1
#define AMDGPU_VM_FAULT_STOP_ALWAYS     2

/* Reserve 4MB VRAM for page tables */
#define AMDGPU_VM_RESERVED_VRAM         (4ULL << 20)

/* max number of VMHUB */
#define AMDGPU_MAX_VMHUBS                       3
#define AMDGPU_GFXHUB_0                         0
#define AMDGPU_MMHUB_0                          1
#define AMDGPU_MMHUB_1                          2

/* Reserve 2MB at top/bottom of address space for kernel use */
#define AMDGPU_VA_RESERVED_SIZE                 (2ULL << 20)

/* max vmids dedicated for process */
#define AMDGPU_VM_MAX_RESERVED_VMID     1

#define AMDGPU_VM_CONTEXT_GFX 0
#define AMDGPU_VM_CONTEXT_COMPUTE 1

/* See vm_update_mode */
#define AMDGPU_VM_USE_CPU_FOR_GFX (1 << 0)
#define AMDGPU_VM_USE_CPU_FOR_COMPUTE (1 << 1)

/* VMPT level enumeration; the hierarchy is:
 * PDB2->PDB1->PDB0->PTB
 */
enum amdgpu_vm_level {
        AMDGPU_VM_PDB2,
        AMDGPU_VM_PDB1,
        AMDGPU_VM_PDB0,
        AMDGPU_VM_PTB
};
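
/*
 * Example walk (illustrative): assuming the usual 48-bit VA, 9 bits per
 * level and 4KB pages on a four level setup, a virtual address is
 * decomposed as:
 *
 *      PDB2 index = (va >> 39) & 0x1ff
 *      PDB1 index = (va >> 30) & 0x1ff
 *      PDB0 index = (va >> 21) & 0x1ff
 *      PTB  index = (va >> 12) & 0x1ff
 *
 * ASICs with fewer levels simply start the walk at a lower root_level.
 */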

/* base structure for tracking BO usage in a VM */
struct amdgpu_vm_bo_base {
        /* constant after initialization */
        struct amdgpu_vm                *vm;
        struct amdgpu_bo                *bo;

        /* protected by the BO being reserved */
        struct amdgpu_vm_bo_base        *next;

        /* protected by spinlock */
        struct list_head                vm_status;

        /* protected by the BO being reserved */
        bool                            moved;
};

struct amdgpu_vm_pt {
        struct amdgpu_vm_bo_base        base;

        /* array of page tables, one for each directory entry */
        struct amdgpu_vm_pt             *entries;
};

/* provided by hw blocks that can write PTEs, e.g., sdma */
struct amdgpu_vm_pte_funcs {
        /* number of dw to reserve per operation */
        unsigned        copy_pte_num_dw;

        /* copy pte entries from GART */
        void (*copy_pte)(struct amdgpu_ib *ib,
                         uint64_t pe, uint64_t src,
                         unsigned count);

        /* write pte one entry at a time with addr mapping */
        void (*write_pte)(struct amdgpu_ib *ib, uint64_t pe,
                          uint64_t value, unsigned count,
                          uint32_t incr);
        /* for linear pte/pde updates without addr mapping */
        void (*set_pte_pde)(struct amdgpu_ib *ib,
                            uint64_t pe,
                            uint64_t addr, unsigned count,
                            uint32_t incr, uint64_t flags);
};
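
/*
 * What an implementation of write_pte is expected to do -- a hedged
 * pseudocode sketch, the real versions live in the SDMA code (e.g.,
 * sdma_v4_0.c).  "emit_write" is a made-up helper; PTEs are 8 bytes:
 *
 *      for (i = 0; i < count; ++i) {
 *              emit_write(ib, pe + 8 * i, value);
 *              value += incr;
 *      }
 */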

struct amdgpu_task_info {
        char    process_name[TASK_COMM_LEN];
        char    task_name[TASK_COMM_LEN];
        pid_t   pid;
        pid_t   tgid;
};

/**
 * struct amdgpu_vm_update_params
 *
 * Encapsulate some VM table update parameters to reduce
 * the number of function parameters
 */
struct amdgpu_vm_update_params {

        /**
         * @adev: amdgpu device we do this update for
         */
        struct amdgpu_device *adev;

        /**
         * @vm: optional amdgpu_vm we do this update for
         */
        struct amdgpu_vm *vm;

        /**
         * @immediate: if changes should be made immediately
         */
        bool immediate;

        /**
         * @unlocked: true if the root BO is not locked
         */
        bool unlocked;

        /**
         * @pages_addr:
         *
         * DMA addresses to use for mapping
         */
        dma_addr_t *pages_addr;

        /**
         * @job: job to use for hw submission
         */
        struct amdgpu_job *job;

        /**
         * @num_dw_left: number of dw left for the IB
         */
        unsigned int num_dw_left;
};

struct amdgpu_vm_update_funcs {
        int (*map_table)(struct amdgpu_bo *bo);
        int (*prepare)(struct amdgpu_vm_update_params *p, struct dma_resv *resv,
                       enum amdgpu_sync_mode sync_mode);
        int (*update)(struct amdgpu_vm_update_params *p,
                      struct amdgpu_bo *bo, uint64_t pe, uint64_t addr,
                      unsigned count, uint32_t incr, uint64_t flags);
        int (*commit)(struct amdgpu_vm_update_params *p,
                      struct dma_fence **fence);
};
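
/*
 * Typical call sequence (a hedged sketch of how amdgpu_vm.c drives these
 * hooks, not a verbatim quote from the driver):
 *
 *      vm->update_funcs->map_table(bo);        // make the PT BO accessible
 *      vm->update_funcs->prepare(&params, resv, AMDGPU_SYNC_EXPLICIT);
 *      vm->update_funcs->update(&params, bo, pe, addr, count, incr, flags);
 *      vm->update_funcs->commit(&params, &fence);      // submit, get fence
 *
 * amdgpu_vm_cpu_funcs and amdgpu_vm_sdma_funcs below are the two
 * backends implementing this interface.
 */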

struct amdgpu_vm {
        /* tree of virtual addresses mapped */
        struct rb_root_cached   va;

        /* Lock to prevent eviction while we are updating page tables;
         * use vm_eviction_lock/unlock(vm)
         */
        struct mutex            eviction_lock;
        bool                    evicting;
        unsigned int            saved_flags;

        /* BOs which need a validation */
        struct list_head        evicted;

        /* PT BOs which have been relocated and whose parent needs an update */
        struct list_head        relocated;

        /* per VM BOs moved, but not yet updated in the PT */
        struct list_head        moved;

        /* All BOs of this VM not currently in the state machine */
        struct list_head        idle;

        /* regular invalidated BOs, but not yet updated in the PT */
        struct list_head        invalidated;
        spinlock_t              invalidated_lock;

        /* BO mappings freed, but not yet updated in the PT */
        struct list_head        freed;

        /* contains the page directory */
        struct amdgpu_vm_pt     root;
        struct dma_fence        *last_update;

        /* Scheduler entities for page table updates */
        struct drm_sched_entity immediate;
        struct drm_sched_entity delayed;

        /* Last unlocked submission to the scheduler entities */
        struct dma_fence        *last_unlocked;

        unsigned int            pasid;
        /* dedicated to vm */
        struct amdgpu_vmid      *reserved_vmid[AMDGPU_MAX_VMHUBS];

        /* Flag to indicate if VM tables are updated by CPU or GPU (SDMA) */
        bool                                    use_cpu_for_update;

        /* Functions to use for VM table updates */
        const struct amdgpu_vm_update_funcs     *update_funcs;

        /* Flag to indicate ATS support from PTE for GFX9 */
        bool                    pte_support_ats;

        /* Up to 128 pending retry page faults */
        DECLARE_KFIFO(faults, u64, 128);

        /* Points to the KFD process VM info */
        struct amdkfd_process_info *process_info;

        /* List node in amdkfd_process_info.vm_list_head */
        struct list_head        vm_list_node;

        /* Valid while the PD is reserved or fenced */
        uint64_t                pd_phys_addr;

        /* Some basic info about the task */
        struct amdgpu_task_info task_info;

        /* Stores the positions of a group of BOs for bulk LRU moves */
        struct ttm_lru_bulk_move lru_bulk_move;
        /* mark whether we can do the bulk move */
        bool                    bulk_moveable;
        /* Flag to indicate if VM is used for compute */
        bool                    is_compute_context;
};

struct amdgpu_vm_manager {
        /* Handling of VMIDs */
        struct amdgpu_vmid_mgr                  id_mgr[AMDGPU_MAX_VMHUBS];
        unsigned int                            first_kfd_vmid;

        /* Handling of VM fences */
        u64                                     fence_context;
        unsigned                                seqno[AMDGPU_MAX_RINGS];

        uint64_t                                max_pfn;
        uint32_t                                num_level;
        uint32_t                                block_size;
        uint32_t                                fragment_size;
        enum amdgpu_vm_level                    root_level;
        /* vram base address for page table entry */
        u64                                     vram_base_offset;
        /* vm pte handling */
        const struct amdgpu_vm_pte_funcs        *vm_pte_funcs;
        struct drm_gpu_scheduler                *vm_pte_scheds[AMDGPU_MAX_RINGS];
        unsigned                                vm_pte_num_scheds;
        struct amdgpu_ring                      *page_fault;

        /* partial resident texture handling */
        spinlock_t                              prt_lock;
        atomic_t                                num_prt_users;

        /* controls how VM page tables are updated for Graphics and Compute:
         * BIT0 [=0] Graphics updated by SDMA, [=1] by CPU
         * BIT1 [=0] Compute updated by SDMA, [=1] by CPU
         */
        int                                     vm_update_mode;

        /* PASID to VM mapping, will be used in interrupt context to
         * look up the VM of a page fault
         */
        struct idr                              pasid_idr;
        spinlock_t                              pasid_lock;
};
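
/*
 * Decoding vm_update_mode (example; see the AMDGPU_VM_USE_CPU_FOR_*
 * flags above and amdgpu_vm_init):
 *
 *      bool gfx_uses_cpu = adev->vm_manager.vm_update_mode &
 *                          AMDGPU_VM_USE_CPU_FOR_GFX;
 *      bool compute_uses_cpu = adev->vm_manager.vm_update_mode &
 *                              AMDGPU_VM_USE_CPU_FOR_COMPUTE;
 */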

#define amdgpu_vm_copy_pte(adev, ib, pe, src, count) ((adev)->vm_manager.vm_pte_funcs->copy_pte((ib), (pe), (src), (count)))
#define amdgpu_vm_write_pte(adev, ib, pe, value, count, incr) ((adev)->vm_manager.vm_pte_funcs->write_pte((ib), (pe), (value), (count), (incr)))
#define amdgpu_vm_set_pte_pde(adev, ib, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->set_pte_pde((ib), (pe), (addr), (count), (incr), (flags)))
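
/*
 * Usage sketch for the wrappers above (hypothetical values; the IB is
 * assumed to have been sized via copy_pte_num_dw and friends).  This
 * writes 16 consecutive PTEs starting at GPU address "pe", mapping 16
 * consecutive 4KB pages starting at "addr":
 *
 *      amdgpu_vm_set_pte_pde(adev, ib, pe, addr, 16, 4096,
 *                            AMDGPU_PTE_VALID | AMDGPU_PTE_READABLE);
 */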

extern const struct amdgpu_vm_update_funcs amdgpu_vm_cpu_funcs;
extern const struct amdgpu_vm_update_funcs amdgpu_vm_sdma_funcs;

void amdgpu_vm_manager_init(struct amdgpu_device *adev);
void amdgpu_vm_manager_fini(struct amdgpu_device *adev);

long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout);
int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
                   int vm_context, u32 pasid);
int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm, u32 pasid);
void amdgpu_vm_release_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm);
void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm);
void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
                         struct list_head *validated,
                         struct amdgpu_bo_list_entry *entry);
bool amdgpu_vm_ready(struct amdgpu_vm *vm);
int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
                              int (*callback)(void *p, struct amdgpu_bo *bo),
                              void *param);
int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_pipe_sync);
int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
                          struct amdgpu_vm *vm, bool immediate);
int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
                          struct amdgpu_vm *vm,
                          struct dma_fence **fence);
int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
                           struct amdgpu_vm *vm);
int amdgpu_vm_bo_update(struct amdgpu_device *adev,
                        struct amdgpu_bo_va *bo_va,
                        bool clear);
bool amdgpu_vm_evictable(struct amdgpu_bo *bo);
void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
                             struct amdgpu_bo *bo, bool evicted);
uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr);
struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
                                       struct amdgpu_bo *bo);
struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
                                      struct amdgpu_vm *vm,
                                      struct amdgpu_bo *bo);
int amdgpu_vm_bo_map(struct amdgpu_device *adev,
                     struct amdgpu_bo_va *bo_va,
                     uint64_t addr, uint64_t offset,
                     uint64_t size, uint64_t flags);
int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
                             struct amdgpu_bo_va *bo_va,
                             uint64_t addr, uint64_t offset,
                             uint64_t size, uint64_t flags);
int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
                       struct amdgpu_bo_va *bo_va,
                       uint64_t addr);
int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
                                struct amdgpu_vm *vm,
                                uint64_t saddr, uint64_t size);
struct amdgpu_bo_va_mapping *amdgpu_vm_bo_lookup_mapping(struct amdgpu_vm *vm,
                                                         uint64_t addr);
void amdgpu_vm_bo_trace_cs(struct amdgpu_vm *vm, struct ww_acquire_ctx *ticket);
void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
                      struct amdgpu_bo_va *bo_va);
void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size,
                           uint32_t fragment_size_default, unsigned max_level,
                           unsigned max_bits);
int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring,
                                  struct amdgpu_job *job);
void amdgpu_vm_check_compute_bug(struct amdgpu_device *adev);

void amdgpu_vm_get_task_info(struct amdgpu_device *adev, u32 pasid,
                             struct amdgpu_task_info *task_info);
bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid,
                            uint64_t addr);

void amdgpu_vm_set_task_info(struct amdgpu_vm *vm);

void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev,
                                struct amdgpu_vm *vm);
void amdgpu_vm_del_from_lru_notify(struct ttm_buffer_object *bo);

#endif