linux/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König
 */
#ifndef __AMDGPU_VM_H__
#define __AMDGPU_VM_H__

#include <linux/idr.h>
#include <linux/kfifo.h>
#include <linux/rbtree.h>
#include <drm/gpu_scheduler.h>
#include <drm/drm_file.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <linux/sched/mm.h>

#include "amdgpu_sync.h"
#include "amdgpu_ring.h"
#include "amdgpu_ids.h"

struct amdgpu_bo_va;
struct amdgpu_job;
struct amdgpu_bo_list_entry;

/*
 * GPUVM handling
 */

/* Maximum number of PTEs the hardware can write with one command */
#define AMDGPU_VM_MAX_UPDATE_SIZE       0x3FFFF

/* number of entries in page table */
#define AMDGPU_VM_PTE_COUNT(adev) (1 << (adev)->vm_manager.block_size)
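
/*
 * Illustrative sizing (a sketch, assuming the common block_size of 9):
 * AMDGPU_VM_PTE_COUNT() then yields 1 << 9 = 512 entries, and with 8 bytes
 * per 64-bit PTE a fully populated page table block occupies 512 * 8 = 4KiB.
 */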

#define AMDGPU_PTE_VALID        (1ULL << 0)
#define AMDGPU_PTE_SYSTEM       (1ULL << 1)
#define AMDGPU_PTE_SNOOPED      (1ULL << 2)

/* RV+ */
#define AMDGPU_PTE_TMZ          (1ULL << 3)

/* VI only */
#define AMDGPU_PTE_EXECUTABLE   (1ULL << 4)

#define AMDGPU_PTE_READABLE     (1ULL << 5)
#define AMDGPU_PTE_WRITEABLE    (1ULL << 6)

#define AMDGPU_PTE_FRAG(x)      ((x & 0x1fULL) << 7)

/* TILED for VEGA10, reserved for older ASICs */
#define AMDGPU_PTE_PRT          (1ULL << 51)

/* PDE is handled as PTE for VEGA10 */
#define AMDGPU_PDE_PTE          (1ULL << 54)

#define AMDGPU_PTE_LOG          (1ULL << 55)

/* PTE is handled as PDE for VEGA10 (Translate Further) */
#define AMDGPU_PTE_TF           (1ULL << 56)

/* MALL noalloc for sienna_cichlid, reserved for older ASICs */
#define AMDGPU_PTE_NOALLOC      (1ULL << 58)

/* PDE Block Fragment Size for VEGA10 */
#define AMDGPU_PDE_BFS(a)       ((uint64_t)a << 59)


/* For GFX9 */
#define AMDGPU_PTE_MTYPE_VG10(a)        ((uint64_t)(a) << 57)
#define AMDGPU_PTE_MTYPE_VG10_MASK      AMDGPU_PTE_MTYPE_VG10(3ULL)

#define AMDGPU_MTYPE_NC 0
#define AMDGPU_MTYPE_CC 2

#define AMDGPU_PTE_DEFAULT_ATC  (AMDGPU_PTE_SYSTEM      \
                                | AMDGPU_PTE_SNOOPED    \
                                | AMDGPU_PTE_EXECUTABLE \
                                | AMDGPU_PTE_READABLE   \
                                | AMDGPU_PTE_WRITEABLE  \
                                | AMDGPU_PTE_MTYPE_VG10(AMDGPU_MTYPE_CC))

/* gfx10 */
#define AMDGPU_PTE_MTYPE_NV10(a)       ((uint64_t)(a) << 48)
#define AMDGPU_PTE_MTYPE_NV10_MASK     AMDGPU_PTE_MTYPE_NV10(7ULL)
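
/*
 * Example (illustrative only): composing the flags for a valid, snooped
 * system page that is readable and writeable, with GFX9 MTYPE NC:
 *
 *   uint64_t flags = AMDGPU_PTE_VALID | AMDGPU_PTE_SYSTEM |
 *                    AMDGPU_PTE_SNOOPED | AMDGPU_PTE_READABLE |
 *                    AMDGPU_PTE_WRITEABLE |
 *                    AMDGPU_PTE_MTYPE_VG10(AMDGPU_MTYPE_NC);
 */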

/* How to program VM fault handling */
#define AMDGPU_VM_FAULT_STOP_NEVER      0
#define AMDGPU_VM_FAULT_STOP_FIRST      1
#define AMDGPU_VM_FAULT_STOP_ALWAYS     2

/* Reserve 8MB VRAM for page tables */
#define AMDGPU_VM_RESERVED_VRAM         (8ULL << 20)

/* max number of VMHUB */
#define AMDGPU_MAX_VMHUBS                       3
#define AMDGPU_GFXHUB_0                         0
#define AMDGPU_MMHUB_0                          1
#define AMDGPU_MMHUB_1                          2

/* Reserve 2MB at top/bottom of address space for kernel use */
#define AMDGPU_VA_RESERVED_SIZE                 (2ULL << 20)

/* max vmids dedicated for process */
#define AMDGPU_VM_MAX_RESERVED_VMID     1

#define AMDGPU_VM_CONTEXT_GFX 0
#define AMDGPU_VM_CONTEXT_COMPUTE 1

/* See vm_update_mode */
#define AMDGPU_VM_USE_CPU_FOR_GFX (1 << 0)
#define AMDGPU_VM_USE_CPU_FOR_COMPUTE (1 << 1)

/* VMPT level enumeration; the hierarchy is:
 * PDB2->PDB1->PDB0->PTB
 */
enum amdgpu_vm_level {
        AMDGPU_VM_PDB2,
        AMDGPU_VM_PDB1,
        AMDGPU_VM_PDB0,
        AMDGPU_VM_PTB
};
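
/*
 * Illustrative address split (a sketch, assuming 4 levels, 4KiB pages and
 * a 9-bit block_size as on VEGA10): the PTB indexes VA bits [20:12], PDB0
 * bits [29:21], PDB1 bits [38:30] and PDB2 bits [47:39].
 */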

/* base structure for tracking BO usage in a VM */
struct amdgpu_vm_bo_base {
        /* constant after initialization */
        struct amdgpu_vm                *vm;
        struct amdgpu_bo                *bo;

        /* protected by the BO being reserved */
        struct amdgpu_vm_bo_base        *next;

        /* protected by spinlock */
        struct list_head                vm_status;

        /* protected by the BO being reserved */
        bool                            moved;
};

struct amdgpu_vm_pt {
        struct amdgpu_vm_bo_base        base;

        /* array of page tables, one for each directory entry */
        struct amdgpu_vm_pt             *entries;
};

/* provided by hw blocks that can write PTEs, e.g., sdma */
struct amdgpu_vm_pte_funcs {
        /* number of dw to reserve per operation */
        unsigned        copy_pte_num_dw;

        /* copy pte entries from GART */
        void (*copy_pte)(struct amdgpu_ib *ib,
                         uint64_t pe, uint64_t src,
                         unsigned count);

        /* write pte one entry at a time with addr mapping */
        void (*write_pte)(struct amdgpu_ib *ib, uint64_t pe,
                          uint64_t value, unsigned count,
                          uint32_t incr);
        /* for linear pte/pde updates without addr mapping */
        void (*set_pte_pde)(struct amdgpu_ib *ib,
                            uint64_t pe,
                            uint64_t addr, unsigned count,
                            uint32_t incr, uint64_t flags);
};
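
/*
 * Semantics sketch (not a backend implementation): write_pte stores
 * "value" into "count" consecutive 64-bit PTEs starting at GPU address
 * "pe", adding "incr" to the value for each entry; set_pte_pde does the
 * same but builds each entry from a base address and flags:
 *
 *   for (i = 0; i < count; ++i)
 *           pte[i] = (addr + (uint64_t)i * incr) | flags;
 */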

struct amdgpu_task_info {
        char    process_name[TASK_COMM_LEN];
        char    task_name[TASK_COMM_LEN];
        pid_t   pid;
        pid_t   tgid;
};

/**
 * struct amdgpu_vm_update_params
 *
 * Encapsulate some VM table update parameters to reduce
 * the number of function parameters
 *
 */
struct amdgpu_vm_update_params {

        /**
         * @adev: amdgpu device we do this update for
         */
        struct amdgpu_device *adev;

        /**
         * @vm: optional amdgpu_vm we do this update for
         */
        struct amdgpu_vm *vm;

        /**
         * @immediate: if changes should be made immediately
         */
        bool immediate;

        /**
         * @unlocked: true if the root BO is not locked
         */
        bool unlocked;

        /**
         * @pages_addr:
         *
         * DMA addresses to use for mapping
         */
        dma_addr_t *pages_addr;

        /**
         * @job: job to use for hw submission
         */
        struct amdgpu_job *job;

        /**
         * @num_dw_left: number of dw left for the IB
         */
        unsigned int num_dw_left;
};

struct amdgpu_vm_update_funcs {
        int (*map_table)(struct amdgpu_bo *bo);
        int (*prepare)(struct amdgpu_vm_update_params *p, struct dma_resv *resv,
                       enum amdgpu_sync_mode sync_mode);
        int (*update)(struct amdgpu_vm_update_params *p,
                      struct amdgpu_bo *bo, uint64_t pe, uint64_t addr,
                      unsigned count, uint32_t incr, uint64_t flags);
        int (*commit)(struct amdgpu_vm_update_params *p,
                      struct dma_fence **fence);
};
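
/*
 * Typical backend call sequence (a sketch; amdgpu_vm_cpu_funcs and
 * amdgpu_vm_sdma_funcs both follow this shape):
 *
 *   struct amdgpu_vm_update_params params = {
 *           .adev = adev, .vm = vm, .immediate = immediate,
 *   };
 *
 *   r = vm->update_funcs->map_table(pt_bo);
 *   r = vm->update_funcs->prepare(&params, resv, AMDGPU_SYNC_EXPLICIT);
 *   r = vm->update_funcs->update(&params, pt_bo, pe, addr, count, incr, flags);
 *   r = vm->update_funcs->commit(&params, &fence);
 */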

struct amdgpu_vm {
        /* tree of virtual addresses mapped */
        struct rb_root_cached   va;

        /* Lock to prevent eviction while we are updating page tables;
         * use vm_eviction_lock/unlock(vm)
         */
        struct mutex            eviction_lock;
        bool                    evicting;
        unsigned int            saved_flags;

        /* BOs that need validation */
        struct list_head        evicted;

        /* PT BOs which were relocated and whose parent needs an update */
        struct list_head        relocated;

        /* per VM BOs moved, but not yet updated in the PT */
        struct list_head        moved;

        /* All BOs of this VM not currently in the state machine */
        struct list_head        idle;

        /* regular invalidated BOs, but not yet updated in the PT */
        struct list_head        invalidated;
        spinlock_t              invalidated_lock;

        /* BO mappings freed, but not yet updated in the PT */
        struct list_head        freed;

        /* BOs which were invalidated and have already been updated in the PTs */
        struct list_head        done;

        /* contains the page directory */
        struct amdgpu_vm_pt     root;
        struct dma_fence        *last_update;

        /* Scheduler entities for page table updates */
        struct drm_sched_entity immediate;
        struct drm_sched_entity delayed;

        /* Last unlocked submission to the scheduler entities */
        struct dma_fence        *last_unlocked;

        unsigned int            pasid;
        /* dedicated to vm */
        struct amdgpu_vmid      *reserved_vmid[AMDGPU_MAX_VMHUBS];

        /* Flag to indicate if VM tables are updated by CPU or GPU (SDMA) */
        bool                                    use_cpu_for_update;

        /* Functions to use for VM table updates */
        const struct amdgpu_vm_update_funcs     *update_funcs;

        /* Flag to indicate ATS support from PTE for GFX9 */
        bool                    pte_support_ats;

        /* Up to 128 pending retry page faults */
        DECLARE_KFIFO(faults, u64, 128);

        /* Points to the KFD process VM info */
        struct amdkfd_process_info *process_info;

        /* List node in amdkfd_process_info.vm_list_head */
        struct list_head        vm_list_node;

        /* Valid while the PD is reserved or fenced */
        uint64_t                pd_phys_addr;

        /* Some basic info about the task */
        struct amdgpu_task_info task_info;

        /* Store positions of a group of BOs */
        struct ttm_lru_bulk_move lru_bulk_move;
        /* mark whether the bulk move can be done */
        bool                    bulk_moveable;
        /* Flag to indicate if VM is used for compute */
        bool                    is_compute_context;
};

struct amdgpu_vm_manager {
        /* Handling of VMIDs */
        struct amdgpu_vmid_mgr                  id_mgr[AMDGPU_MAX_VMHUBS];
        unsigned int                            first_kfd_vmid;

        /* Handling of VM fences */
        u64                                     fence_context;
        unsigned                                seqno[AMDGPU_MAX_RINGS];

        uint64_t                                max_pfn;
        uint32_t                                num_level;
        uint32_t                                block_size;
        uint32_t                                fragment_size;
        enum amdgpu_vm_level                    root_level;
        /* vram base address for page table entry */
        u64                                     vram_base_offset;
        /* vm pte handling */
        const struct amdgpu_vm_pte_funcs        *vm_pte_funcs;
        struct drm_gpu_scheduler                *vm_pte_scheds[AMDGPU_MAX_RINGS];
        unsigned                                vm_pte_num_scheds;
        struct amdgpu_ring                      *page_fault;

        /* partial resident texture handling */
        spinlock_t                              prt_lock;
        atomic_t                                num_prt_users;

        /* controls how VM page tables are updated for Graphics and Compute.
         * BIT0[= 0] Graphics updated by SDMA [= 1] by CPU
         * BIT1[= 0] Compute updated by SDMA [= 1] by CPU
         */
        int                                     vm_update_mode;

        /* PASID to VM mapping, will be used in interrupt context to
         * look up VM of a page fault
         */
        struct idr                              pasid_idr;
        spinlock_t                              pasid_lock;
};
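
/*
 * Example (illustrative): with vm_update_mode set to
 * AMDGPU_VM_USE_CPU_FOR_COMPUTE, compute VMs update their page tables
 * with the CPU while graphics VMs keep using SDMA:
 *
 *   bool compute_uses_cpu =
 *           !!(adev->vm_manager.vm_update_mode & AMDGPU_VM_USE_CPU_FOR_COMPUTE);
 */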

#define amdgpu_vm_copy_pte(adev, ib, pe, src, count) ((adev)->vm_manager.vm_pte_funcs->copy_pte((ib), (pe), (src), (count)))
#define amdgpu_vm_write_pte(adev, ib, pe, value, count, incr) ((adev)->vm_manager.vm_pte_funcs->write_pte((ib), (pe), (value), (count), (incr)))
#define amdgpu_vm_set_pte_pde(adev, ib, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->set_pte_pde((ib), (pe), (addr), (count), (incr), (flags)))
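
/*
 * Usage sketch (illustrative; ib, pe and addr are assumed to be prepared
 * by the caller): write 512 contiguous 4KiB mappings with one command:
 *
 *   amdgpu_vm_set_pte_pde(adev, ib, pe, addr, 512, AMDGPU_GPU_PAGE_SIZE,
 *                         AMDGPU_PTE_VALID | AMDGPU_PTE_READABLE |
 *                         AMDGPU_PTE_WRITEABLE);
 */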

extern const struct amdgpu_vm_update_funcs amdgpu_vm_cpu_funcs;
extern const struct amdgpu_vm_update_funcs amdgpu_vm_sdma_funcs;

void amdgpu_vm_manager_init(struct amdgpu_device *adev);
void amdgpu_vm_manager_fini(struct amdgpu_device *adev);

long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout);
int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
                   int vm_context, u32 pasid);
int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm, u32 pasid);
void amdgpu_vm_release_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm);
void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm);
void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
                         struct list_head *validated,
                         struct amdgpu_bo_list_entry *entry);
bool amdgpu_vm_ready(struct amdgpu_vm *vm);
int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
                              int (*callback)(void *p, struct amdgpu_bo *bo),
                              void *param);
int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_pipe_sync);
int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
                          struct amdgpu_vm *vm, bool immediate);
int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
                          struct amdgpu_vm *vm,
                          struct dma_fence **fence);
int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
                           struct amdgpu_vm *vm);
int amdgpu_vm_bo_update(struct amdgpu_device *adev,
                        struct amdgpu_bo_va *bo_va,
                        bool clear);
bool amdgpu_vm_evictable(struct amdgpu_bo *bo);
void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
                             struct amdgpu_bo *bo, bool evicted);
uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr);
struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
                                       struct amdgpu_bo *bo);
struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
                                      struct amdgpu_vm *vm,
                                      struct amdgpu_bo *bo);
int amdgpu_vm_bo_map(struct amdgpu_device *adev,
                     struct amdgpu_bo_va *bo_va,
                     uint64_t addr, uint64_t offset,
                     uint64_t size, uint64_t flags);
int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
                             struct amdgpu_bo_va *bo_va,
                             uint64_t addr, uint64_t offset,
                             uint64_t size, uint64_t flags);
int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
                       struct amdgpu_bo_va *bo_va,
                       uint64_t addr);
int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
                                struct amdgpu_vm *vm,
                                uint64_t saddr, uint64_t size);
struct amdgpu_bo_va_mapping *amdgpu_vm_bo_lookup_mapping(struct amdgpu_vm *vm,
                                                         uint64_t addr);
void amdgpu_vm_bo_trace_cs(struct amdgpu_vm *vm, struct ww_acquire_ctx *ticket);
void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
                      struct amdgpu_bo_va *bo_va);
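
/*
 * Typical per-BO mapping lifecycle (a sketch; locking and error handling
 * omitted):
 *
 *   bo_va = amdgpu_vm_bo_add(adev, vm, bo);
 *   r = amdgpu_vm_bo_map(adev, bo_va, va, 0, amdgpu_bo_size(bo),
 *                        AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE);
 *   r = amdgpu_vm_bo_update(adev, bo_va, false);
 *   ...
 *   r = amdgpu_vm_bo_unmap(adev, bo_va, va);
 *   amdgpu_vm_bo_rmv(adev, bo_va);
 */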
void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size,
                           uint32_t fragment_size_default, unsigned max_level,
                           unsigned max_bits);
int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring,
                                  struct amdgpu_job *job);
void amdgpu_vm_check_compute_bug(struct amdgpu_device *adev);

void amdgpu_vm_get_task_info(struct amdgpu_device *adev, u32 pasid,
                             struct amdgpu_task_info *task_info);
bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid,
                            uint64_t addr);

void amdgpu_vm_set_task_info(struct amdgpu_vm *vm);

void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev,
                                struct amdgpu_vm *vm);
void amdgpu_vm_del_from_lru_notify(struct ttm_buffer_object *bo);

#if defined(CONFIG_DEBUG_FS)
void amdgpu_debugfs_vm_bo_info(struct amdgpu_vm *vm, struct seq_file *m);
#endif

#endif