/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König
 */
  24#ifndef __AMDGPU_VM_H__
  25#define __AMDGPU_VM_H__
  26
  27#include <linux/rbtree.h>
  28
  29#include "gpu_scheduler.h"
  30#include "amdgpu_sync.h"
  31#include "amdgpu_ring.h"
  32
  33struct amdgpu_bo_va;
  34struct amdgpu_job;
  35struct amdgpu_bo_list_entry;
  36
  37/*
  38 * GPUVM handling
  39 */
  40
  41/* maximum number of VMIDs */
  42#define AMDGPU_NUM_VM   16
  43
  44/* Maximum number of PTEs the hardware can write with one command */
  45#define AMDGPU_VM_MAX_UPDATE_SIZE       0x3FFFF
  46
  47/* number of entries in page table */
  48#define AMDGPU_VM_PTE_COUNT(adev) (1 << (adev)->vm_manager.block_size)
  49
  50/* PTBs (Page Table Blocks) need to be aligned to 32K */
  51#define AMDGPU_VM_PTB_ALIGN_SIZE   32768
  52
  53/* LOG2 number of continuous pages for the fragment field */
  54#define AMDGPU_LOG2_PAGES_PER_FRAG 4
  55
  56#define AMDGPU_PTE_VALID        (1ULL << 0)
  57#define AMDGPU_PTE_SYSTEM       (1ULL << 1)
  58#define AMDGPU_PTE_SNOOPED      (1ULL << 2)
  59
  60/* VI only */
  61#define AMDGPU_PTE_EXECUTABLE   (1ULL << 4)
  62
  63#define AMDGPU_PTE_READABLE     (1ULL << 5)
  64#define AMDGPU_PTE_WRITEABLE    (1ULL << 6)
  65
  66#define AMDGPU_PTE_FRAG(x)      ((x & 0x1fULL) << 7)
  67
  68/* TILED for VEGA10, reserved for older ASICs  */
  69#define AMDGPU_PTE_PRT          (1ULL << 51)
  70
  71/* VEGA10 only */
  72#define AMDGPU_PTE_MTYPE(a)    ((uint64_t)a << 57)
  73#define AMDGPU_PTE_MTYPE_MASK   AMDGPU_PTE_MTYPE(3ULL)
  74
  75/* How to programm VM fault handling */
  76#define AMDGPU_VM_FAULT_STOP_NEVER      0
  77#define AMDGPU_VM_FAULT_STOP_FIRST      1
  78#define AMDGPU_VM_FAULT_STOP_ALWAYS     2
  79
  80/* max number of VMHUB */
  81#define AMDGPU_MAX_VMHUBS                       2
  82#define AMDGPU_GFXHUB                           0
  83#define AMDGPU_MMHUB                            1
  84
  85/* hardcode that limit for now */
  86#define AMDGPU_VA_RESERVED_SIZE                 (8 << 20)
  87/* max vmids dedicated for process */
  88#define AMDGPU_VM_MAX_RESERVED_VMID     1
  89
  90#define AMDGPU_VM_CONTEXT_GFX 0
  91#define AMDGPU_VM_CONTEXT_COMPUTE 1
  92
  93/* See vm_update_mode */
  94#define AMDGPU_VM_USE_CPU_FOR_GFX (1 << 0)
  95#define AMDGPU_VM_USE_CPU_FOR_COMPUTE (1 << 1)
  96
  97
  98struct amdgpu_vm_pt {
  99        struct amdgpu_bo        *bo;
 100        uint64_t                addr;
 101
 102        /* array of page tables, one for each directory entry */
 103        struct amdgpu_vm_pt     *entries;
 104        unsigned                last_entry_used;
 105};
 106
 107struct amdgpu_vm {
 108        /* tree of virtual addresses mapped */
 109        struct rb_root          va;
 110
 111        /* protecting invalidated */
 112        spinlock_t              status_lock;
 113
 114        /* BOs moved, but not yet updated in the PT */
 115        struct list_head        invalidated;
 116
 117        /* BOs cleared in the PT because of a move */
 118        struct list_head        cleared;
 119
 120        /* BO mappings freed, but not yet updated in the PT */
 121        struct list_head        freed;
 122
 123        /* contains the page directory */
 124        struct amdgpu_vm_pt     root;
 125        struct dma_fence        *last_dir_update;
 126        uint64_t                last_eviction_counter;
 127
 128        /* protecting freed */
 129        spinlock_t              freed_lock;
 130
 131        /* Scheduler entity for page table updates */
 132        struct amd_sched_entity entity;
 133
 134        /* client id */
 135        u64                     client_id;
 136        /* dedicated to vm */
 137        struct amdgpu_vm_id     *reserved_vmid[AMDGPU_MAX_VMHUBS];
 138        /* each VM will map on CSA */
 139        struct amdgpu_bo_va *csa_bo_va;
 140
 141        /* Flag to indicate if VM tables are updated by CPU or GPU (SDMA) */
 142        bool                    use_cpu_for_update;
 143};
 144
 145struct amdgpu_vm_id {
 146        struct list_head        list;
 147        struct amdgpu_sync      active;
 148        struct dma_fence                *last_flush;
 149        atomic64_t              owner;
 150
 151        uint64_t                pd_gpu_addr;
 152        /* last flushed PD/PT update */
 153        struct dma_fence                *flushed_updates;
 154
 155        uint32_t                current_gpu_reset_count;
 156
 157        uint32_t                gds_base;
 158        uint32_t                gds_size;
 159        uint32_t                gws_base;
 160        uint32_t                gws_size;
 161        uint32_t                oa_base;
 162        uint32_t                oa_size;
 163};
 164
 165struct amdgpu_vm_id_manager {
 166        struct mutex            lock;
 167        unsigned                num_ids;
 168        struct list_head        ids_lru;
 169        struct amdgpu_vm_id     ids[AMDGPU_NUM_VM];
 170        atomic_t                reserved_vmid_num;
 171};
 172
 173struct amdgpu_vm_manager {
 174        /* Handling of VMIDs */
 175        struct amdgpu_vm_id_manager             id_mgr[AMDGPU_MAX_VMHUBS];
 176
 177        /* Handling of VM fences */
 178        u64                                     fence_context;
 179        unsigned                                seqno[AMDGPU_MAX_RINGS];
 180
 181        uint64_t                                max_pfn;
 182        uint32_t                                num_level;
 183        uint64_t                                vm_size;
 184        uint32_t                                block_size;
 185        /* vram base address for page table entry  */
 186        u64                                     vram_base_offset;
 187        /* vm pte handling */
 188        const struct amdgpu_vm_pte_funcs        *vm_pte_funcs;
 189        struct amdgpu_ring                      *vm_pte_rings[AMDGPU_MAX_RINGS];
 190        unsigned                                vm_pte_num_rings;
 191        atomic_t                                vm_pte_next_ring;
 192        /* client id counter */
 193        atomic64_t                              client_counter;
 194
 195        /* partial resident texture handling */
 196        spinlock_t                              prt_lock;
 197        atomic_t                                num_prt_users;
 198
 199        /* controls how VM page tables are updated for Graphics and Compute.
 200         * BIT0[= 0] Graphics updated by SDMA [= 1] by CPU
 201         * BIT1[= 0] Compute updated by SDMA [= 1] by CPU
 202         */
 203        int                                     vm_update_mode;
 204};
 205
 206void amdgpu_vm_manager_init(struct amdgpu_device *adev);
 207void amdgpu_vm_manager_fini(struct amdgpu_device *adev);
 208int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 209                   int vm_context);
 210void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm);
 211void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
 212                         struct list_head *validated,
 213                         struct amdgpu_bo_list_entry *entry);
 214int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 215                              int (*callback)(void *p, struct amdgpu_bo *bo),
 216                              void *param);
 217void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev,
 218                                  struct amdgpu_vm *vm);
 219int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
 220                        struct amdgpu_vm *vm,
 221                        uint64_t saddr, uint64_t size);
 222int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
 223                      struct amdgpu_sync *sync, struct dma_fence *fence,
 224                      struct amdgpu_job *job);
 225int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job);
 226void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vmhub,
 227                        unsigned vmid);
 228void amdgpu_vm_reset_all_ids(struct amdgpu_device *adev);
 229int amdgpu_vm_update_directories(struct amdgpu_device *adev,
 230                                 struct amdgpu_vm *vm);
 231int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
 232                          struct amdgpu_vm *vm,
 233                          struct dma_fence **fence);
 234int amdgpu_vm_clear_invalids(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 235                             struct amdgpu_sync *sync);
 236int amdgpu_vm_bo_update(struct amdgpu_device *adev,
 237                        struct amdgpu_bo_va *bo_va,
 238                        bool clear);
 239void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
 240                             struct amdgpu_bo *bo);
 241struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
 242                                       struct amdgpu_bo *bo);
 243struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
 244                                      struct amdgpu_vm *vm,
 245                                      struct amdgpu_bo *bo);
 246int amdgpu_vm_bo_map(struct amdgpu_device *adev,
 247                     struct amdgpu_bo_va *bo_va,
 248                     uint64_t addr, uint64_t offset,
 249                     uint64_t size, uint64_t flags);
 250int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
 251                             struct amdgpu_bo_va *bo_va,
 252                             uint64_t addr, uint64_t offset,
 253                             uint64_t size, uint64_t flags);
 254int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
 255                       struct amdgpu_bo_va *bo_va,
 256                       uint64_t addr);
 257int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
 258                                struct amdgpu_vm *vm,
 259                                uint64_t saddr, uint64_t size);
 260void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
 261                      struct amdgpu_bo_va *bo_va);
 262void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint64_t vm_size);
 263int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
 264bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring,
 265                                  struct amdgpu_job *job);
 266void amdgpu_vm_check_compute_bug(struct amdgpu_device *adev);
 267
 268#endif
 269