linux/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
/*
 * Copyright 2018 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
#ifndef __AMDGPU_GMC_H__
#define __AMDGPU_GMC_H__

#include <linux/types.h>

#include "amdgpu_irq.h"

/* VA hole for 48bit addresses on Vega10 */
#define AMDGPU_GMC_HOLE_START   0x0000800000000000ULL
#define AMDGPU_GMC_HOLE_END     0xffff800000000000ULL

/*
 * The hardware is programmed as if the hole doesn't exist, using start and
 * end address values.
 *
 * This mask is used to remove the upper 16 bits of the VA and so come up
 * with the linear address value.
 */
#define AMDGPU_GMC_HOLE_MASK    0x0000ffffffffffffULL
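
/*
 * For example, masking an address from the canonical upper half recovers the
 * linear value the hardware is programmed with:
 *
 *   0xffff800000000000ULL & AMDGPU_GMC_HOLE_MASK == 0x0000800000000000ULL
 *
 * amdgpu_gmc_sign_extend() below performs the inverse operation.
 */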

/*
 * Ring size as power of two for the log of recent faults.
 */
#define AMDGPU_GMC_FAULT_RING_ORDER     8
#define AMDGPU_GMC_FAULT_RING_SIZE      (1 << AMDGPU_GMC_FAULT_RING_ORDER)

/*
 * Hash size as power of two for the log of recent faults
 */
#define AMDGPU_GMC_FAULT_HASH_ORDER     8
#define AMDGPU_GMC_FAULT_HASH_SIZE      (1 << AMDGPU_GMC_FAULT_HASH_ORDER)

/*
 * Number of IH timestamp ticks until a fault is considered handled
 */
#define AMDGPU_GMC_FAULT_TIMEOUT        5000ULL
  62
  63struct firmware;
  64
  65/*
  66 * GMC page fault information
  67 */
  68struct amdgpu_gmc_fault {
  69        uint64_t        timestamp;
  70        uint64_t        next:AMDGPU_GMC_FAULT_RING_ORDER;
  71        uint64_t        key:52;
  72};
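
/*
 * These faults are collected in the fault_ring of struct amdgpu_gmc below and
 * looked up through its fault_hash table: entries that hash to the same
 * bucket are chained via "next", "key" identifies the faulting address/PASID
 * combination, and "timestamp" lets an entry expire once it is older than
 * AMDGPU_GMC_FAULT_TIMEOUT IH ticks (see amdgpu_gmc_filter_faults()).
 */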

/*
 * VMHUB structures, functions & helpers
 */
struct amdgpu_vmhub {
        uint32_t        ctx0_ptb_addr_lo32;
        uint32_t        ctx0_ptb_addr_hi32;
        uint32_t        vm_inv_eng0_sem;
        uint32_t        vm_inv_eng0_req;
        uint32_t        vm_inv_eng0_ack;
        uint32_t        vm_context0_cntl;
        uint32_t        vm_l2_pro_fault_status;
        uint32_t        vm_l2_pro_fault_cntl;
};

/*
 * GPU MC structures, functions & helpers
 */
struct amdgpu_gmc_funcs {
        /* flush the vm tlb via mmio */
        void (*flush_gpu_tlb)(struct amdgpu_device *adev, uint32_t vmid,
                                uint32_t vmhub, uint32_t flush_type);
        /* flush the vm tlb via pasid */
        int (*flush_gpu_tlb_pasid)(struct amdgpu_device *adev, uint16_t pasid,
                                        uint32_t flush_type, bool all_hub);
        /* flush the vm tlb via ring */
        uint64_t (*emit_flush_gpu_tlb)(struct amdgpu_ring *ring, unsigned vmid,
                                       uint64_t pd_addr);
        /* Change the VMID -> PASID mapping */
        void (*emit_pasid_mapping)(struct amdgpu_ring *ring, unsigned vmid,
                                   unsigned pasid);
        /* enable/disable PRT support */
        void (*set_prt)(struct amdgpu_device *adev, bool enable);
        /* map mtype to hardware flags */
        uint64_t (*map_mtype)(struct amdgpu_device *adev, uint32_t flags);
        /* get the pde for a given mc addr */
        void (*get_vm_pde)(struct amdgpu_device *adev, int level,
                           u64 *dst, u64 *flags);
        /* get the pte flags to use for a BO VA mapping */
        void (*get_vm_pte)(struct amdgpu_device *adev,
                           struct amdgpu_bo_va_mapping *mapping,
                           uint64_t *flags);
};

struct amdgpu_xgmi {
        /* from psp */
        u64 node_id;
        u64 hive_id;
        /* fixed per family */
        u64 node_segment_size;
        /* physical node (0-3) */
        unsigned physical_node_id;
        /* number of nodes (0-4) */
        unsigned num_physical_nodes;
        /* gpu list in the same hive */
        struct list_head head;
        bool supported;
        struct ras_common_if *ras_if;
};

struct amdgpu_gmc {
        /* FB's physical address in MMIO space (for the CPU to
         * map the FB). This is different from the agp/
         * gart/vram_start/end fields, as those are from the
         * GPU's view while aper_base is from the CPU's view.
         */
        resource_size_t         aper_size;
        resource_size_t         aper_base;
        /* for some chips with <= 32MB of VRAM we need to lie
         * about the vram size near the mc fb location */
        u64                     mc_vram_size;
        u64                     visible_vram_size;
        /* AGP aperture start and end in MC address space.
         * The driver finds a hole in the MC address space
         * to place the AGP aperture by setting the
         * MC_VM_AGP_BOT/TOP registers.
         * Under VMID0, logical address == MC address. The AGP
         * aperture maps to physical bus or IOVA addresses.
         * The AGP aperture is used to simulate the FB in the ZFB
         * case. It is also used for the page table in system
         * memory (mainly for APUs).
         */
        u64                     agp_size;
        u64                     agp_start;
        u64                     agp_end;
        /* GART aperture start and end in MC address space.
         * The driver finds a hole in the MC address space
         * to place the GART by setting the
         * VM_CONTEXT0_PAGE_TABLE_START/END_ADDR registers.
         * Under VMID0, logical addresses inside the GART aperture
         * are translated through the gpuvm GART page table to
         * access pageable system memory.
         */
        u64                     gart_size;
        u64                     gart_start;
        u64                     gart_end;
        /* Frame buffer aperture of this GPU device. Different from
         * fb_start (see below), this only covers the local GPU device.
         * The driver gets fb_start from MC_VM_FB_LOCATION_BASE (set by
         * the vbios) and calculates vram_start of this local device by
         * adding an offset inside the XGMI hive.
         * Under VMID0, logical address == MC address.
         */
        u64                     vram_start;
        u64                     vram_end;
        /* FB region. On a single GPU it is the same as the local vram
         * region; in an XGMI configuration it covers all GPUs in the
         * same hive, and each GPU in the hive has the same view of
         * this FB region.
         * GPU0's vram starts at offset (0 * segment size),
         * GPU1 starts at offset (1 * segment size), etc.
         */
        u64                     fb_start;
        u64                     fb_end;
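        /* For example, the GPU with xgmi.physical_node_id == 2 in a hive
         * is expected to have
         *
         *   vram_start == fb_start + 2 * xgmi.node_segment_size
         *
         * matching the per-node offsets described in the FB region
         * comment above.
         */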
        unsigned                vram_width;
        u64                     real_vram_size;
        int                     vram_mtrr;
        u64                     mc_mask;
        const struct firmware   *fw;    /* MC firmware */
        uint32_t                fw_version;
        struct amdgpu_irq_src   vm_fault;
        uint32_t                vram_type;
        uint8_t                 vram_vendor;
        uint32_t                srbm_soft_reset;
        bool                    prt_warning;
        uint64_t                stolen_size;
        uint32_t                sdpif_register;
        /* apertures */
        u64                     shared_aperture_start;
        u64                     shared_aperture_end;
        u64                     private_aperture_start;
        u64                     private_aperture_end;
        /* protects concurrent invalidation */
        spinlock_t              invalidate_lock;
        bool                    translate_further;
        struct kfd_vm_fault_info *vm_fault_info;
        atomic_t                vm_fault_info_updated;

        struct amdgpu_gmc_fault fault_ring[AMDGPU_GMC_FAULT_RING_SIZE];
        struct {
                uint64_t        idx:AMDGPU_GMC_FAULT_RING_ORDER;
        } fault_hash[AMDGPU_GMC_FAULT_HASH_SIZE];
        uint64_t                last_fault:AMDGPU_GMC_FAULT_RING_ORDER;

        bool tmz_enabled;

        const struct amdgpu_gmc_funcs   *gmc_funcs;

        struct amdgpu_xgmi xgmi;
        struct amdgpu_irq_src   ecc_irq;
};

#define amdgpu_gmc_flush_gpu_tlb(adev, vmid, vmhub, type) ((adev)->gmc.gmc_funcs->flush_gpu_tlb((adev), (vmid), (vmhub), (type)))
#define amdgpu_gmc_flush_gpu_tlb_pasid(adev, pasid, type, allhub) \
        ((adev)->gmc.gmc_funcs->flush_gpu_tlb_pasid \
        ((adev), (pasid), (type), (allhub)))
#define amdgpu_gmc_emit_flush_gpu_tlb(r, vmid, addr) (r)->adev->gmc.gmc_funcs->emit_flush_gpu_tlb((r), (vmid), (addr))
#define amdgpu_gmc_emit_pasid_mapping(r, vmid, pasid) (r)->adev->gmc.gmc_funcs->emit_pasid_mapping((r), (vmid), (pasid))
#define amdgpu_gmc_map_mtype(adev, flags) (adev)->gmc.gmc_funcs->map_mtype((adev),(flags))
#define amdgpu_gmc_get_vm_pde(adev, level, dst, flags) (adev)->gmc.gmc_funcs->get_vm_pde((adev), (level), (dst), (flags))
#define amdgpu_gmc_get_vm_pte(adev, mapping, flags) (adev)->gmc.gmc_funcs->get_vm_pte((adev), (mapping), (flags))
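
/*
 * These wrappers just dispatch to the per-ASIC callbacks in
 * adev->gmc.gmc_funcs. As a sketch, a caller wanting to invalidate the TLB
 * for VMID 0 on the GFX hub via MMIO could use (assuming the AMDGPU_GFXHUB_0
 * hub index from amdgpu.h and flush type 0, whose exact meaning is ASIC
 * specific):
 *
 *   amdgpu_gmc_flush_gpu_tlb(adev, 0, AMDGPU_GFXHUB_0, 0);
 */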

/**
 * amdgpu_gmc_vram_full_visible - Check if full VRAM is visible through the BAR
 *
 * @gmc: amdgpu_gmc structure to check
 *
 * Returns:
 * True if full VRAM is visible through the BAR
 */
static inline bool amdgpu_gmc_vram_full_visible(struct amdgpu_gmc *gmc)
{
        WARN_ON(gmc->real_vram_size < gmc->visible_vram_size);

        return (gmc->real_vram_size == gmc->visible_vram_size);
}

/**
 * amdgpu_gmc_sign_extend - sign extend the given gmc address
 *
 * @addr: address to extend
 */
static inline uint64_t amdgpu_gmc_sign_extend(uint64_t addr)
{
        if (addr >= AMDGPU_GMC_HOLE_START)
                addr |= AMDGPU_GMC_HOLE_END;

        return addr;
}
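
/*
 * For example, the hole start itself is extended into the canonical upper
 * half,
 *
 *   amdgpu_gmc_sign_extend(0x0000800000000000ULL) == 0xffff800000000000ULL
 *
 * while addresses below AMDGPU_GMC_HOLE_START are returned unchanged.
 */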

void amdgpu_gmc_get_pde_for_bo(struct amdgpu_bo *bo, int level,
                               uint64_t *addr, uint64_t *flags);
int amdgpu_gmc_set_pte_pde(struct amdgpu_device *adev, void *cpu_pt_addr,
                                uint32_t gpu_page_idx, uint64_t addr,
                                uint64_t flags);
uint64_t amdgpu_gmc_pd_addr(struct amdgpu_bo *bo);
uint64_t amdgpu_gmc_agp_addr(struct ttm_buffer_object *bo);
void amdgpu_gmc_vram_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc,
                              u64 base);
void amdgpu_gmc_gart_location(struct amdgpu_device *adev,
                              struct amdgpu_gmc *mc);
void amdgpu_gmc_agp_location(struct amdgpu_device *adev,
                             struct amdgpu_gmc *mc);
bool amdgpu_gmc_filter_faults(struct amdgpu_device *adev, uint64_t addr,
                              uint16_t pasid, uint64_t timestamp);
int amdgpu_gmc_ras_late_init(struct amdgpu_device *adev);
void amdgpu_gmc_ras_fini(struct amdgpu_device *adev);
int amdgpu_gmc_allocate_vm_inv_eng(struct amdgpu_device *adev);
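
/*
 * amdgpu_gmc_filter_faults() is meant to be called from the VM fault
 * interrupt handler to drop retries of recently seen faults. A rough usage
 * sketch, assuming "entry" is the IH ring entry being processed:
 *
 *   if (amdgpu_gmc_filter_faults(adev, addr, entry->pasid,
 *                                entry->timestamp))
 *           return 1;
 *
 * A true return value means the same address/PASID faulted within the last
 * AMDGPU_GMC_FAULT_TIMEOUT ticks and can be silently ignored; otherwise the
 * handler goes on to report and handle the fault.
 */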

extern void amdgpu_gmc_tmz_set(struct amdgpu_device *adev);

#endif