linux/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
   1/*
   2 * Copyright 2018 Advanced Micro Devices, Inc.
   3 * All Rights Reserved.
   4 *
   5 * Permission is hereby granted, free of charge, to any person obtaining a
   6 * copy of this software and associated documentation files (the
   7 * "Software"), to deal in the Software without restriction, including
   8 * without limitation the rights to use, copy, modify, merge, publish,
   9 * distribute, sub license, and/or sell copies of the Software, and to
  10 * permit persons to whom the Software is furnished to do so, subject to
  11 * the following conditions:
  12 *
  13 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  14 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  15 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
  16 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
  17 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
  18 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
  19 * USE OR OTHER DEALINGS IN THE SOFTWARE.
  20 *
  21 * The above copyright notice and this permission notice (including the
  22 * next paragraph) shall be included in all copies or substantial portions
  23 * of the Software.
  24 *
  25 */
  26
  27#include <linux/io-64-nonatomic-lo-hi.h>
  28
  29#include "amdgpu.h"
  30#include "amdgpu_gmc.h"
  31#include "amdgpu_ras.h"
  32#include "amdgpu_xgmi.h"
  33
  34#include <drm/drm_drv.h>
  35
  36/**
  37 * amdgpu_gmc_pdb0_alloc - allocate vram for pdb0
  38 *
  39 * @adev: amdgpu_device pointer
  40 *
  41 * Allocate video memory for pdb0 and map it for CPU access
  42 * Returns 0 for success, error for failure.
  43 */
  44int amdgpu_gmc_pdb0_alloc(struct amdgpu_device *adev)
  45{
  46        int r;
  47        struct amdgpu_bo_param bp;
  48        u64 vram_size = adev->gmc.xgmi.node_segment_size * adev->gmc.xgmi.num_physical_nodes;
  49        uint32_t pde0_page_shift = adev->gmc.vmid0_page_table_block_size + 21;
  50        uint32_t npdes = (vram_size + (1ULL << pde0_page_shift) - 1) >> pde0_page_shift;
  51
  52        memset(&bp, 0, sizeof(bp));
  53        bp.size = PAGE_ALIGN((npdes + 1) * 8);
  54        bp.byte_align = PAGE_SIZE;
  55        bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
  56        bp.flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
  57                AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
  58        bp.type = ttm_bo_type_kernel;
  59        bp.resv = NULL;
  60        bp.bo_ptr_size = sizeof(struct amdgpu_bo);
  61
  62        r = amdgpu_bo_create(adev, &bp, &adev->gmc.pdb0_bo);
  63        if (r)
  64                return r;
  65
  66        r = amdgpu_bo_reserve(adev->gmc.pdb0_bo, false);
  67        if (unlikely(r != 0))
  68                goto bo_reserve_failure;
  69
  70        r = amdgpu_bo_pin(adev->gmc.pdb0_bo, AMDGPU_GEM_DOMAIN_VRAM);
  71        if (r)
  72                goto bo_pin_failure;
  73        r = amdgpu_bo_kmap(adev->gmc.pdb0_bo, &adev->gmc.ptr_pdb0);
  74        if (r)
  75                goto bo_kmap_failure;
  76
  77        amdgpu_bo_unreserve(adev->gmc.pdb0_bo);
  78        return 0;
  79
  80bo_kmap_failure:
  81        amdgpu_bo_unpin(adev->gmc.pdb0_bo);
  82bo_pin_failure:
  83        amdgpu_bo_unreserve(adev->gmc.pdb0_bo);
  84bo_reserve_failure:
  85        amdgpu_bo_unref(&adev->gmc.pdb0_bo);
  86        return r;
  87}
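
/*
 * Illustrative sizing example (added; the numbers are assumed rather than
 * taken from any real board): on a hypothetical 4-node hive with a 32G
 * node_segment_size and vmid0_page_table_block_size = 9, pde0_page_shift is
 * 9 + 21 = 30, so each PDB0 entry covers 1G.  vram_size is then 128G,
 * npdes is 128 and bp.size is PAGE_ALIGN((128 + 1) * 8) = 4096 with 4K
 * pages, i.e. a single VRAM page holds the whole PDB0 plus the extra entry
 * used later for the GART PTB.
 */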
  88
  89/**
  90 * amdgpu_gmc_get_pde_for_bo - get the PDE for a BO
  91 *
  92 * @bo: the BO to get the PDE for
   93 * @level: the level in the PD hierarchy
  94 * @addr: resulting addr
  95 * @flags: resulting flags
  96 *
  97 * Get the address and flags to be used for a PDE (Page Directory Entry).
  98 */
  99void amdgpu_gmc_get_pde_for_bo(struct amdgpu_bo *bo, int level,
 100                               uint64_t *addr, uint64_t *flags)
 101{
 102        struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
 103
 104        switch (bo->tbo.resource->mem_type) {
 105        case TTM_PL_TT:
 106                *addr = bo->tbo.ttm->dma_address[0];
 107                break;
 108        case TTM_PL_VRAM:
 109                *addr = amdgpu_bo_gpu_offset(bo);
 110                break;
 111        default:
 112                *addr = 0;
 113                break;
 114        }
 115        *flags = amdgpu_ttm_tt_pde_flags(bo->tbo.ttm, bo->tbo.resource);
 116        amdgpu_gmc_get_vm_pde(adev, level, addr, flags);
 117}
 118
 119/*
 120 * amdgpu_gmc_pd_addr - return the address of the root directory
 121 */
 122uint64_t amdgpu_gmc_pd_addr(struct amdgpu_bo *bo)
 123{
 124        struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
 125        uint64_t pd_addr;
 126
 127        /* TODO: move that into ASIC specific code */
 128        if (adev->asic_type >= CHIP_VEGA10) {
 129                uint64_t flags = AMDGPU_PTE_VALID;
 130
 131                amdgpu_gmc_get_pde_for_bo(bo, -1, &pd_addr, &flags);
 132                pd_addr |= flags;
 133        } else {
 134                pd_addr = amdgpu_bo_gpu_offset(bo);
 135        }
 136        return pd_addr;
 137}
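
/*
 * Note added for clarity: on VEGA10 and later the value returned above is a
 * ready-to-use root PDE, i.e. the GPU address of the page directory with the
 * ASIC specific PDE flags (at least AMDGPU_PTE_VALID) already OR'ed in,
 * which is the form the VM flush path programs into the hub registers.
 */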
 138
 139/**
 140 * amdgpu_gmc_set_pte_pde - update the page tables using CPU
 141 *
 142 * @adev: amdgpu_device pointer
 143 * @cpu_pt_addr: cpu address of the page table
 144 * @gpu_page_idx: entry in the page table to update
 145 * @addr: dst addr to write into pte/pde
 146 * @flags: access flags
 147 *
 148 * Update the page tables using CPU.
 149 */
 150int amdgpu_gmc_set_pte_pde(struct amdgpu_device *adev, void *cpu_pt_addr,
 151                                uint32_t gpu_page_idx, uint64_t addr,
 152                                uint64_t flags)
 153{
 154        void __iomem *ptr = (void *)cpu_pt_addr;
 155        uint64_t value;
 156
 157        /*
 158         * The following is for PTE only. GART does not have PDEs.
  159         */
 160        value = addr & 0x0000FFFFFFFFF000ULL;
 161        value |= flags;
 162        writeq(value, ptr + (gpu_page_idx * 8));
 163
 164        return 0;
 165}
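
/*
 * Entry layout example (added; the values are purely illustrative):
 *
 *   addr  = 0x0000001234567000
 *   flags = AMDGPU_PTE_VALID | AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE
 *
 * The mask keeps address bits 47:12, the flag bits listed here live in the
 * low bits of the entry, and the combined 64 bit value is written with
 * writeq() at byte offset gpu_page_idx * 8 into the CPU mapping of the table.
 */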
 166
 167/**
 168 * amdgpu_gmc_agp_addr - return the address in the AGP address space
 169 *
 170 * @bo: TTM BO which needs the address, must be in GTT domain
 171 *
 172 * Tries to figure out how to access the BO through the AGP aperture. Returns
 173 * AMDGPU_BO_INVALID_OFFSET if that is not possible.
 174 */
 175uint64_t amdgpu_gmc_agp_addr(struct ttm_buffer_object *bo)
 176{
 177        struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
 178
 179        if (bo->ttm->num_pages != 1 || bo->ttm->caching == ttm_cached)
 180                return AMDGPU_BO_INVALID_OFFSET;
 181
 182        if (bo->ttm->dma_address[0] + PAGE_SIZE >= adev->gmc.agp_size)
 183                return AMDGPU_BO_INVALID_OFFSET;
 184
 185        return adev->gmc.agp_start + bo->ttm->dma_address[0];
 186}
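
/*
 * Added note: only single page BOs can be reached through the AGP aperture
 * because the aperture is a flat, untranslated window into system memory, so
 * a BO spanning several (potentially non-contiguous) pages cannot be
 * represented in it; cached BOs are rejected here as well and use a regular
 * GART mapping instead.
 */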
 187
 188/**
 189 * amdgpu_gmc_vram_location - try to find VRAM location
 190 *
 191 * @adev: amdgpu device structure holding all necessary information
 192 * @mc: memory controller structure holding memory information
 193 * @base: base address at which to put VRAM
 194 *
 195 * Function will try to place VRAM at base address provided
 196 * as parameter.
 197 */
 198void amdgpu_gmc_vram_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc,
 199                              u64 base)
 200{
 201        uint64_t limit = (uint64_t)amdgpu_vram_limit << 20;
 202
 203        mc->vram_start = base;
 204        mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
 205        if (limit && limit < mc->real_vram_size)
 206                mc->real_vram_size = limit;
 207
 208        if (mc->xgmi.num_physical_nodes == 0) {
 209                mc->fb_start = mc->vram_start;
 210                mc->fb_end = mc->vram_end;
 211        }
 212        dev_info(adev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
 213                        mc->mc_vram_size >> 20, mc->vram_start,
 214                        mc->vram_end, mc->real_vram_size >> 20);
 215}
 216
 217/** amdgpu_gmc_sysvm_location - place vram and gart in sysvm aperture
 218 *
 219 * @adev: amdgpu device structure holding all necessary information
 220 * @mc: memory controller structure holding memory information
 221 *
  222 * This function is only used when GART is used for FB translation. In
  223 * that case, we use the sysvm aperture (vmid0 page tables) for both vram
  224 * and gart (aka system memory) access.
 225 *
  226 * GPUVM (and our organization of vmid0 page tables) requires the sysvm
  227 * aperture to be placed at an address aligned to 8 times the native
  228 * page size. For example, if vm_context0_cntl.page_table_block_size
  229 * is 12, the native page size is 8G (2M * 2^12), so sysvm should start
  230 * at a 64G-aligned address. For simplicity, we just put sysvm at
  231 * address 0, so vram starts at address 0 and gart is right after vram.
 232 */
 233void amdgpu_gmc_sysvm_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc)
 234{
 235        u64 hive_vram_start = 0;
 236        u64 hive_vram_end = mc->xgmi.node_segment_size * mc->xgmi.num_physical_nodes - 1;
 237        mc->vram_start = mc->xgmi.node_segment_size * mc->xgmi.physical_node_id;
 238        mc->vram_end = mc->vram_start + mc->xgmi.node_segment_size - 1;
 239        mc->gart_start = hive_vram_end + 1;
 240        mc->gart_end = mc->gart_start + mc->gart_size - 1;
 241        mc->fb_start = hive_vram_start;
 242        mc->fb_end = hive_vram_end;
 243        dev_info(adev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
 244                        mc->mc_vram_size >> 20, mc->vram_start,
 245                        mc->vram_end, mc->real_vram_size >> 20);
 246        dev_info(adev->dev, "GART: %lluM 0x%016llX - 0x%016llX\n",
 247                        mc->gart_size >> 20, mc->gart_start, mc->gart_end);
 248}
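
/*
 * Worked example (added, with assumed numbers): a 4 node hive with a 32G
 * node_segment_size, a 512M GART and physical_node_id = 2 ends up with
 *
 *   fb_start   = 0      fb_end   = 128G - 1   (the whole hive)
 *   vram_start = 64G    vram_end = 96G - 1    (this node's slice)
 *   gart_start = 128G   gart_end = 128G + 512M - 1
 *
 * Every node therefore sees the same flat hive layout, with its own VRAM
 * slice at node_segment_size * physical_node_id and GART right behind the
 * hive VRAM.
 */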
 249
 250/**
 251 * amdgpu_gmc_gart_location - try to find GART location
 252 *
 253 * @adev: amdgpu device structure holding all necessary information
 254 * @mc: memory controller structure holding memory information
 255 *
  256 * Function will try to place GART before or after VRAM.
  257 * If the GART size is bigger than the space left, we adjust the GART size.
  258 * Thus this function never fails.
 259 */
 260void amdgpu_gmc_gart_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc)
 261{
 262        const uint64_t four_gb = 0x100000000ULL;
 263        u64 size_af, size_bf;
  264        /* To avoid the hole, limit the max mc address to AMDGPU_GMC_HOLE_START */
 265        u64 max_mc_address = min(adev->gmc.mc_mask, AMDGPU_GMC_HOLE_START - 1);
 266
 267        /* VCE doesn't like it when BOs cross a 4GB segment, so align
 268         * the GART base on a 4GB boundary as well.
 269         */
 270        size_bf = mc->fb_start;
 271        size_af = max_mc_address + 1 - ALIGN(mc->fb_end + 1, four_gb);
 272
 273        if (mc->gart_size > max(size_bf, size_af)) {
 274                dev_warn(adev->dev, "limiting GART\n");
 275                mc->gart_size = max(size_bf, size_af);
 276        }
 277
 278        if ((size_bf >= mc->gart_size && size_bf < size_af) ||
 279            (size_af < mc->gart_size))
 280                mc->gart_start = 0;
 281        else
 282                mc->gart_start = max_mc_address - mc->gart_size + 1;
 283
 284        mc->gart_start &= ~(four_gb - 1);
 285        mc->gart_end = mc->gart_start + mc->gart_size - 1;
 286        dev_info(adev->dev, "GART: %lluM 0x%016llX - 0x%016llX\n",
 287                        mc->gart_size >> 20, mc->gart_start, mc->gart_end);
 288}
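
/*
 * Illustrative example (added, assumed numbers): with fb_start = 0,
 * fb_end = 16G - 1 and a 512M GART request, size_bf is 0 and size_af is
 * roughly the room between 16G and AMDGPU_GMC_HOLE_START, so the GART is
 * placed at the top of the usable MC range and then aligned down to a 4GB
 * boundary.  Only when the request does not fit on either side is the GART
 * shrunk to the larger of the two gaps.
 */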
 289
 290/**
 291 * amdgpu_gmc_agp_location - try to find AGP location
 292 * @adev: amdgpu device structure holding all necessary information
 293 * @mc: memory controller structure holding memory information
 294 *
  295 * Function will try to find a place for the AGP BAR in the MC address
  296 * space.
  297 *
  298 * AGP BAR will be assigned the largest available hole in the address space.
  299 * Should be called after VRAM and GART locations are set up.
 300 */
 301void amdgpu_gmc_agp_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc)
 302{
 303        const uint64_t sixteen_gb = 1ULL << 34;
 304        const uint64_t sixteen_gb_mask = ~(sixteen_gb - 1);
 305        u64 size_af, size_bf;
 306
 307        if (amdgpu_sriov_vf(adev)) {
 308                mc->agp_start = 0xffffffffffff;
 309                mc->agp_end = 0x0;
 310                mc->agp_size = 0;
 311
 312                return;
 313        }
 314
 315        if (mc->fb_start > mc->gart_start) {
 316                size_bf = (mc->fb_start & sixteen_gb_mask) -
 317                        ALIGN(mc->gart_end + 1, sixteen_gb);
 318                size_af = mc->mc_mask + 1 - ALIGN(mc->fb_end + 1, sixteen_gb);
 319        } else {
 320                size_bf = mc->fb_start & sixteen_gb_mask;
 321                size_af = (mc->gart_start & sixteen_gb_mask) -
 322                        ALIGN(mc->fb_end + 1, sixteen_gb);
 323        }
 324
 325        if (size_bf > size_af) {
 326                mc->agp_start = (mc->fb_start - size_bf) & sixteen_gb_mask;
 327                mc->agp_size = size_bf;
 328        } else {
 329                mc->agp_start = ALIGN(mc->fb_end + 1, sixteen_gb);
 330                mc->agp_size = size_af;
 331        }
 332
 333        mc->agp_end = mc->agp_start + mc->agp_size - 1;
 334        dev_info(adev->dev, "AGP: %lluM 0x%016llX - 0x%016llX\n",
 335                        mc->agp_size >> 20, mc->agp_start, mc->agp_end);
 336}
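
/*
 * Added note: the AGP window is carved out of whichever 16GB aligned gap
 * (below or above the FB/GART pair) is larger, so its final size depends on
 * how VRAM and GART were placed above; under SR-IOV it is simply disabled by
 * giving it a zero size and an inverted start/end range.
 */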
 337
 338/**
  339 * amdgpu_gmc_fault_key - get hash key from vm fault address and pasid
 340 *
 341 * @addr: 48 bit physical address, page aligned (36 significant bits)
 342 * @pasid: 16 bit process address space identifier
 343 */
 344static inline uint64_t amdgpu_gmc_fault_key(uint64_t addr, uint16_t pasid)
 345{
 346        return addr << 4 | pasid;
 347}
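
/*
 * Added note: since @addr is page aligned its low 12 bits are zero, so after
 * the shift by 4 the low 16 bits of the key are free and the 16 bit PASID
 * fits into them exactly; the key therefore encodes the (page, PASID) pair
 * uniquely in a single 64 bit value that can be stored and compared
 * atomically in the fault ring.
 */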
 348
 349/**
 350 * amdgpu_gmc_filter_faults - filter VM faults
 351 *
 352 * @adev: amdgpu device structure
 353 * @addr: address of the VM fault
 354 * @pasid: PASID of the process causing the fault
 355 * @timestamp: timestamp of the fault
 356 *
 357 * Returns:
 358 * True if the fault was filtered and should not be processed further.
 359 * False if the fault is a new one and needs to be handled.
 360 */
 361bool amdgpu_gmc_filter_faults(struct amdgpu_device *adev, uint64_t addr,
 362                              uint16_t pasid, uint64_t timestamp)
 363{
 364        struct amdgpu_gmc *gmc = &adev->gmc;
 365        uint64_t stamp, key = amdgpu_gmc_fault_key(addr, pasid);
 366        struct amdgpu_gmc_fault *fault;
 367        uint32_t hash;
 368
 369        /* If we don't have space left in the ring buffer return immediately */
 370        stamp = max(timestamp, AMDGPU_GMC_FAULT_TIMEOUT + 1) -
 371                AMDGPU_GMC_FAULT_TIMEOUT;
 372        if (gmc->fault_ring[gmc->last_fault].timestamp >= stamp)
 373                return true;
 374
 375        /* Try to find the fault in the hash */
 376        hash = hash_64(key, AMDGPU_GMC_FAULT_HASH_ORDER);
 377        fault = &gmc->fault_ring[gmc->fault_hash[hash].idx];
 378        while (fault->timestamp >= stamp) {
 379                uint64_t tmp;
 380
 381                if (atomic64_read(&fault->key) == key)
 382                        return true;
 383
 384                tmp = fault->timestamp;
 385                fault = &gmc->fault_ring[fault->next];
 386
 387                /* Check if the entry was reused */
 388                if (fault->timestamp >= tmp)
 389                        break;
 390        }
 391
 392        /* Add the fault to the ring */
 393        fault = &gmc->fault_ring[gmc->last_fault];
 394        atomic64_set(&fault->key, key);
 395        fault->timestamp = timestamp;
 396
 397        /* And update the hash */
 398        fault->next = gmc->fault_hash[hash].idx;
 399        gmc->fault_hash[hash].idx = gmc->last_fault++;
 400        return false;
 401}
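
/*
 * Added note on the data structure: faults are kept in a small ring buffer
 * whose entries are also threaded into hash buckets via ->next.  A fault is
 * filtered when the same key is found among entries whose timestamps are
 * still within AMDGPU_GMC_FAULT_TIMEOUT of the new fault; older entries are
 * ignored and eventually get overwritten as the ring advances.
 */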
 402
 403/**
 404 * amdgpu_gmc_filter_faults_remove - remove address from VM faults filter
 405 *
 406 * @adev: amdgpu device structure
 407 * @addr: address of the VM fault
 408 * @pasid: PASID of the process causing the fault
 409 *
  410 * Remove the address from the fault filter, so that future vm faults on this
  411 * address are passed to the retry fault handler to recover.
 412 */
 413void amdgpu_gmc_filter_faults_remove(struct amdgpu_device *adev, uint64_t addr,
 414                                     uint16_t pasid)
 415{
 416        struct amdgpu_gmc *gmc = &adev->gmc;
 417        uint64_t key = amdgpu_gmc_fault_key(addr, pasid);
 418        struct amdgpu_gmc_fault *fault;
 419        uint32_t hash;
 420        uint64_t tmp;
 421
 422        hash = hash_64(key, AMDGPU_GMC_FAULT_HASH_ORDER);
 423        fault = &gmc->fault_ring[gmc->fault_hash[hash].idx];
 424        do {
 425                if (atomic64_cmpxchg(&fault->key, key, 0) == key)
 426                        break;
 427
 428                tmp = fault->timestamp;
 429                fault = &gmc->fault_ring[fault->next];
 430        } while (fault->timestamp < tmp);
 431}
 432
 433int amdgpu_gmc_ras_late_init(struct amdgpu_device *adev)
 434{
 435        int r;
 436
 437        if (adev->umc.ras_funcs &&
 438            adev->umc.ras_funcs->ras_late_init) {
 439                r = adev->umc.ras_funcs->ras_late_init(adev);
 440                if (r)
 441                        return r;
 442        }
 443
 444        if (adev->mmhub.ras_funcs &&
 445            adev->mmhub.ras_funcs->ras_late_init) {
 446                r = adev->mmhub.ras_funcs->ras_late_init(adev);
 447                if (r)
 448                        return r;
 449        }
 450
 451        if (!adev->gmc.xgmi.connected_to_cpu)
 452                adev->gmc.xgmi.ras_funcs = &xgmi_ras_funcs;
 453
 454        if (adev->gmc.xgmi.ras_funcs &&
 455            adev->gmc.xgmi.ras_funcs->ras_late_init) {
 456                r = adev->gmc.xgmi.ras_funcs->ras_late_init(adev);
 457                if (r)
 458                        return r;
 459        }
 460
 461        if (adev->hdp.ras_funcs &&
 462            adev->hdp.ras_funcs->ras_late_init) {
 463                r = adev->hdp.ras_funcs->ras_late_init(adev);
 464                if (r)
 465                        return r;
 466        }
 467
 468        if (adev->mca.mp0.ras_funcs &&
 469            adev->mca.mp0.ras_funcs->ras_late_init) {
 470                r = adev->mca.mp0.ras_funcs->ras_late_init(adev);
 471                if (r)
 472                        return r;
 473        }
 474
 475        if (adev->mca.mp1.ras_funcs &&
 476            adev->mca.mp1.ras_funcs->ras_late_init) {
 477                r = adev->mca.mp1.ras_funcs->ras_late_init(adev);
 478                if (r)
 479                        return r;
 480        }
 481
 482        if (adev->mca.mpio.ras_funcs &&
 483            adev->mca.mpio.ras_funcs->ras_late_init) {
 484                r = adev->mca.mpio.ras_funcs->ras_late_init(adev);
 485                if (r)
 486                        return r;
 487        }
 488
 489        return 0;
 490}
 491
 492void amdgpu_gmc_ras_fini(struct amdgpu_device *adev)
 493{
 494        if (adev->umc.ras_funcs &&
 495            adev->umc.ras_funcs->ras_fini)
 496                adev->umc.ras_funcs->ras_fini(adev);
 497
 498        if (adev->mmhub.ras_funcs &&
 499            adev->mmhub.ras_funcs->ras_fini)
 500                adev->mmhub.ras_funcs->ras_fini(adev);
 501
 502        if (adev->gmc.xgmi.ras_funcs &&
 503            adev->gmc.xgmi.ras_funcs->ras_fini)
 504                adev->gmc.xgmi.ras_funcs->ras_fini(adev);
 505
 506        if (adev->hdp.ras_funcs &&
 507            adev->hdp.ras_funcs->ras_fini)
 508                adev->hdp.ras_funcs->ras_fini(adev);
 509}
 510
 511        /*
 512         * The latest engine allocation on gfx9/10 is:
 513         * Engine 2, 3: firmware
 514         * Engine 0, 1, 4~16: amdgpu ring,
 515         *                    subject to change when ring number changes
 516         * Engine 17: Gart flushes
 517         */
 518#define GFXHUB_FREE_VM_INV_ENGS_BITMAP          0x1FFF3
 519#define MMHUB_FREE_VM_INV_ENGS_BITMAP           0x1FFF3
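
/*
 * Decoding note (added): 0x1FFF3 sets bits 0, 1 and 4-16, matching the
 * allocation described above; bits 2 and 3 stay reserved for firmware and
 * bit 17 (GART flushes) is deliberately left out of the free bitmap.
 */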
 520
 521int amdgpu_gmc_allocate_vm_inv_eng(struct amdgpu_device *adev)
 522{
 523        struct amdgpu_ring *ring;
 524        unsigned vm_inv_engs[AMDGPU_MAX_VMHUBS] =
 525                {GFXHUB_FREE_VM_INV_ENGS_BITMAP, MMHUB_FREE_VM_INV_ENGS_BITMAP,
 526                GFXHUB_FREE_VM_INV_ENGS_BITMAP};
 527        unsigned i;
 528        unsigned vmhub, inv_eng;
 529
 530        for (i = 0; i < adev->num_rings; ++i) {
 531                ring = adev->rings[i];
 532                vmhub = ring->funcs->vmhub;
 533
 534                if (ring == &adev->mes.ring)
 535                        continue;
 536
 537                inv_eng = ffs(vm_inv_engs[vmhub]);
 538                if (!inv_eng) {
 539                        dev_err(adev->dev, "no VM inv eng for ring %s\n",
 540                                ring->name);
 541                        return -EINVAL;
 542                }
 543
 544                ring->vm_inv_eng = inv_eng - 1;
 545                vm_inv_engs[vmhub] &= ~(1 << ring->vm_inv_eng);
 546
 547                dev_info(adev->dev, "ring %s uses VM inv eng %u on hub %u\n",
 548                         ring->name, ring->vm_inv_eng, ring->funcs->vmhub);
 549        }
 550
 551        return 0;
 552}
 553
 554/**
 555 * amdgpu_gmc_tmz_set -- check and set if a device supports TMZ
 556 * @adev: amdgpu_device pointer
 557 *
  558 * Check and set if the device @adev supports Trusted Memory
 559 * Zones (TMZ).
 560 */
 561void amdgpu_gmc_tmz_set(struct amdgpu_device *adev)
 562{
 563        switch (adev->asic_type) {
 564        case CHIP_RAVEN:
 565        case CHIP_RENOIR:
 566                if (amdgpu_tmz == 0) {
 567                        adev->gmc.tmz_enabled = false;
 568                        dev_info(adev->dev,
 569                                 "Trusted Memory Zone (TMZ) feature disabled (cmd line)\n");
 570                } else {
 571                        adev->gmc.tmz_enabled = true;
 572                        dev_info(adev->dev,
 573                                 "Trusted Memory Zone (TMZ) feature enabled\n");
 574                }
 575                break;
 576        case CHIP_NAVI10:
 577        case CHIP_NAVI14:
 578        case CHIP_NAVI12:
 579        case CHIP_VANGOGH:
 580        case CHIP_YELLOW_CARP:
 581                /* Don't enable it by default yet.
 582                 */
 583                if (amdgpu_tmz < 1) {
 584                        adev->gmc.tmz_enabled = false;
 585                        dev_info(adev->dev,
 586                                 "Trusted Memory Zone (TMZ) feature disabled as experimental (default)\n");
 587                } else {
 588                        adev->gmc.tmz_enabled = true;
 589                        dev_info(adev->dev,
 590                                 "Trusted Memory Zone (TMZ) feature enabled as experimental (cmd line)\n");
 591                }
 592                break;
 593        default:
 594                adev->gmc.tmz_enabled = false;
 595                dev_info(adev->dev,
 596                         "Trusted Memory Zone (TMZ) feature not supported\n");
 597                break;
 598        }
 599}
 600
 601/**
 602 * amdgpu_gmc_noretry_set -- set per asic noretry defaults
 603 * @adev: amdgpu_device pointer
 604 *
 605 * Set a per asic default for the no-retry parameter.
 606 *
 607 */
 608void amdgpu_gmc_noretry_set(struct amdgpu_device *adev)
 609{
 610        struct amdgpu_gmc *gmc = &adev->gmc;
 611
 612        switch (adev->asic_type) {
 613        case CHIP_VEGA10:
 614        case CHIP_VEGA20:
 615        case CHIP_ARCTURUS:
 616        case CHIP_ALDEBARAN:
 617                /*
  618                 * noretry = 0 will cause kfd page fault tests to fail
  619                 * for some ASICs, so set the default to 1 for these ASICs.
 620                 */
 621                if (amdgpu_noretry == -1)
 622                        gmc->noretry = 1;
 623                else
 624                        gmc->noretry = amdgpu_noretry;
 625                break;
 626        case CHIP_RAVEN:
 627        default:
  628                /* Raven currently has issues with noretry;
  629                 * regardless of what we decide for other
  630                 * asics, we should leave raven with
  631                 * noretry = 0 until we root cause the
  632                 * issues.
  633                 *
  634                 * Default this to 0 for now, but we may want
 635                 * to change this in the future for certain
 636                 * GPUs as it can increase performance in
 637                 * certain cases.
 638                 */
 639                if (amdgpu_noretry == -1)
 640                        gmc->noretry = 0;
 641                else
 642                        gmc->noretry = amdgpu_noretry;
 643                break;
 644        }
 645}
 646
 647void amdgpu_gmc_set_vm_fault_masks(struct amdgpu_device *adev, int hub_type,
 648                                   bool enable)
 649{
 650        struct amdgpu_vmhub *hub;
 651        u32 tmp, reg, i;
 652
 653        hub = &adev->vmhub[hub_type];
 654        for (i = 0; i < 16; i++) {
 655                reg = hub->vm_context0_cntl + hub->ctx_distance * i;
 656
 657                tmp = (hub_type == AMDGPU_GFXHUB_0) ?
 658                        RREG32_SOC15_IP(GC, reg) :
 659                        RREG32_SOC15_IP(MMHUB, reg);
 660
 661                if (enable)
 662                        tmp |= hub->vm_cntx_cntl_vm_fault;
 663                else
 664                        tmp &= ~hub->vm_cntx_cntl_vm_fault;
 665
 666                (hub_type == AMDGPU_GFXHUB_0) ?
 667                        WREG32_SOC15_IP(GC, reg, tmp) :
 668                        WREG32_SOC15_IP(MMHUB, reg, tmp);
 669        }
 670}
 671
 672void amdgpu_gmc_get_vbios_allocations(struct amdgpu_device *adev)
 673{
 674        unsigned size;
 675
 676        /*
 677         * TODO:
 678         * Currently there is a bug where some memory client outside
  679         * of the driver writes to the first 8M of VRAM on S3 resume;
  680         * this overrides GART, which by default gets placed in the first 8M,
  681         * and causes VM_FAULTS once GTT is accessed.
  682         * Keep the stolen memory reservation until this is solved.
 683         */
 684        switch (adev->asic_type) {
 685        case CHIP_VEGA10:
 686        case CHIP_RAVEN:
 687        case CHIP_RENOIR:
 688                adev->mman.keep_stolen_vga_memory = true;
 689                break;
 690        default:
 691                adev->mman.keep_stolen_vga_memory = false;
 692                break;
 693        }
 694
 695        if (amdgpu_sriov_vf(adev) ||
 696            !amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_DCE)) {
 697                size = 0;
 698        } else {
 699                size = amdgpu_gmc_get_vbios_fb_size(adev);
 700
 701                if (adev->mman.keep_stolen_vga_memory)
 702                        size = max(size, (unsigned)AMDGPU_VBIOS_VGA_ALLOCATION);
 703        }
 704
 705        /* set to 0 if the pre-OS buffer uses up most of vram */
 706        if ((adev->gmc.real_vram_size - size) < (8 * 1024 * 1024))
 707                size = 0;
 708
 709        if (size > AMDGPU_VBIOS_VGA_ALLOCATION) {
 710                adev->mman.stolen_vga_size = AMDGPU_VBIOS_VGA_ALLOCATION;
 711                adev->mman.stolen_extended_size = size - adev->mman.stolen_vga_size;
 712        } else {
 713                adev->mman.stolen_vga_size = size;
 714                adev->mman.stolen_extended_size = 0;
 715        }
 716}
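
/*
 * Added note: the reservation is split so that the classic VGA part (at most
 * AMDGPU_VBIOS_VGA_ALLOCATION bytes) and the remainder of the pre-OS
 * framebuffer are tracked as stolen_vga_size and stolen_extended_size
 * respectively, allowing them to be kept or released independently later on.
 */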
 717
 718/**
 719 * amdgpu_gmc_init_pdb0 - initialize PDB0
 720 *
 721 * @adev: amdgpu_device pointer
 722 *
  723 * This function is only used when the GART page table is used
  724 * for FB address translation. In such a case, we construct
 725 * a 2-level system VM page table: PDB0->PTB, to cover both
 726 * VRAM of the hive and system memory.
 727 *
 728 * PDB0 is static, initialized once on driver initialization.
 729 * The first n entries of PDB0 are used as PTE by setting
 730 * P bit to 1, pointing to VRAM. The n+1'th entry points
 731 * to a big PTB covering system memory.
 732 *
 733 */
 734void amdgpu_gmc_init_pdb0(struct amdgpu_device *adev)
 735{
 736        int i;
 737        uint64_t flags = adev->gart.gart_pte_flags; //TODO it is UC. explore NC/RW?
 738        /* Each PDE0 (used as PTE) covers (2^vmid0_page_table_block_size)*2M
 739         */
 740        u64 vram_size = adev->gmc.xgmi.node_segment_size * adev->gmc.xgmi.num_physical_nodes;
 741        u64 pde0_page_size = (1ULL<<adev->gmc.vmid0_page_table_block_size)<<21;
 742        u64 vram_addr = adev->vm_manager.vram_base_offset -
 743                adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
 744        u64 vram_end = vram_addr + vram_size;
 745        u64 gart_ptb_gpu_pa = amdgpu_gmc_vram_pa(adev, adev->gart.bo);
 746        int idx;
 747
 748        if (!drm_dev_enter(adev_to_drm(adev), &idx))
 749                return;
 750
 751        flags |= AMDGPU_PTE_VALID | AMDGPU_PTE_READABLE;
 752        flags |= AMDGPU_PTE_WRITEABLE;
 753        flags |= AMDGPU_PTE_SNOOPED;
 754        flags |= AMDGPU_PTE_FRAG((adev->gmc.vmid0_page_table_block_size + 9*1));
 755        flags |= AMDGPU_PDE_PTE;
 756
 757        /* The first n PDE0 entries are used as PTE,
 758         * pointing to vram
 759         */
 760        for (i = 0; vram_addr < vram_end; i++, vram_addr += pde0_page_size)
 761                amdgpu_gmc_set_pte_pde(adev, adev->gmc.ptr_pdb0, i, vram_addr, flags);
 762
 763        /* The n+1'th PDE0 entry points to a huge
  764         * PTB which has more than 512 entries, each
 765         * pointing to a 4K system page
 766         */
 767        flags = AMDGPU_PTE_VALID;
 768        flags |= AMDGPU_PDE_BFS(0) | AMDGPU_PTE_SNOOPED;
 769        /* Requires gart_ptb_gpu_pa to be 4K aligned */
 770        amdgpu_gmc_set_pte_pde(adev, adev->gmc.ptr_pdb0, i, gart_ptb_gpu_pa, flags);
 771        drm_dev_exit(idx);
 772}
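
/*
 * Resulting layout, using the same assumed example numbers as above (128G of
 * hive VRAM, vmid0_page_table_block_size = 9, so pde0_page_size = 1G):
 *
 *   PDB0[0..127]  1G "fat" PTEs mapping the hive VRAM physical range
 *   PDB0[128]     PDE pointing at the GART PTB that maps system memory
 *
 * which matches the npdes + 1 entries reserved by amdgpu_gmc_pdb0_alloc().
 */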
 773
 774/**
 775 * amdgpu_gmc_vram_mc2pa - calculate vram buffer's physical address from MC
 776 * address
 777 *
 778 * @adev: amdgpu_device pointer
 779 * @mc_addr: MC address of buffer
 780 */
 781uint64_t amdgpu_gmc_vram_mc2pa(struct amdgpu_device *adev, uint64_t mc_addr)
 782{
 783        return mc_addr - adev->gmc.vram_start + adev->vm_manager.vram_base_offset;
 784}
 785
 786/**
 787 * amdgpu_gmc_vram_pa - calculate vram buffer object's physical address from
 788 * GPU's view
 789 *
 790 * @adev: amdgpu_device pointer
 791 * @bo: amdgpu buffer object
 792 */
 793uint64_t amdgpu_gmc_vram_pa(struct amdgpu_device *adev, struct amdgpu_bo *bo)
 794{
 795        return amdgpu_gmc_vram_mc2pa(adev, amdgpu_bo_gpu_offset(bo));
 796}
 797
 798/**
 799 * amdgpu_gmc_vram_cpu_pa - calculate vram buffer object's physical address
 800 * from CPU's view
 801 *
 802 * @adev: amdgpu_device pointer
 803 * @bo: amdgpu buffer object
 804 */
 805uint64_t amdgpu_gmc_vram_cpu_pa(struct amdgpu_device *adev, struct amdgpu_bo *bo)
 806{
 807        return amdgpu_bo_gpu_offset(bo) - adev->gmc.vram_start + adev->gmc.aper_base;
 808}
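
/*
 * Added note: the helpers above translate a VRAM buffer's MC address either
 * into the device physical address used when building page table entries
 * (vram_base_offset based: amdgpu_gmc_vram_mc2pa()/amdgpu_gmc_vram_pa()) or
 * into the CPU physical address behind the VRAM BAR (aper_base based:
 * amdgpu_gmc_vram_cpu_pa()); all of them assume the buffer is placed in VRAM.
 */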
 809
 810void amdgpu_gmc_get_reserved_allocation(struct amdgpu_device *adev)
 811{
 812        /* Some ASICs need to reserve a region of video memory to avoid access
  813         * from the driver */
 814        adev->mman.stolen_reserved_offset = 0;
 815        adev->mman.stolen_reserved_size = 0;
 816
 817        switch (adev->asic_type) {
 818        case CHIP_YELLOW_CARP:
 819                if (amdgpu_discovery == 0) {
 820                        adev->mman.stolen_reserved_offset = 0x1ffb0000;
 821                        adev->mman.stolen_reserved_size = 64 * PAGE_SIZE;
 822                }
 823                break;
 824        default:
 825                break;
 826        }
 827}
 828