linux/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif
#include "amdgpu.h"

/*
 * GART
 * The GART (Graphics Aperture Remapping Table) is an aperture
 * in the GPU's address space.  System pages can be mapped into
 * the aperture and look like contiguous pages from the GPU's
 * perspective.  A page table maps the pages in the aperture
 * to the actual backing pages in system memory.
 *
 * Radeon GPUs support both an internal GART, as described above,
 * and AGP.  AGP works similarly, but the GART table is configured
 * and maintained by the northbridge rather than the driver.
 * Radeon hw has a separate AGP aperture that is programmed to
 * point to the AGP aperture provided by the northbridge and the
 * requests are passed through to the northbridge aperture.
 * Both AGP and internal GART can be used at the same time, however
 * that is not currently supported by the driver.
 *
 * This file handles the common internal GART management.
 */
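
/*
 * Rough sketch of the typical call order, for illustration only; the real
 * call sites live in the per-ASIC GMC and TTM code, not in this file, and
 * the argument values below are placeholders:
 *
 *   amdgpu_gart_init(adev);                 // dummy page + bookkeeping
 *   amdgpu_gart_table_vram_alloc(adev);     // BO that backs the page table
 *   amdgpu_gart_table_vram_pin(adev);       // pin + kmap before GART enable
 *   amdgpu_gart_bind(adev, offset, npages, pages, dma_addr, flags);
 *   amdgpu_gart_unbind(adev, offset, npages);
 *   amdgpu_gart_table_vram_unpin(adev);     // on suspend / teardown
 *   amdgpu_gart_table_vram_free(adev);
 *   amdgpu_gart_fini(adev);
 */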

/*
 * Common GART table functions.
 */

/**
 * amdgpu_gart_dummy_page_init - init dummy page used by the driver
 *
 * @adev: amdgpu_device pointer
 *
 * Allocate the dummy page used by the driver (all asics).
 * This dummy page is used by the driver as a filler for GART entries
 * when pages are taken out of the GART.
 * Returns 0 on success, -ENOMEM on failure.
 */
static int amdgpu_gart_dummy_page_init(struct amdgpu_device *adev)
{
        struct page *dummy_page = adev->mman.bdev.glob->dummy_read_page;

        if (adev->dummy_page_addr)
                return 0;
        adev->dummy_page_addr = pci_map_page(adev->pdev, dummy_page, 0,
                                             PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
        if (pci_dma_mapping_error(adev->pdev, adev->dummy_page_addr)) {
                dev_err(&adev->pdev->dev, "Failed to DMA MAP the dummy page\n");
                adev->dummy_page_addr = 0;
                return -ENOMEM;
        }
        return 0;
}
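
/*
 * For illustration only: on kernels where the legacy PCI DMA wrappers have
 * been removed, the same mapping would be expressed through the generic DMA
 * API. This is a hedged sketch of the equivalent calls, not what this file
 * currently does:
 *
 *   adev->dummy_page_addr = dma_map_page(&adev->pdev->dev, dummy_page, 0,
 *                                        PAGE_SIZE, DMA_BIDIRECTIONAL);
 *   if (dma_mapping_error(&adev->pdev->dev, adev->dummy_page_addr))
 *           return -ENOMEM;
 */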

/**
 * amdgpu_gart_dummy_page_fini - free dummy page used by the driver
 *
 * @adev: amdgpu_device pointer
 *
 * Frees the dummy page used by the driver (all asics).
 */
static void amdgpu_gart_dummy_page_fini(struct amdgpu_device *adev)
{
        if (!adev->dummy_page_addr)
                return;
        pci_unmap_page(adev->pdev, adev->dummy_page_addr,
                       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
        adev->dummy_page_addr = 0;
}

/**
 * amdgpu_gart_table_vram_alloc - allocate vram for gart page table
 *
 * @adev: amdgpu_device pointer
 *
 * Allocate video memory for GART page table
 * (pcie r4xx, r5xx+).  These asics require the
 * gart table to be in video memory.
 * Returns 0 for success, error for failure.
 */
int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev)
{
        int r;

        if (adev->gart.robj == NULL) {
                r = amdgpu_bo_create(adev, adev->gart.table_size, PAGE_SIZE,
                                     AMDGPU_GEM_DOMAIN_VRAM,
                                     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
                                     AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
                                     ttm_bo_type_kernel, NULL,
                                     &adev->gart.robj);
                if (r) {
                        return r;
                }
        }
        return 0;
}

/**
 * amdgpu_gart_table_vram_pin - pin gart page table in vram
 *
 * @adev: amdgpu_device pointer
 *
 * Pin the GART page table in vram so it will not be moved
 * by the memory manager (pcie r4xx, r5xx+).  These asics require the
 * gart table to be in video memory.
 * Returns 0 for success, error for failure.
 */
int amdgpu_gart_table_vram_pin(struct amdgpu_device *adev)
{
        uint64_t gpu_addr;
        int r;

        r = amdgpu_bo_reserve(adev->gart.robj, false);
        if (unlikely(r != 0))
                return r;
        r = amdgpu_bo_pin(adev->gart.robj,
                          AMDGPU_GEM_DOMAIN_VRAM, &gpu_addr);
        if (r) {
                amdgpu_bo_unreserve(adev->gart.robj);
                return r;
        }
        r = amdgpu_bo_kmap(adev->gart.robj, &adev->gart.ptr);
        if (r)
                amdgpu_bo_unpin(adev->gart.robj);
        amdgpu_bo_unreserve(adev->gart.robj);
        adev->gart.table_addr = gpu_addr;
        return r;
}

/**
 * amdgpu_gart_table_vram_unpin - unpin gart page table in vram
 *
 * @adev: amdgpu_device pointer
 *
 * Unpin the GART page table in vram (pcie r4xx, r5xx+).
 * These asics require the gart table to be in video memory.
 */
void amdgpu_gart_table_vram_unpin(struct amdgpu_device *adev)
{
        int r;

        if (adev->gart.robj == NULL) {
                return;
        }
        r = amdgpu_bo_reserve(adev->gart.robj, true);
        if (likely(r == 0)) {
                amdgpu_bo_kunmap(adev->gart.robj);
                amdgpu_bo_unpin(adev->gart.robj);
                amdgpu_bo_unreserve(adev->gart.robj);
                adev->gart.ptr = NULL;
        }
}

/**
 * amdgpu_gart_table_vram_free - free gart page table vram
 *
 * @adev: amdgpu_device pointer
 *
 * Free the video memory used for the GART page table
 * (pcie r4xx, r5xx+).  These asics require the gart table to
 * be in video memory.
 */
void amdgpu_gart_table_vram_free(struct amdgpu_device *adev)
{
        if (adev->gart.robj == NULL) {
                return;
        }
        amdgpu_bo_unref(&adev->gart.robj);
}

/*
 * Common gart functions.
 */
/**
 * amdgpu_gart_unbind - unbind pages from the gart page table
 *
 * @adev: amdgpu_device pointer
 * @offset: offset into the GPU's gart aperture
 * @pages: number of pages to unbind
 *
 * Unbinds the requested pages from the gart page table and
 * replaces them with the dummy page (all asics).
 * Returns 0 for success, -EINVAL for failure.
 */
int amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
                       int pages)
{
        unsigned t;
        unsigned p;
        int i, j;
        u64 page_base;
        /* Starting from VEGA10, system bit must be 0 to mean invalid. */
        uint64_t flags = 0;

        if (!adev->gart.ready) {
                WARN(1, "trying to unbind memory from uninitialized GART !\n");
                return -EINVAL;
        }

        t = offset / AMDGPU_GPU_PAGE_SIZE;
        p = t / (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
        for (i = 0; i < pages; i++, p++) {
#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
                adev->gart.pages[p] = NULL;
#endif
                page_base = adev->dummy_page_addr;
                if (!adev->gart.ptr)
                        continue;

                for (j = 0; j < (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE); j++, t++) {
                        amdgpu_gmc_set_pte_pde(adev, adev->gart.ptr,
                                               t, page_base, flags);
                        page_base += AMDGPU_GPU_PAGE_SIZE;
                }
        }
        mb();
        amdgpu_asic_flush_hdp(adev, NULL);
        amdgpu_gmc_flush_gpu_tlb(adev, 0);
        return 0;
}
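
/*
 * Worked example of the index math above (illustrative values only):
 * with PAGE_SIZE = 4K and AMDGPU_GPU_PAGE_SIZE = 4K there is exactly one
 * GPU page per CPU page, so for offset = 0x10000 and pages = 2:
 *
 *   t = 0x10000 / 4096 = 16    // first GPU page table entry to rewrite
 *   p = 16 / (4096 / 4096) = 16 // first CPU page slot in gart.pages[]
 *
 * On a 64K PAGE_SIZE kernel each CPU page covers 16 GPU pages, so the inner
 * loop writes 16 PTEs per iteration of the outer loop.
 */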

/**
 * amdgpu_gart_map - map dma_addresses into GART entries
 *
 * @adev: amdgpu_device pointer
 * @offset: offset into the GPU's gart aperture
 * @pages: number of pages to bind
 * @dma_addr: DMA addresses of pages
 * @flags: page table entry flags
 * @dst: CPU address of the destination page table
 *
 * Map the dma_addresses into GART entries (all asics).
 * Returns 0 for success, -EINVAL for failure.
 */
int amdgpu_gart_map(struct amdgpu_device *adev, uint64_t offset,
                    int pages, dma_addr_t *dma_addr, uint64_t flags,
                    void *dst)
{
        uint64_t page_base;
        unsigned i, j, t;

        if (!adev->gart.ready) {
                WARN(1, "trying to bind memory to uninitialized GART !\n");
                return -EINVAL;
        }

        t = offset / AMDGPU_GPU_PAGE_SIZE;

        for (i = 0; i < pages; i++) {
                page_base = dma_addr[i];
                for (j = 0; j < (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE); j++, t++) {
                        amdgpu_gmc_set_pte_pde(adev, dst, t, page_base, flags);
                        page_base += AMDGPU_GPU_PAGE_SIZE;
                }
        }
        return 0;
}
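
/*
 * Note that @offset indexes into @dst, so when PTEs are staged into a
 * separate buffer rather than the live table, the caller normally passes an
 * offset of 0 and points @dst at that buffer. A hedged sketch, assuming
 * 64-bit PTEs (the "staging" buffer here is hypothetical, not an API in
 * this file):
 *
 *   uint64_t *staging = kmalloc_array(npages * (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE),
 *                                     sizeof(uint64_t), GFP_KERNEL);
 *   if (staging)
 *           amdgpu_gart_map(adev, 0, npages, dma_addr, flags, staging);
 */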

/**
 * amdgpu_gart_bind - bind pages into the gart page table
 *
 * @adev: amdgpu_device pointer
 * @offset: offset into the GPU's gart aperture
 * @pages: number of pages to bind
 * @pagelist: pages to bind
 * @dma_addr: DMA addresses of pages
 * @flags: page table entry flags
 *
 * Binds the requested pages to the gart page table
 * (all asics).
 * Returns 0 for success, -EINVAL for failure.
 */
int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
                     int pages, struct page **pagelist, dma_addr_t *dma_addr,
                     uint64_t flags)
{
#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
        unsigned i, t, p;
#endif
        int r;

        if (!adev->gart.ready) {
                WARN(1, "trying to bind memory to uninitialized GART !\n");
                return -EINVAL;
        }

#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
        t = offset / AMDGPU_GPU_PAGE_SIZE;
        p = t / (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
        for (i = 0; i < pages; i++, p++)
                adev->gart.pages[p] = pagelist ? pagelist[i] : NULL;
#endif

        if (!adev->gart.ptr)
                return 0;

        r = amdgpu_gart_map(adev, offset, pages, dma_addr, flags,
                            adev->gart.ptr);
        if (r)
                return r;

        mb();
        amdgpu_asic_flush_hdp(adev, NULL);
        amdgpu_gmc_flush_gpu_tlb(adev, 0);
        return 0;
}
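
/*
 * A hedged sketch of a typical caller, loosely modeled on the TTM backend
 * (the helper and the gtt/ttm fields below are assumptions about code
 * outside this file): the PTE flags encode validity, read/write permission
 * and caching, and the bind covers one buffer object worth of pages.
 *
 *   uint64_t flags = amdgpu_ttm_tt_pte_flags(adev, ttm, mem);
 *
 *   r = amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages,
 *                        ttm->pages, gtt->ttm.dma_address, flags);
 *   if (r)
 *           DRM_ERROR("failed to bind %lu pages at 0x%08llX\n",
 *                     ttm->num_pages, gtt->offset);
 */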

/**
 * amdgpu_gart_init - init the driver info for managing the gart
 *
 * @adev: amdgpu_device pointer
 *
 * Allocate the dummy page and init the gart driver info (all asics).
 * Returns 0 for success, error for failure.
 */
int amdgpu_gart_init(struct amdgpu_device *adev)
{
        int r;

        if (adev->dummy_page_addr)
                return 0;

        /* We need PAGE_SIZE >= AMDGPU_GPU_PAGE_SIZE */
        if (PAGE_SIZE < AMDGPU_GPU_PAGE_SIZE) {
                DRM_ERROR("Page size is smaller than GPU page size!\n");
                return -EINVAL;
        }
        r = amdgpu_gart_dummy_page_init(adev);
        if (r)
                return r;
        /* Compute table size */
        adev->gart.num_cpu_pages = adev->gmc.gart_size / PAGE_SIZE;
        adev->gart.num_gpu_pages = adev->gmc.gart_size / AMDGPU_GPU_PAGE_SIZE;
        DRM_INFO("GART: num cpu pages %u, num gpu pages %u\n",
                 adev->gart.num_cpu_pages, adev->gart.num_gpu_pages);

#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
        /* Allocate pages table */
        adev->gart.pages = vzalloc(sizeof(void *) * adev->gart.num_cpu_pages);
        if (adev->gart.pages == NULL)
                return -ENOMEM;
#endif

        return 0;
}
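
/*
 * Sizing example for the bookkeeping above (illustrative numbers): with a
 * 256 MiB GART aperture, 4 KiB CPU pages and 4 KiB GPU pages,
 *
 *   num_cpu_pages = 256 MiB / 4 KiB = 65536
 *   num_gpu_pages = 256 MiB / 4 KiB = 65536
 *
 * and the debugfs pages array costs 65536 * sizeof(void *) = 512 KiB of
 * vmalloc space on a 64-bit kernel.
 */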

/**
 * amdgpu_gart_fini - tear down the driver info for managing the gart
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down the gart driver info and free the dummy page (all asics).
 */
void amdgpu_gart_fini(struct amdgpu_device *adev)
{
#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
        vfree(adev->gart.pages);
        adev->gart.pages = NULL;
#endif
        amdgpu_gart_dummy_page_fini(adev);
}