linux/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif
#include "amdgpu.h"

/*
 * GART
 * The GART (Graphics Aperture Remapping Table) is an aperture
 * in the GPU's address space.  System pages can be mapped into
 * the aperture and look like contiguous pages from the GPU's
 * perspective.  A page table maps the pages in the aperture
 * to the actual backing pages in system memory.
 *
 * Radeon GPUs support both an internal GART, as described above,
 * and AGP.  AGP works similarly, but the GART table is configured
 * and maintained by the northbridge rather than the driver.
 * Radeon hw has a separate AGP aperture that is programmed to
 * point to the AGP aperture provided by the northbridge and the
 * requests are passed through to the northbridge aperture.
 * Both AGP and internal GART can be used at the same time, however
 * that is not currently supported by the driver.
 *
 * This file handles the common internal GART management.
 */
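
/*
 * A worked example of the page-size bookkeeping used throughout this
 * file: the GART table is indexed in GPU pages (AMDGPU_GPU_PAGE_SIZE,
 * 4 KiB), while callers pass CPU pages (PAGE_SIZE).  On a kernel built
 * with 64 KiB pages, AMDGPU_GPU_PAGES_IN_CPU_PAGE is 16, so one CPU
 * page fills 16 consecutive GART entries:
 *
 *      t = offset / AMDGPU_GPU_PAGE_SIZE;         (first GART entry)
 *      p = t / AMDGPU_GPU_PAGES_IN_CPU_PAGE;      (CPU page index)
 */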

/*
 * Common GART table functions.
 */

/**
 * amdgpu_gart_dummy_page_init - init dummy page used by the driver
 *
 * @adev: amdgpu_device pointer
 *
 * Allocate the dummy page used by the driver (all asics).
 * This dummy page is used by the driver as a filler for GART entries
 * when pages are taken out of the GART.
 * Returns 0 on success, -ENOMEM on failure.
 */
static int amdgpu_gart_dummy_page_init(struct amdgpu_device *adev)
{
        struct page *dummy_page = adev->mman.bdev.glob->dummy_read_page;

        if (adev->dummy_page_addr)
                return 0;
        adev->dummy_page_addr = pci_map_page(adev->pdev, dummy_page, 0,
                                             PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
        if (pci_dma_mapping_error(adev->pdev, adev->dummy_page_addr)) {
                dev_err(&adev->pdev->dev, "Failed to DMA MAP the dummy page\n");
                adev->dummy_page_addr = 0;
                return -ENOMEM;
        }
        return 0;
}

/**
 * amdgpu_gart_dummy_page_fini - free dummy page used by the driver
 *
 * @adev: amdgpu_device pointer
 *
 * Frees the dummy page used by the driver (all asics).
 */
static void amdgpu_gart_dummy_page_fini(struct amdgpu_device *adev)
{
        if (!adev->dummy_page_addr)
                return;
        pci_unmap_page(adev->pdev, adev->dummy_page_addr,
                       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
        adev->dummy_page_addr = 0;
}

/**
 * amdgpu_gart_table_vram_alloc - allocate vram for gart page table
 *
 * @adev: amdgpu_device pointer
 *
 * Allocate video memory for the GART page table (all asics).
 * The GART table is required to be in video memory.
 * Returns 0 for success, error for failure.
 */
int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev)
{
        int r;

        if (adev->gart.robj == NULL) {
                struct amdgpu_bo_param bp;

                memset(&bp, 0, sizeof(bp));
                bp.size = adev->gart.table_size;
                bp.byte_align = PAGE_SIZE;
                bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
                bp.flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
                        AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
                bp.type = ttm_bo_type_kernel;
                bp.resv = NULL;
                r = amdgpu_bo_create(adev, &bp, &adev->gart.robj);
                if (r) {
                        return r;
                }
        }
        return 0;
}
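
/*
 * Note: amdgpu_gart_table_vram_alloc() relies on adev->gart.table_size
 * having been set up by the GMC code before it is called; with one
 * 8-byte entry per GPU page that is typically
 * adev->gart.num_gpu_pages * 8.
 */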

/**
 * amdgpu_gart_table_vram_pin - pin gart page table in vram
 *
 * @adev: amdgpu_device pointer
 *
 * Pin the GART page table in vram so it will not be moved
 * by the memory manager (all asics).  The GART table is
 * required to be in video memory.
 * Returns 0 for success, error for failure.
 */
int amdgpu_gart_table_vram_pin(struct amdgpu_device *adev)
{
        int r;

        r = amdgpu_bo_reserve(adev->gart.robj, false);
        if (unlikely(r != 0))
                return r;
        r = amdgpu_bo_pin(adev->gart.robj, AMDGPU_GEM_DOMAIN_VRAM);
        if (r) {
                amdgpu_bo_unreserve(adev->gart.robj);
                return r;
        }
        r = amdgpu_bo_kmap(adev->gart.robj, &adev->gart.ptr);
        if (r)
                amdgpu_bo_unpin(adev->gart.robj);
        amdgpu_bo_unreserve(adev->gart.robj);
        adev->gart.table_addr = amdgpu_bo_gpu_offset(adev->gart.robj);
        return r;
}
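
/*
 * Rough call sequence for the table helpers, as the GMC backends are
 * assumed to use them (illustrative sketch, not code from this file):
 *
 *      amdgpu_gart_init(adev);                    sw_init
 *      amdgpu_gart_table_vram_alloc(adev);        sw_init
 *      ...
 *      amdgpu_gart_table_vram_pin(adev);          gart enable / hw_init
 *      ... program the MC/hub registers with adev->gart.table_addr ...
 *      adev->gart.ready = true;
 *      ...
 *      amdgpu_gart_table_vram_unpin(adev);        hw_fini / suspend
 */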

/**
 * amdgpu_gart_table_vram_unpin - unpin gart page table in vram
 *
 * @adev: amdgpu_device pointer
 *
 * Unpin the GART page table in vram (all asics).
 * The GART table is required to be in video memory.
 */
void amdgpu_gart_table_vram_unpin(struct amdgpu_device *adev)
{
        int r;

        if (adev->gart.robj == NULL) {
                return;
        }
        r = amdgpu_bo_reserve(adev->gart.robj, true);
        if (likely(r == 0)) {
                amdgpu_bo_kunmap(adev->gart.robj);
                amdgpu_bo_unpin(adev->gart.robj);
                amdgpu_bo_unreserve(adev->gart.robj);
                adev->gart.ptr = NULL;
        }
}

/**
 * amdgpu_gart_table_vram_free - free gart page table vram
 *
 * @adev: amdgpu_device pointer
 *
 * Free the video memory used for the GART page table
 * (all asics).  The GART table is required to be in
 * video memory.
 */
void amdgpu_gart_table_vram_free(struct amdgpu_device *adev)
{
        if (adev->gart.robj == NULL) {
                return;
        }
        amdgpu_bo_unref(&adev->gart.robj);
}

/*
 * Common gart functions.
 */
/**
 * amdgpu_gart_unbind - unbind pages from the gart page table
 *
 * @adev: amdgpu_device pointer
 * @offset: offset into the GPU's gart aperture
 * @pages: number of pages to unbind
 *
 * Unbinds the requested pages from the gart page table and
 * replaces them with the dummy page (all asics).
 * Returns 0 for success, -EINVAL for failure.
 */
int amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
                        int pages)
{
        unsigned t;
        unsigned p;
        int i, j;
        u64 page_base;
        /* Starting from VEGA10, system bit must be 0 to mean invalid. */
        uint64_t flags = 0;

        if (!adev->gart.ready) {
                WARN(1, "trying to unbind memory from uninitialized GART !\n");
                return -EINVAL;
        }

        t = offset / AMDGPU_GPU_PAGE_SIZE;
        p = t / AMDGPU_GPU_PAGES_IN_CPU_PAGE;
        for (i = 0; i < pages; i++, p++) {
#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
                adev->gart.pages[p] = NULL;
#endif
                /* Point the freed entries at the dummy page so stray GPU
                 * accesses never hit memory that has been handed back.
                 */
                page_base = adev->dummy_page_addr;
                if (!adev->gart.ptr)
                        continue;

                for (j = 0; j < AMDGPU_GPU_PAGES_IN_CPU_PAGE; j++, t++) {
                        amdgpu_gmc_set_pte_pde(adev, adev->gart.ptr,
                                               t, page_base, flags);
                        page_base += AMDGPU_GPU_PAGE_SIZE;
                }
        }
        mb();
        amdgpu_asic_flush_hdp(adev, NULL);
        amdgpu_gmc_flush_gpu_tlb(adev, 0);
        return 0;
}

/**
 * amdgpu_gart_map - map dma_addresses into GART entries
 *
 * @adev: amdgpu_device pointer
 * @offset: offset into the GPU's gart aperture
 * @pages: number of pages to bind
 * @dma_addr: DMA addresses of pages
 * @flags: page table entry flags
 * @dst: CPU address of the GART table to write the entries into
 *
 * Map the DMA addresses into GART entries (all asics).
 * Returns 0 for success, -EINVAL for failure.
 */
int amdgpu_gart_map(struct amdgpu_device *adev, uint64_t offset,
                    int pages, dma_addr_t *dma_addr, uint64_t flags,
                    void *dst)
{
        uint64_t page_base;
        unsigned i, j, t;

        if (!adev->gart.ready) {
                WARN(1, "trying to bind memory to uninitialized GART !\n");
                return -EINVAL;
        }

        t = offset / AMDGPU_GPU_PAGE_SIZE;

        for (i = 0; i < pages; i++) {
                page_base = dma_addr[i];
                for (j = 0; j < AMDGPU_GPU_PAGES_IN_CPU_PAGE; j++, t++) {
                        amdgpu_gmc_set_pte_pde(adev, dst, t, page_base, flags);
                        page_base += AMDGPU_GPU_PAGE_SIZE;
                }
        }
        return 0;
}
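
/*
 * Unlike amdgpu_gart_bind() below, amdgpu_gart_map() only writes the
 * entries into the buffer given by @dst; it performs no HDP or TLB
 * flush, so callers that target the live GART table are responsible
 * for making the update visible to the GPU afterwards.
 */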

/**
 * amdgpu_gart_bind - bind pages into the gart page table
 *
 * @adev: amdgpu_device pointer
 * @offset: offset into the GPU's gart aperture
 * @pages: number of pages to bind
 * @pagelist: pages to bind
 * @dma_addr: DMA addresses of pages
 * @flags: page table entry flags
 *
 * Binds the requested pages to the gart page table
 * (all asics).
 * Returns 0 for success, -EINVAL for failure.
 */
int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
                     int pages, struct page **pagelist, dma_addr_t *dma_addr,
                     uint64_t flags)
{
#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
        unsigned i, t, p;
#endif
        int r;

        if (!adev->gart.ready) {
                WARN(1, "trying to bind memory to uninitialized GART !\n");
                return -EINVAL;
        }

#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
        t = offset / AMDGPU_GPU_PAGE_SIZE;
        p = t / AMDGPU_GPU_PAGES_IN_CPU_PAGE;
        for (i = 0; i < pages; i++, p++)
                adev->gart.pages[p] = pagelist ? pagelist[i] : NULL;
#endif

        if (!adev->gart.ptr)
                return 0;

        r = amdgpu_gart_map(adev, offset, pages, dma_addr, flags,
                            adev->gart.ptr);
        if (r)
                return r;

        mb();
        amdgpu_asic_flush_hdp(adev, NULL);
        amdgpu_gmc_flush_gpu_tlb(adev, 0);
        return 0;
}
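
/*
 * Rough usage sketch, assuming the usual caller in the TTM backend
 * (illustrative only; the field names below belong to amdgpu_ttm.c,
 * not to this file):
 *
 *      flags = amdgpu_ttm_tt_pte_flags(adev, ttm, bo_mem);
 *      r = amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages,
 *                           ttm->pages, gtt->ttm.dma_address, flags);
 */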

/**
 * amdgpu_gart_init - init the driver info for managing the gart
 *
 * @adev: amdgpu_device pointer
 *
 * Allocate the dummy page and init the gart driver info (all asics).
 * Returns 0 for success, error for failure.
 */
int amdgpu_gart_init(struct amdgpu_device *adev)
{
        int r;

        if (adev->dummy_page_addr)
                return 0;

        /* We need PAGE_SIZE >= AMDGPU_GPU_PAGE_SIZE */
        if (PAGE_SIZE < AMDGPU_GPU_PAGE_SIZE) {
                DRM_ERROR("Page size is smaller than GPU page size!\n");
                return -EINVAL;
        }
        r = amdgpu_gart_dummy_page_init(adev);
        if (r)
                return r;
        /* Compute table size */
        adev->gart.num_cpu_pages = adev->gmc.gart_size / PAGE_SIZE;
        adev->gart.num_gpu_pages = adev->gmc.gart_size / AMDGPU_GPU_PAGE_SIZE;
        DRM_INFO("GART: num cpu pages %u, num gpu pages %u\n",
                 adev->gart.num_cpu_pages, adev->gart.num_gpu_pages);

#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
        /* Allocate pages table */
        adev->gart.pages = vzalloc(array_size(sizeof(void *),
                                              adev->gart.num_cpu_pages));
        if (adev->gart.pages == NULL)
                return -ENOMEM;
#endif

        return 0;
}
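
/*
 * Sizing example, assuming a 256 MiB GART aperture and 4 KiB pages on
 * both the CPU and GPU side: num_cpu_pages == num_gpu_pages == 65536,
 * and with one 8-byte entry per GPU page the table allocated by
 * amdgpu_gart_table_vram_alloc() occupies 512 KiB of VRAM.
 */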

/**
 * amdgpu_gart_fini - tear down the driver info for managing the gart
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down the gart driver info and free the dummy page (all asics).
 */
void amdgpu_gart_fini(struct amdgpu_device *adev)
{
#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
        vfree(adev->gart.pages);
        adev->gart.pages = NULL;
#endif
        amdgpu_gart_dummy_page_fini(adev);
}