linux/drivers/gpu/drm/radeon/radeon_ttm.c
/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 *    Dave Airlie
 */

#include <linux/dma-mapping.h>
#include <linux/pagemap.h>
#include <linux/pci.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/swiotlb.h>

#include <drm/drm_device.h>
#include <drm/drm_file.h>
#include <drm/drm_prime.h>
#include <drm/radeon_drm.h>
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_range_manager.h>

#include "radeon_reg.h"
#include "radeon.h"
#include "radeon_ttm.h"

static void radeon_ttm_debugfs_init(struct radeon_device *rdev);

static int radeon_ttm_tt_bind(struct ttm_device *bdev, struct ttm_tt *ttm,
                              struct ttm_resource *bo_mem);
static void radeon_ttm_tt_unbind(struct ttm_device *bdev, struct ttm_tt *ttm);

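/**
 * radeon_get_rdev - map a ttm_device back to its radeon_device
 * @bdev: TTM device embedded in struct radeon_mman
 *
 * The TTM device is embedded in the radeon_mman, which is itself
 * embedded in the radeon_device, so two container_of() steps recover
 * the driver instance.
 */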
struct radeon_device *radeon_get_rdev(struct ttm_device *bdev)
{
        struct radeon_mman *mman;
        struct radeon_device *rdev;

        mman = container_of(bdev, struct radeon_mman, bdev);
        rdev = container_of(mman, struct radeon_device, mman);
        return rdev;
}

static int radeon_ttm_init_vram(struct radeon_device *rdev)
{
        return ttm_range_man_init(&rdev->mman.bdev, TTM_PL_VRAM,
                                  false, rdev->mc.real_vram_size >> PAGE_SHIFT);
}

static int radeon_ttm_init_gtt(struct radeon_device *rdev)
{
        return ttm_range_man_init(&rdev->mman.bdev, TTM_PL_TT,
                                  true, rdev->mc.gtt_size >> PAGE_SHIFT);
}

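/**
 * radeon_evict_flags - pick where an evicted BO should go
 * @bo: buffer object being evicted
 * @placement: placement to fill in
 *
 * TTM eviction callback. Non-radeon BOs go straight to system memory;
 * radeon BOs in VRAM prefer the CPU-invisible part of VRAM with GTT as
 * the busy fallback, and everything else falls back to the CPU domain.
 */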
static void radeon_evict_flags(struct ttm_buffer_object *bo,
                                struct ttm_placement *placement)
{
        static const struct ttm_place placements = {
                .fpfn = 0,
                .lpfn = 0,
                .mem_type = TTM_PL_SYSTEM,
                .flags = 0
        };

        struct radeon_bo *rbo;

        if (!radeon_ttm_bo_is_radeon_bo(bo)) {
                placement->placement = &placements;
                placement->busy_placement = &placements;
                placement->num_placement = 1;
                placement->num_busy_placement = 1;
                return;
        }
        rbo = container_of(bo, struct radeon_bo, tbo);
        switch (bo->resource->mem_type) {
        case TTM_PL_VRAM:
                if (!rbo->rdev->ring[radeon_copy_ring_index(rbo->rdev)].ready) {
                        radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_CPU);
                } else if (rbo->rdev->mc.visible_vram_size < rbo->rdev->mc.real_vram_size &&
                           bo->resource->start < (rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT)) {
                        unsigned fpfn = rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
                        int i;

                        /* Try evicting to the CPU inaccessible part of VRAM
                         * first, but only set GTT as busy placement, so this
                         * BO will be evicted to GTT rather than causing other
                         * BOs to be evicted from VRAM
                         */
                        radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM |
                                                         RADEON_GEM_DOMAIN_GTT);
                        rbo->placement.num_busy_placement = 0;
                        for (i = 0; i < rbo->placement.num_placement; i++) {
                                if (rbo->placements[i].mem_type == TTM_PL_VRAM) {
                                        if (rbo->placements[i].fpfn < fpfn)
                                                rbo->placements[i].fpfn = fpfn;
                                } else {
                                        rbo->placement.busy_placement =
                                                &rbo->placements[i];
                                        rbo->placement.num_busy_placement = 1;
                                }
                        }
                } else {
                        radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
                }
                break;
        case TTM_PL_TT:
        default:
                radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_CPU);
        }
        *placement = rbo->placement;
}

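/**
 * radeon_move_blit - move BO contents with the GPU copy ring
 * @bo: buffer object to move
 * @evict: whether this move is an eviction
 * @new_mem: destination resource
 * @old_mem: current resource
 *
 * Translates both resources into GPU addresses, schedules an
 * asynchronous radeon_copy() on the copy ring, and hands the resulting
 * fence to TTM so the old resource is freed once the copy completes.
 */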
static int radeon_move_blit(struct ttm_buffer_object *bo,
                        bool evict,
                        struct ttm_resource *new_mem,
                        struct ttm_resource *old_mem)
{
        struct radeon_device *rdev;
        uint64_t old_start, new_start;
        struct radeon_fence *fence;
        unsigned num_pages;
        int r, ridx;

        rdev = radeon_get_rdev(bo->bdev);
        ridx = radeon_copy_ring_index(rdev);
        old_start = (u64)old_mem->start << PAGE_SHIFT;
        new_start = (u64)new_mem->start << PAGE_SHIFT;

        switch (old_mem->mem_type) {
        case TTM_PL_VRAM:
                old_start += rdev->mc.vram_start;
                break;
        case TTM_PL_TT:
                old_start += rdev->mc.gtt_start;
                break;
        default:
                DRM_ERROR("Unknown placement %d\n", old_mem->mem_type);
                return -EINVAL;
        }
        switch (new_mem->mem_type) {
        case TTM_PL_VRAM:
                new_start += rdev->mc.vram_start;
                break;
        case TTM_PL_TT:
                new_start += rdev->mc.gtt_start;
                break;
        default:
                DRM_ERROR("Unknown placement %d\n", new_mem->mem_type);
                return -EINVAL;
        }
        if (!rdev->ring[ridx].ready) {
                DRM_ERROR("Trying to move memory with ring turned off.\n");
                return -EINVAL;
        }

        BUILD_BUG_ON((PAGE_SIZE % RADEON_GPU_PAGE_SIZE) != 0);

        /* radeon_copy() counts in GPU pages, which can be smaller than CPU pages */
        num_pages = new_mem->num_pages * (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
        fence = radeon_copy(rdev, old_start, new_start, num_pages, bo->base.resv);
        if (IS_ERR(fence))
                return PTR_ERR(fence);

        r = ttm_bo_move_accel_cleanup(bo, &fence->base, evict, false, new_mem);
        radeon_fence_unref(&fence);
        return r;
}

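/**
 * radeon_bo_move - TTM move callback
 * @bo: buffer object to move
 * @evict: whether this move is an eviction
 * @ctx: TTM operation context
 * @new_mem: destination resource
 * @hop: filled with an intermediate placement when -EMULTIHOP is returned
 *
 * Handles the cheap cases (system <-> TT transitions) without copying,
 * requests a hop through GTT for direct system <-> VRAM moves, and
 * otherwise blits with the GPU, falling back to memcpy when no copy
 * ring is available.
 */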
static int radeon_bo_move(struct ttm_buffer_object *bo, bool evict,
                          struct ttm_operation_ctx *ctx,
                          struct ttm_resource *new_mem,
                          struct ttm_place *hop)
{
        struct ttm_resource *old_mem = bo->resource;
        struct radeon_device *rdev;
        struct radeon_bo *rbo;
        int r, old_type;

        if (new_mem->mem_type == TTM_PL_TT) {
                r = radeon_ttm_tt_bind(bo->bdev, bo->ttm, new_mem);
                if (r)
                        return r;
        }

        r = ttm_bo_wait_ctx(bo, ctx);
        if (r)
                return r;

        /* Can't move a pinned BO */
        rbo = container_of(bo, struct radeon_bo, tbo);
        if (WARN_ON_ONCE(rbo->tbo.pin_count > 0))
                return -EINVAL;

        /* Save old type for statistics update */
        old_type = old_mem->mem_type;

        rdev = radeon_get_rdev(bo->bdev);
        if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
                ttm_bo_move_null(bo, new_mem);
                goto out;
        }
        if (old_mem->mem_type == TTM_PL_SYSTEM &&
            new_mem->mem_type == TTM_PL_TT) {
                ttm_bo_move_null(bo, new_mem);
                goto out;
        }

        if (old_mem->mem_type == TTM_PL_TT &&
            new_mem->mem_type == TTM_PL_SYSTEM) {
                radeon_ttm_tt_unbind(bo->bdev, bo->ttm);
                ttm_resource_free(bo, &bo->resource);
                ttm_bo_assign_mem(bo, new_mem);
                goto out;
        }
        if (rdev->ring[radeon_copy_ring_index(rdev)].ready &&
            rdev->asic->copy.copy != NULL) {
                if ((old_mem->mem_type == TTM_PL_SYSTEM &&
                     new_mem->mem_type == TTM_PL_VRAM) ||
                    (old_mem->mem_type == TTM_PL_VRAM &&
                     new_mem->mem_type == TTM_PL_SYSTEM)) {
                        hop->fpfn = 0;
                        hop->lpfn = 0;
                        hop->mem_type = TTM_PL_TT;
                        hop->flags = 0;
                        return -EMULTIHOP;
                }

                r = radeon_move_blit(bo, evict, new_mem, old_mem);
        } else {
                r = -ENODEV;
        }

        if (r) {
                r = ttm_bo_move_memcpy(bo, ctx, new_mem);
                if (r)
                        return r;
        }

out:
        /* update statistics */
        atomic64_add(bo->base.size, &rdev->num_bytes_moved);
        radeon_bo_move_notify(bo, old_type, new_mem);
        return 0;
}

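/**
 * radeon_ttm_io_mem_reserve - compute the bus address of a resource
 * @bdev: TTM device
 * @mem: resource to map
 *
 * Fills in mem->bus so TTM can ioremap the backing aperture: the AGP
 * aperture for TT resources on AGP boards, the PCI BAR for visible
 * VRAM. CPU-invisible VRAM is rejected with -EINVAL.
 */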
static int radeon_ttm_io_mem_reserve(struct ttm_device *bdev, struct ttm_resource *mem)
{
        struct radeon_device *rdev = radeon_get_rdev(bdev);
        size_t bus_size = (size_t)mem->num_pages << PAGE_SHIFT;

        switch (mem->mem_type) {
        case TTM_PL_SYSTEM:
                /* system memory */
                return 0;
        case TTM_PL_TT:
#if IS_ENABLED(CONFIG_AGP)
                if (rdev->flags & RADEON_IS_AGP) {
                        /* RADEON_IS_AGP is set only if AGP is active */
                        mem->bus.offset = (mem->start << PAGE_SHIFT) +
                                rdev->mc.agp_base;
                        mem->bus.is_iomem = !rdev->agp->cant_use_aperture;
                        mem->bus.caching = ttm_write_combined;
                }
#endif
                break;
        case TTM_PL_VRAM:
                mem->bus.offset = mem->start << PAGE_SHIFT;
                /* check if it's visible */
                if ((mem->bus.offset + bus_size) > rdev->mc.visible_vram_size)
                        return -EINVAL;
                mem->bus.offset += rdev->mc.aper_base;
                mem->bus.is_iomem = true;
                mem->bus.caching = ttm_write_combined;
#ifdef __alpha__
                /*
                 * Alpha: use bus.addr to hold the ioremap() return,
                 * so we can modify bus.base below.
                 */
                mem->bus.addr = ioremap_wc(mem->bus.offset, bus_size);
                if (!mem->bus.addr)
                        return -ENOMEM;

                /*
                 * Alpha: Use just the bus offset plus
                 * the hose/domain memory base for bus.base.
                 * It then can be used to build PTEs for VRAM
                 * access, as done in ttm_bo_vm_fault().
                 */
                mem->bus.offset = (mem->bus.offset & 0x0ffffffffUL) +
                        rdev->hose->dense_mem_base;
#endif
                break;
        default:
                return -EINVAL;
        }
        return 0;
}

/*
 * TTM backend functions.
 */
struct radeon_ttm_tt {
        struct ttm_tt           ttm;
        u64                     offset;

        uint64_t                userptr;
        struct mm_struct        *usermm;
        uint32_t                userflags;
        bool                    bound;
};

/* prepare the sg table with the user pages */
static int radeon_ttm_tt_pin_userptr(struct ttm_device *bdev, struct ttm_tt *ttm)
{
        struct radeon_device *rdev = radeon_get_rdev(bdev);
        struct radeon_ttm_tt *gtt = (void *)ttm;
        unsigned pinned = 0;
        int r;

        int write = !(gtt->userflags & RADEON_GEM_USERPTR_READONLY);
        enum dma_data_direction direction = write ?
                DMA_BIDIRECTIONAL : DMA_TO_DEVICE;

        if (current->mm != gtt->usermm)
                return -EPERM;

        if (gtt->userflags & RADEON_GEM_USERPTR_ANONONLY) {
                /* check that we only pin down anonymous memory
                 * to prevent problems with writeback
                 */
                unsigned long end = gtt->userptr + (u64)ttm->num_pages * PAGE_SIZE;
                struct vm_area_struct *vma;

                vma = find_vma(gtt->usermm, gtt->userptr);
                if (!vma || vma->vm_file || vma->vm_end < end)
                        return -EPERM;
        }

        do {
                unsigned num_pages = ttm->num_pages - pinned;
                uint64_t userptr = gtt->userptr + pinned * PAGE_SIZE;
                struct page **pages = ttm->pages + pinned;

                r = get_user_pages(userptr, num_pages, write ? FOLL_WRITE : 0,
                                   pages, NULL);
                if (r < 0)
                        goto release_pages;

                pinned += r;

        } while (pinned < ttm->num_pages);

        r = sg_alloc_table_from_pages(ttm->sg, ttm->pages, ttm->num_pages, 0,
                                      (u64)ttm->num_pages << PAGE_SHIFT,
                                      GFP_KERNEL);
        if (r)
                goto release_sg;

        r = dma_map_sgtable(rdev->dev, ttm->sg, direction, 0);
        if (r)
                goto release_sg;

        drm_prime_sg_to_dma_addr_array(ttm->sg, gtt->ttm.dma_address,
                                       ttm->num_pages);

        return 0;

release_sg:
        kfree(ttm->sg);

release_pages:
        release_pages(ttm->pages, pinned);
        return r;
}

static void radeon_ttm_tt_unpin_userptr(struct ttm_device *bdev, struct ttm_tt *ttm)
{
        struct radeon_device *rdev = radeon_get_rdev(bdev);
        struct radeon_ttm_tt *gtt = (void *)ttm;
        struct sg_page_iter sg_iter;

        int write = !(gtt->userflags & RADEON_GEM_USERPTR_READONLY);
        enum dma_data_direction direction = write ?
                DMA_BIDIRECTIONAL : DMA_TO_DEVICE;

        /* double check that we don't free the table twice */
        if (!ttm->sg || !ttm->sg->sgl)
                return;

        /* free the sg table and pages again */
        dma_unmap_sgtable(rdev->dev, ttm->sg, direction, 0);

        for_each_sgtable_page(ttm->sg, &sg_iter, 0) {
                struct page *page = sg_page_iter_page(&sg_iter);

                if (!(gtt->userflags & RADEON_GEM_USERPTR_READONLY))
                        set_page_dirty(page);

                mark_page_accessed(page);
                put_page(page);
        }

        sg_free_table(ttm->sg);
}

static bool radeon_ttm_backend_is_bound(struct ttm_tt *ttm)
{
        struct radeon_ttm_tt *gtt = (void *)ttm;

        return gtt->bound;
}

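/**
 * radeon_ttm_backend_bind - map a ttm_tt into the GART
 * @bdev: TTM device
 * @ttm: TT to bind
 * @bo_mem: GTT resource describing the target offset
 *
 * Pins userptr pages when needed (userptr mappings are bound without
 * GART write permission), then writes the page array into the GART
 * table. Cached TTs are bound with snooping enabled.
 */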
static int radeon_ttm_backend_bind(struct ttm_device *bdev,
                                   struct ttm_tt *ttm,
                                   struct ttm_resource *bo_mem)
{
        struct radeon_ttm_tt *gtt = (void *)ttm;
        struct radeon_device *rdev = radeon_get_rdev(bdev);
        uint32_t flags = RADEON_GART_PAGE_VALID | RADEON_GART_PAGE_READ |
                RADEON_GART_PAGE_WRITE;
        int r;

        if (gtt->bound)
                return 0;

        if (gtt->userptr) {
                r = radeon_ttm_tt_pin_userptr(bdev, ttm);
                if (r)
                        return r;
                flags &= ~RADEON_GART_PAGE_WRITE;
        }

        gtt->offset = (unsigned long)(bo_mem->start << PAGE_SHIFT);
        if (!ttm->num_pages) {
                WARN(1, "nothing to bind %u pages for mreg %p back %p!\n",
                     ttm->num_pages, bo_mem, ttm);
        }
        if (ttm->caching == ttm_cached)
                flags |= RADEON_GART_PAGE_SNOOP;
        r = radeon_gart_bind(rdev, gtt->offset, ttm->num_pages,
                             ttm->pages, gtt->ttm.dma_address, flags);
        if (r) {
                DRM_ERROR("failed to bind %u pages at 0x%08X\n",
                          ttm->num_pages, (unsigned)gtt->offset);
                return r;
        }
        gtt->bound = true;
        return 0;
}

static void radeon_ttm_backend_unbind(struct ttm_device *bdev, struct ttm_tt *ttm)
{
        struct radeon_ttm_tt *gtt = (void *)ttm;
        struct radeon_device *rdev = radeon_get_rdev(bdev);

        if (gtt->userptr)
                radeon_ttm_tt_unpin_userptr(bdev, ttm);

        if (!gtt->bound)
                return;

        radeon_gart_unbind(rdev, gtt->offset, ttm->num_pages);

        gtt->bound = false;
}

static void radeon_ttm_backend_destroy(struct ttm_device *bdev, struct ttm_tt *ttm)
{
        struct radeon_ttm_tt *gtt = (void *)ttm;

        radeon_ttm_backend_unbind(bdev, ttm);
        ttm_tt_destroy_common(bdev, ttm);

        ttm_tt_fini(&gtt->ttm);
        kfree(gtt);
}

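/**
 * radeon_ttm_tt_create - allocate the per-BO TT structure
 * @bo: buffer object this TT backs
 * @page_flags: TTM page flags
 *
 * On AGP boards this delegates to the AGP backend; otherwise a
 * radeon_ttm_tt is allocated with the caching mode derived from the
 * BO's RADEON_GEM_GTT_UC/WC flags.
 */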
static struct ttm_tt *radeon_ttm_tt_create(struct ttm_buffer_object *bo,
                                           uint32_t page_flags)
{
        struct radeon_ttm_tt *gtt;
        enum ttm_caching caching;
        struct radeon_bo *rbo;
#if IS_ENABLED(CONFIG_AGP)
        struct radeon_device *rdev = radeon_get_rdev(bo->bdev);

        if (rdev->flags & RADEON_IS_AGP)
                return ttm_agp_tt_create(bo, rdev->agp->bridge, page_flags);
#endif
        rbo = container_of(bo, struct radeon_bo, tbo);

        gtt = kzalloc(sizeof(struct radeon_ttm_tt), GFP_KERNEL);
        if (gtt == NULL)
                return NULL;

        if (rbo->flags & RADEON_GEM_GTT_UC)
                caching = ttm_uncached;
        else if (rbo->flags & RADEON_GEM_GTT_WC)
                caching = ttm_write_combined;
        else
                caching = ttm_cached;

        if (ttm_sg_tt_init(&gtt->ttm, bo, page_flags, caching)) {
                kfree(gtt);
                return NULL;
        }
        return &gtt->ttm;
}

static struct radeon_ttm_tt *radeon_ttm_tt_to_gtt(struct radeon_device *rdev,
                                                  struct ttm_tt *ttm)
{
#if IS_ENABLED(CONFIG_AGP)
        /* AGP TTs are not radeon_ttm_tt, so there is nothing to convert */
        if (rdev->flags & RADEON_IS_AGP)
                return NULL;
#endif

        if (!ttm)
                return NULL;
        return container_of(ttm, struct radeon_ttm_tt, ttm);
}

static int radeon_ttm_tt_populate(struct ttm_device *bdev,
                                  struct ttm_tt *ttm,
                                  struct ttm_operation_ctx *ctx)
{
        struct radeon_device *rdev = radeon_get_rdev(bdev);
        struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(rdev, ttm);
        bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);

        if (gtt && gtt->userptr) {
                ttm->sg = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
                if (!ttm->sg)
                        return -ENOMEM;

                ttm->page_flags |= TTM_PAGE_FLAG_SG;
                return 0;
        }

        if (slave && ttm->sg) {
                drm_prime_sg_to_dma_addr_array(ttm->sg, gtt->ttm.dma_address,
                                               ttm->num_pages);
                return 0;
        }

        return ttm_pool_alloc(&rdev->mman.bdev.pool, ttm, ctx);
}

static void radeon_ttm_tt_unpopulate(struct ttm_device *bdev, struct ttm_tt *ttm)
{
        struct radeon_device *rdev = radeon_get_rdev(bdev);
        struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(rdev, ttm);
        bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);

        if (gtt && gtt->userptr) {
                kfree(ttm->sg);
                ttm->page_flags &= ~TTM_PAGE_FLAG_SG;
                return;
        }

        if (slave)
                return;

        ttm_pool_free(&rdev->mman.bdev.pool, ttm);
}

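/**
 * radeon_ttm_tt_set_userptr - turn a TT into a userptr mapping
 * @rdev: radeon device
 * @ttm: TT to modify
 * @addr: userspace address the TT should be backed by
 * @flags: RADEON_GEM_USERPTR_* flags
 *
 * Records the user address, the owning mm and the flags; the pages
 * themselves are pinned later at bind time. Fails on AGP, where the TT
 * is not a radeon_ttm_tt.
 */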
int radeon_ttm_tt_set_userptr(struct radeon_device *rdev,
                              struct ttm_tt *ttm, uint64_t addr,
                              uint32_t flags)
{
        struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(rdev, ttm);

        if (gtt == NULL)
                return -EINVAL;

        gtt->userptr = addr;
        gtt->usermm = current->mm;
        gtt->userflags = flags;
        return 0;
}

bool radeon_ttm_tt_is_bound(struct ttm_device *bdev,
                            struct ttm_tt *ttm)
{
#if IS_ENABLED(CONFIG_AGP)
        struct radeon_device *rdev = radeon_get_rdev(bdev);

        if (rdev->flags & RADEON_IS_AGP)
                return ttm_agp_is_bound(ttm);
#endif
        return radeon_ttm_backend_is_bound(ttm);
}

static int radeon_ttm_tt_bind(struct ttm_device *bdev,
                              struct ttm_tt *ttm,
                              struct ttm_resource *bo_mem)
{
#if IS_ENABLED(CONFIG_AGP)
        struct radeon_device *rdev = radeon_get_rdev(bdev);
#endif

        if (!bo_mem)
                return -EINVAL;
#if IS_ENABLED(CONFIG_AGP)
        if (rdev->flags & RADEON_IS_AGP)
                return ttm_agp_bind(ttm, bo_mem);
#endif

        return radeon_ttm_backend_bind(bdev, ttm, bo_mem);
}

static void radeon_ttm_tt_unbind(struct ttm_device *bdev,
                                 struct ttm_tt *ttm)
{
#if IS_ENABLED(CONFIG_AGP)
        struct radeon_device *rdev = radeon_get_rdev(bdev);

        if (rdev->flags & RADEON_IS_AGP) {
                ttm_agp_unbind(ttm);
                return;
        }
#endif
        radeon_ttm_backend_unbind(bdev, ttm);
}

static void radeon_ttm_tt_destroy(struct ttm_device *bdev,
                                  struct ttm_tt *ttm)
{
#if IS_ENABLED(CONFIG_AGP)
        struct radeon_device *rdev = radeon_get_rdev(bdev);

        if (rdev->flags & RADEON_IS_AGP) {
                ttm_agp_unbind(ttm);
                ttm_tt_destroy_common(bdev, ttm);
                ttm_agp_destroy(ttm);
                return;
        }
#endif
        radeon_ttm_backend_destroy(bdev, ttm);
}

bool radeon_ttm_tt_has_userptr(struct radeon_device *rdev,
                               struct ttm_tt *ttm)
{
        struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(rdev, ttm);

        if (gtt == NULL)
                return false;

        return !!gtt->userptr;
}

bool radeon_ttm_tt_is_readonly(struct radeon_device *rdev,
                               struct ttm_tt *ttm)
{
        struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(rdev, ttm);

        if (gtt == NULL)
                return false;

        return !!(gtt->userflags & RADEON_GEM_USERPTR_READONLY);
}

static void
radeon_bo_delete_mem_notify(struct ttm_buffer_object *bo)
{
        unsigned int old_type = TTM_PL_SYSTEM;

        if (bo->resource)
                old_type = bo->resource->mem_type;
        radeon_bo_move_notify(bo, old_type, NULL);
}

static struct ttm_device_funcs radeon_bo_driver = {
        .ttm_tt_create = &radeon_ttm_tt_create,
        .ttm_tt_populate = &radeon_ttm_tt_populate,
        .ttm_tt_unpopulate = &radeon_ttm_tt_unpopulate,
        .ttm_tt_destroy = &radeon_ttm_tt_destroy,
        .eviction_valuable = ttm_bo_eviction_valuable,
        .evict_flags = &radeon_evict_flags,
        .move = &radeon_bo_move,
        .delete_mem_notify = &radeon_bo_delete_mem_notify,
        .io_mem_reserve = &radeon_ttm_io_mem_reserve,
};

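/**
 * radeon_ttm_init - set up TTM for the device
 * @rdev: radeon device
 *
 * Initializes the TTM device and the VRAM and GTT range managers, pins
 * a 256KB VRAM buffer for the stolen VGA memory, and registers the
 * debugfs files.
 */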
int radeon_ttm_init(struct radeon_device *rdev)
{
        int r;

        /* No other users of the address space, so set it to 0 */
        r = ttm_device_init(&rdev->mman.bdev, &radeon_bo_driver, rdev->dev,
                               rdev->ddev->anon_inode->i_mapping,
                               rdev->ddev->vma_offset_manager,
                               rdev->need_swiotlb,
                               dma_addressing_limited(&rdev->pdev->dev));
        if (r) {
                DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
                return r;
        }
        rdev->mman.initialized = true;

        r = radeon_ttm_init_vram(rdev);
        if (r) {
                DRM_ERROR("Failed initializing VRAM heap.\n");
                return r;
        }
        /* Change the size here instead of the init above so only lpfn is affected */
        radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);

        r = radeon_bo_create(rdev, 256 * 1024, PAGE_SIZE, true,
                             RADEON_GEM_DOMAIN_VRAM, 0, NULL,
                             NULL, &rdev->stolen_vga_memory);
        if (r)
                return r;

        r = radeon_bo_reserve(rdev->stolen_vga_memory, false);
        if (r)
                return r;
        r = radeon_bo_pin(rdev->stolen_vga_memory, RADEON_GEM_DOMAIN_VRAM, NULL);
        radeon_bo_unreserve(rdev->stolen_vga_memory);
        if (r) {
                radeon_bo_unref(&rdev->stolen_vga_memory);
                return r;
        }
        DRM_INFO("radeon: %uM of VRAM memory ready\n",
                 (unsigned)(rdev->mc.real_vram_size / (1024 * 1024)));

        r = radeon_ttm_init_gtt(rdev);
        if (r) {
                DRM_ERROR("Failed initializing GTT heap.\n");
                return r;
        }
        DRM_INFO("radeon: %uM of GTT memory ready.\n",
                 (unsigned)(rdev->mc.gtt_size / (1024 * 1024)));

        radeon_ttm_debugfs_init(rdev);

        return 0;
}

void radeon_ttm_fini(struct radeon_device *rdev)
{
        int r;

        if (!rdev->mman.initialized)
                return;

        if (rdev->stolen_vga_memory) {
                r = radeon_bo_reserve(rdev->stolen_vga_memory, false);
                if (r == 0) {
                        radeon_bo_unpin(rdev->stolen_vga_memory);
                        radeon_bo_unreserve(rdev->stolen_vga_memory);
                }
                radeon_bo_unref(&rdev->stolen_vga_memory);
        }
        ttm_range_man_fini(&rdev->mman.bdev, TTM_PL_VRAM);
        ttm_range_man_fini(&rdev->mman.bdev, TTM_PL_TT);
        ttm_device_fini(&rdev->mman.bdev);
        radeon_gart_fini(rdev);
        rdev->mman.initialized = false;
        DRM_INFO("radeon: ttm finalized\n");
}

/* this should only be called at bootup or when userspace
 * isn't running */
void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size)
{
        struct ttm_resource_manager *man;

        if (!rdev->mman.initialized)
                return;

        man = ttm_manager_type(&rdev->mman.bdev, TTM_PL_VRAM);
        /* this just adjusts TTM's idea of the size, which sets lpfn to the correct value */
        man->size = size >> PAGE_SHIFT;
}

#if defined(CONFIG_DEBUG_FS)

static int radeon_mm_vram_dump_table_show(struct seq_file *m, void *unused)
{
        struct radeon_device *rdev = (struct radeon_device *)m->private;
        struct ttm_resource_manager *man = ttm_manager_type(&rdev->mman.bdev,
                                                            TTM_PL_VRAM);
        struct drm_printer p = drm_seq_file_printer(m);

        man->func->debug(man, &p);
        return 0;
}

static int radeon_ttm_page_pool_show(struct seq_file *m, void *data)
{
        struct radeon_device *rdev = (struct radeon_device *)m->private;

        return ttm_pool_debugfs(&rdev->mman.bdev.pool, m);
}

static int radeon_mm_gtt_dump_table_show(struct seq_file *m, void *unused)
{
        struct radeon_device *rdev = (struct radeon_device *)m->private;
        struct ttm_resource_manager *man = ttm_manager_type(&rdev->mman.bdev,
                                                            TTM_PL_TT);
        struct drm_printer p = drm_seq_file_printer(m);

        man->func->debug(man, &p);
        return 0;
}

DEFINE_SHOW_ATTRIBUTE(radeon_mm_vram_dump_table);
DEFINE_SHOW_ATTRIBUTE(radeon_mm_gtt_dump_table);
DEFINE_SHOW_ATTRIBUTE(radeon_ttm_page_pool);

static int radeon_ttm_vram_open(struct inode *inode, struct file *filep)
{
        struct radeon_device *rdev = inode->i_private;

        i_size_write(inode, rdev->mc.mc_vram_size);
        filep->private_data = inode->i_private;
        return 0;
}

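/*
 * Read back VRAM one dword at a time through the MM_INDEX/MM_DATA
 * indirect register pair (with the high address bits in
 * EVERGREEN_MM_INDEX_HI on newer ASICs), so the dump does not depend
 * on the CPU-visible aperture.
 */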
static ssize_t radeon_ttm_vram_read(struct file *f, char __user *buf,
                                    size_t size, loff_t *pos)
{
        struct radeon_device *rdev = f->private_data;
        ssize_t result = 0;
        int r;

        if (size & 0x3 || *pos & 0x3)
                return -EINVAL;

        while (size) {
                unsigned long flags;
                uint32_t value;

                if (*pos >= rdev->mc.mc_vram_size)
                        return result;

                spin_lock_irqsave(&rdev->mmio_idx_lock, flags);
                WREG32(RADEON_MM_INDEX, ((uint32_t)*pos) | 0x80000000);
                if (rdev->family >= CHIP_CEDAR)
                        WREG32(EVERGREEN_MM_INDEX_HI, *pos >> 31);
                value = RREG32(RADEON_MM_DATA);
                spin_unlock_irqrestore(&rdev->mmio_idx_lock, flags);

                r = put_user(value, (uint32_t __user *)buf);
                if (r)
                        return r;

                result += 4;
                buf += 4;
                *pos += 4;
                size -= 4;
        }

        return result;
}

static const struct file_operations radeon_ttm_vram_fops = {
        .owner = THIS_MODULE,
        .open = radeon_ttm_vram_open,
        .read = radeon_ttm_vram_read,
        .llseek = default_llseek
};

static int radeon_ttm_gtt_open(struct inode *inode, struct file *filep)
{
        struct radeon_device *rdev = inode->i_private;

        i_size_write(inode, rdev->mc.gtt_size);
        filep->private_data = inode->i_private;
        return 0;
}

static ssize_t radeon_ttm_gtt_read(struct file *f, char __user *buf,
                                   size_t size, loff_t *pos)
{
        struct radeon_device *rdev = f->private_data;
        ssize_t result = 0;
        int r;

        while (size) {
                loff_t p = *pos / PAGE_SIZE;
                unsigned off = *pos & ~PAGE_MASK;
                size_t cur_size = min_t(size_t, size, PAGE_SIZE - off);
                struct page *page;
                void *ptr;

                if (p >= rdev->gart.num_cpu_pages)
                        return result;

                page = rdev->gart.pages[p];
                if (page) {
                        ptr = kmap(page);
                        ptr += off;

                        r = copy_to_user(buf, ptr, cur_size);
                        kunmap(rdev->gart.pages[p]);
                } else {
                        r = clear_user(buf, cur_size);
                }

                if (r)
                        return -EFAULT;

                result += cur_size;
                buf += cur_size;
                *pos += cur_size;
                size -= cur_size;
        }

        return result;
}

static const struct file_operations radeon_ttm_gtt_fops = {
        .owner = THIS_MODULE,
        .open = radeon_ttm_gtt_open,
        .read = radeon_ttm_gtt_read,
        .llseek = default_llseek
};

#endif

static void radeon_ttm_debugfs_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
        struct drm_minor *minor = rdev->ddev->primary;
        struct dentry *root = minor->debugfs_root;

        debugfs_create_file("radeon_vram", 0444, root, rdev,
                            &radeon_ttm_vram_fops);

        debugfs_create_file("radeon_gtt", 0444, root, rdev,
                            &radeon_ttm_gtt_fops);

        debugfs_create_file("radeon_vram_mm", 0444, root, rdev,
                            &radeon_mm_vram_dump_table_fops);
        debugfs_create_file("radeon_gtt_mm", 0444, root, rdev,
                            &radeon_mm_gtt_dump_table_fops);
        debugfs_create_file("ttm_page_pool", 0444, root, rdev,
                            &radeon_ttm_page_pool_fops);
#endif
}