linux/drivers/gpu/drm/ttm/ttm_bo_util.c
   1/* SPDX-License-Identifier: GPL-2.0 OR MIT */
   2/**************************************************************************
   3 *
   4 * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
   5 * All Rights Reserved.
   6 *
   7 * Permission is hereby granted, free of charge, to any person obtaining a
   8 * copy of this software and associated documentation files (the
   9 * "Software"), to deal in the Software without restriction, including
  10 * without limitation the rights to use, copy, modify, merge, publish,
  11 * distribute, sub license, and/or sell copies of the Software, and to
  12 * permit persons to whom the Software is furnished to do so, subject to
  13 * the following conditions:
  14 *
  15 * The above copyright notice and this permission notice (including the
  16 * next paragraph) shall be included in all copies or substantial portions
  17 * of the Software.
  18 *
  19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  21 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
  22 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
  23 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
  24 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
  25 * USE OR OTHER DEALINGS IN THE SOFTWARE.
  26 *
  27 **************************************************************************/
  28/*
  29 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
  30 */
  31
  32#include <drm/ttm/ttm_bo_driver.h>
  33#include <drm/ttm/ttm_placement.h>
  34#include <drm/drm_vma_manager.h>
  35#include <linux/io.h>
  36#include <linux/highmem.h>
  37#include <linux/wait.h>
  38#include <linux/slab.h>
  39#include <linux/vmalloc.h>
  40#include <linux/module.h>
  41#include <linux/reservation.h>
  42
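/*
 * struct ttm_transfer_obj - "ghost" buffer object created by
 * ttm_buffer_object_transfer(). The embedded @base temporarily takes over
 * the old placement of @bo, while @bo itself is kept alive by an extra
 * reference that is dropped in ttm_transfered_destroy().
 */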
  43struct ttm_transfer_obj {
  44        struct ttm_buffer_object base;
  45        struct ttm_buffer_object *bo;
  46};
  47
  48void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
  49{
  50        ttm_bo_mem_put(bo, &bo->mem);
  51}
  52
  53int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
  54                   struct ttm_operation_ctx *ctx,
  55                    struct ttm_mem_reg *new_mem)
  56{
  57        struct ttm_tt *ttm = bo->ttm;
  58        struct ttm_mem_reg *old_mem = &bo->mem;
  59        int ret;
  60
  61        if (old_mem->mem_type != TTM_PL_SYSTEM) {
  62                ret = ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu);
  63
  64                if (unlikely(ret != 0)) {
  65                        if (ret != -ERESTARTSYS)
  66                                pr_err("Failed to expire sync object before unbinding TTM\n");
  67                        return ret;
  68                }
  69
  70                ttm_tt_unbind(ttm);
  71                ttm_bo_free_old_node(bo);
  72                ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM,
  73                                TTM_PL_MASK_MEM);
  74                old_mem->mem_type = TTM_PL_SYSTEM;
  75        }
  76
  77        ret = ttm_tt_set_placement_caching(ttm, new_mem->placement);
  78        if (unlikely(ret != 0))
  79                return ret;
  80
  81        if (new_mem->mem_type != TTM_PL_SYSTEM) {
  82                ret = ttm_tt_bind(ttm, new_mem, ctx);
  83                if (unlikely(ret != 0))
  84                        return ret;
  85        }
  86
  87        *old_mem = *new_mem;
  88        new_mem->mm_node = NULL;
  89
  90        return 0;
  91}
  92EXPORT_SYMBOL(ttm_bo_move_ttm);
  93
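/*
 * ttm_mem_io_lock()/ttm_mem_io_unlock() serialize the io-space reserve and
 * free bookkeeping of a memory type manager. Managers that set
 * io_reserve_fastpath need no such serialization, so both helpers return
 * immediately for them.
 */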
  94int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible)
  95{
  96        if (likely(man->io_reserve_fastpath))
  97                return 0;
  98
  99        if (interruptible)
 100                return mutex_lock_interruptible(&man->io_reserve_mutex);
 101
 102        mutex_lock(&man->io_reserve_mutex);
 103        return 0;
 104}
 105EXPORT_SYMBOL(ttm_mem_io_lock);
 106
 107void ttm_mem_io_unlock(struct ttm_mem_type_manager *man)
 108{
 109        if (likely(man->io_reserve_fastpath))
 110                return;
 111
 112        mutex_unlock(&man->io_reserve_mutex);
 113}
 114EXPORT_SYMBOL(ttm_mem_io_unlock);
 115
 116static int ttm_mem_io_evict(struct ttm_mem_type_manager *man)
 117{
 118        struct ttm_buffer_object *bo;
 119
 120        if (!man->use_io_reserve_lru || list_empty(&man->io_reserve_lru))
 121                return -EAGAIN;
 122
 123        bo = list_first_entry(&man->io_reserve_lru,
 124                              struct ttm_buffer_object,
 125                              io_reserve_lru);
 126        list_del_init(&bo->io_reserve_lru);
 127        ttm_bo_unmap_virtual_locked(bo);
 128
 129        return 0;
 130}
 131
 132
 133int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
 134                       struct ttm_mem_reg *mem)
 135{
 136        struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
 137        int ret = 0;
 138
 139        if (!bdev->driver->io_mem_reserve)
 140                return 0;
 141        if (likely(man->io_reserve_fastpath))
 142                return bdev->driver->io_mem_reserve(bdev, mem);
 143
 144        if (bdev->driver->io_mem_reserve &&
 145            mem->bus.io_reserved_count++ == 0) {
 146retry:
 147                ret = bdev->driver->io_mem_reserve(bdev, mem);
 148                if (ret == -EAGAIN) {
 149                        ret = ttm_mem_io_evict(man);
 150                        if (ret == 0)
 151                                goto retry;
 152                }
 153        }
 154        return ret;
 155}
 156EXPORT_SYMBOL(ttm_mem_io_reserve);
 157
 158void ttm_mem_io_free(struct ttm_bo_device *bdev,
 159                     struct ttm_mem_reg *mem)
 160{
 161        struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
 162
 163        if (likely(man->io_reserve_fastpath))
 164                return;
 165
 166        if (bdev->driver->io_mem_reserve &&
 167            --mem->bus.io_reserved_count == 0 &&
 168            bdev->driver->io_mem_free)
 169                bdev->driver->io_mem_free(bdev, mem);
 170
 171}
 172EXPORT_SYMBOL(ttm_mem_io_free);
 173
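/*
 * ttm_mem_io_reserve_vm()/ttm_mem_io_free_vm() track per-object io-space
 * reservations in mem->bus.io_reserved_vm and, when the manager uses an io
 * reserve LRU, keep the buffer object on that list so ttm_mem_io_evict()
 * can reclaim the reservation under pressure.
 */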
 174int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo)
 175{
 176        struct ttm_mem_reg *mem = &bo->mem;
 177        int ret;
 178
 179        if (!mem->bus.io_reserved_vm) {
 180                struct ttm_mem_type_manager *man =
 181                        &bo->bdev->man[mem->mem_type];
 182
 183                ret = ttm_mem_io_reserve(bo->bdev, mem);
 184                if (unlikely(ret != 0))
 185                        return ret;
 186                mem->bus.io_reserved_vm = true;
 187                if (man->use_io_reserve_lru)
 188                        list_add_tail(&bo->io_reserve_lru,
 189                                      &man->io_reserve_lru);
 190        }
 191        return 0;
 192}
 193
 194void ttm_mem_io_free_vm(struct ttm_buffer_object *bo)
 195{
 196        struct ttm_mem_reg *mem = &bo->mem;
 197
 198        if (mem->bus.io_reserved_vm) {
 199                mem->bus.io_reserved_vm = false;
 200                list_del_init(&bo->io_reserve_lru);
 201                ttm_mem_io_free(bo->bdev, mem);
 202        }
 203}
 204
 205static int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
 206                        void **virtual)
 207{
 208        struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
 209        int ret;
 210        void *addr;
 211
 212        *virtual = NULL;
 213        (void) ttm_mem_io_lock(man, false);
 214        ret = ttm_mem_io_reserve(bdev, mem);
 215        ttm_mem_io_unlock(man);
 216        if (ret || !mem->bus.is_iomem)
 217                return ret;
 218
 219        if (mem->bus.addr) {
 220                addr = mem->bus.addr;
 221        } else {
 222                if (mem->placement & TTM_PL_FLAG_WC)
 223                        addr = ioremap_wc(mem->bus.base + mem->bus.offset, mem->bus.size);
 224                else
 225                        addr = ioremap_nocache(mem->bus.base + mem->bus.offset, mem->bus.size);
 226                if (!addr) {
 227                        (void) ttm_mem_io_lock(man, false);
 228                        ttm_mem_io_free(bdev, mem);
 229                        ttm_mem_io_unlock(man);
 230                        return -ENOMEM;
 231                }
 232        }
 233        *virtual = addr;
 234        return 0;
 235}
 236
 237static void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
 238                         void *virtual)
 239{
 240        struct ttm_mem_type_manager *man;
 241
 242        man = &bdev->man[mem->mem_type];
 243
 244        if (virtual && mem->bus.addr == NULL)
 245                iounmap(virtual);
 246        (void) ttm_mem_io_lock(man, false);
 247        ttm_mem_io_free(bdev, mem);
 248        ttm_mem_io_unlock(man);
 249}
 250
 251static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
 252{
 253        uint32_t *dstP =
 254            (uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
 255        uint32_t *srcP =
 256            (uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));
 257
 258        int i;
 259        for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
 260                iowrite32(ioread32(srcP++), dstP++);
 261        return 0;
 262}
 263
 264#ifdef CONFIG_X86
 265#define __ttm_kmap_atomic_prot(__page, __prot) kmap_atomic_prot(__page, __prot)
 266#define __ttm_kunmap_atomic(__addr) kunmap_atomic(__addr)
 267#else
 268#define __ttm_kmap_atomic_prot(__page, __prot) vmap(&__page, 1, 0,  __prot)
 269#define __ttm_kunmap_atomic(__addr) vunmap(__addr)
 270#endif
 271
 272
 273/**
 274 * ttm_kmap_atomic_prot - Efficient kernel map of a single page with
 275 * specified page protection.
 276 *
 277 * @page: The page to map.
 278 * @prot: The page protection.
 279 *
 280 * This function maps a TTM page using the kmap_atomic api if available,
 281 * otherwise falls back to vmap. The user must make sure that the
 282 * specified page does not have an aliased mapping with a different caching
 283 * policy unless the architecture explicitly allows it. Also mapping and
 284 * unmapping using this api must be correctly nested. Unmapping should
 285 * occur in the reverse order of mapping.
 286 */
 287void *ttm_kmap_atomic_prot(struct page *page, pgprot_t prot)
 288{
 289        if (pgprot_val(prot) == pgprot_val(PAGE_KERNEL))
 290                return kmap_atomic(page);
 291        else
 292                return __ttm_kmap_atomic_prot(page, prot);
 293}
 294EXPORT_SYMBOL(ttm_kmap_atomic_prot);
 295
 296/**
 297 * ttm_kunmap_atomic_prot - Unmap a page that was mapped using
 298 * ttm_kmap_atomic_prot.
 299 *
 300 * @addr: The virtual address from the map.
 301 * @prot: The page protection.
 302 */
 303void ttm_kunmap_atomic_prot(void *addr, pgprot_t prot)
 304{
 305        if (pgprot_val(prot) == pgprot_val(PAGE_KERNEL))
 306                kunmap_atomic(addr);
 307        else
 308                __ttm_kunmap_atomic(addr);
 309}
 310EXPORT_SYMBOL(ttm_kunmap_atomic_prot);
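
/*
 * Illustrative sketch (not part of the upstream file): filling a single page
 * through a write-combined mapping with the atomic map helpers above. The
 * helper name ttm_example_fill_page() and its parameters are hypothetical.
 */
#if 0
static int ttm_example_fill_page(struct page *page, const void *data,
                                 size_t len)
{
        pgprot_t prot = pgprot_writecombine(PAGE_KERNEL);
        void *dst = ttm_kmap_atomic_prot(page, prot);

        if (!dst)
                return -ENOMEM;

        memcpy(dst, data, min_t(size_t, len, PAGE_SIZE));
        ttm_kunmap_atomic_prot(dst, prot);

        return 0;
}
#endif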
 311
 312static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
 313                                unsigned long page,
 314                                pgprot_t prot)
 315{
 316        struct page *d = ttm->pages[page];
 317        void *dst;
 318
 319        if (!d)
 320                return -ENOMEM;
 321
 322        src = (void *)((unsigned long)src + (page << PAGE_SHIFT));
 323        dst = ttm_kmap_atomic_prot(d, prot);
 324        if (!dst)
 325                return -ENOMEM;
 326
 327        memcpy_fromio(dst, src, PAGE_SIZE);
 328
 329        ttm_kunmap_atomic_prot(dst, prot);
 330
 331        return 0;
 332}
 333
 334static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
 335                                unsigned long page,
 336                                pgprot_t prot)
 337{
 338        struct page *s = ttm->pages[page];
 339        void *src;
 340
 341        if (!s)
 342                return -ENOMEM;
 343
 344        dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
 345        src = ttm_kmap_atomic_prot(s, prot);
 346        if (!src)
 347                return -ENOMEM;
 348
 349        memcpy_toio(dst, src, PAGE_SIZE);
 350
 351        ttm_kunmap_atomic_prot(src, prot);
 352
 353        return 0;
 354}
 355
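/*
 * ttm_bo_move_memcpy() - fallback move that copies the buffer with the CPU,
 * page by page. It handles every combination of io and system memory for
 * source and destination, clears the destination when there is no source
 * data, and copies backwards when source and destination overlap within the
 * same memory type.
 */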
 356int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
 357                       struct ttm_operation_ctx *ctx,
 358                       struct ttm_mem_reg *new_mem)
 359{
 360        struct ttm_bo_device *bdev = bo->bdev;
 361        struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
 362        struct ttm_tt *ttm = bo->ttm;
 363        struct ttm_mem_reg *old_mem = &bo->mem;
 364        struct ttm_mem_reg old_copy = *old_mem;
 365        void *old_iomap;
 366        void *new_iomap;
 367        int ret;
 368        unsigned long i;
 369        unsigned long page;
 370        unsigned long add = 0;
 371        int dir;
 372
 373        ret = ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu);
 374        if (ret)
 375                return ret;
 376
 377        ret = ttm_mem_reg_ioremap(bdev, old_mem, &old_iomap);
 378        if (ret)
 379                return ret;
 380        ret = ttm_mem_reg_ioremap(bdev, new_mem, &new_iomap);
 381        if (ret)
 382                goto out;
 383
 384        /*
 385         * Single TTM move. NOP.
 386         */
 387        if (old_iomap == NULL && new_iomap == NULL)
 388                goto out2;
 389
 390        /*
 391         * Don't move nonexistent data. Clear destination instead.
 392         */
 393        if (old_iomap == NULL &&
 394            (ttm == NULL || (ttm->state == tt_unpopulated &&
 395                             !(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)))) {
 396                memset_io(new_iomap, 0, new_mem->num_pages*PAGE_SIZE);
 397                goto out2;
 398        }
 399
 400        /*
 401         * TTM might be null for moves within the same region.
 402         */
 403        if (ttm) {
 404                ret = ttm_tt_populate(ttm, ctx);
 405                if (ret)
 406                        goto out1;
 407        }
 408
 409        add = 0;
 410        dir = 1;
 411
 412        if ((old_mem->mem_type == new_mem->mem_type) &&
 413            (new_mem->start < old_mem->start + old_mem->size)) {
 414                dir = -1;
 415                add = new_mem->num_pages - 1;
 416        }
 417
 418        for (i = 0; i < new_mem->num_pages; ++i) {
 419                page = i * dir + add;
 420                if (old_iomap == NULL) {
 421                        pgprot_t prot = ttm_io_prot(old_mem->placement,
 422                                                    PAGE_KERNEL);
 423                        ret = ttm_copy_ttm_io_page(ttm, new_iomap, page,
 424                                                   prot);
 425                } else if (new_iomap == NULL) {
 426                        pgprot_t prot = ttm_io_prot(new_mem->placement,
 427                                                    PAGE_KERNEL);
 428                        ret = ttm_copy_io_ttm_page(ttm, old_iomap, page,
 429                                                   prot);
 430                } else {
 431                        ret = ttm_copy_io_page(new_iomap, old_iomap, page);
 432                }
 433                if (ret)
 434                        goto out1;
 435        }
 436        mb();
 437out2:
 438        old_copy = *old_mem;
 439        *old_mem = *new_mem;
 440        new_mem->mm_node = NULL;
 441
 442        if (man->flags & TTM_MEMTYPE_FLAG_FIXED) {
 443                ttm_tt_destroy(ttm);
 444                bo->ttm = NULL;
 445        }
 446
 447out1:
 448        ttm_mem_reg_iounmap(bdev, old_mem, new_iomap);
 449out:
 450        ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap);
 451
 452        /*
 453         * On error, keep the mm node!
 454         */
 455        if (!ret)
 456                ttm_bo_mem_put(bo, &old_copy);
 457        return ret;
 458}
 459EXPORT_SYMBOL(ttm_bo_move_memcpy);
 460
 461static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
 462{
 463        struct ttm_transfer_obj *fbo;
 464
 465        fbo = container_of(bo, struct ttm_transfer_obj, base);
 466        ttm_bo_put(fbo->bo);
 467        kfree(fbo);
 468}
 469
 470/**
 471 * ttm_buffer_object_transfer
 472 *
 473 * @bo: A pointer to a struct ttm_buffer_object.
 474 * @new_obj: A pointer to a pointer to a newly created ttm_buffer_object,
 475 * holding the data of @bo with the old placement.
 476 *
 477 * This is a utility function that may be called after an accelerated move
 478 * has been scheduled. A new buffer object is created as a placeholder for
 479 * the old data while it's being copied. When that buffer object is idle,
 480 * it can be destroyed, releasing the space of the old placement.
 481 * Returns:
  482 * 0: Success. !0: Failure.
 483 */
 484
 485static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
 486                                      struct ttm_buffer_object **new_obj)
 487{
 488        struct ttm_transfer_obj *fbo;
 489        int ret;
 490
 491        fbo = kmalloc(sizeof(*fbo), GFP_KERNEL);
 492        if (!fbo)
 493                return -ENOMEM;
 494
 495        ttm_bo_get(bo);
 496        fbo->base = *bo;
 497        fbo->bo = bo;
 498
 499        /**
 500         * Fix up members that we shouldn't copy directly:
 501         * TODO: Explicit member copy would probably be better here.
 502         */
 503
 504        atomic_inc(&bo->bdev->glob->bo_count);
 505        INIT_LIST_HEAD(&fbo->base.ddestroy);
 506        INIT_LIST_HEAD(&fbo->base.lru);
 507        INIT_LIST_HEAD(&fbo->base.swap);
 508        INIT_LIST_HEAD(&fbo->base.io_reserve_lru);
 509        mutex_init(&fbo->base.wu_mutex);
 510        fbo->base.moving = NULL;
 511        drm_vma_node_reset(&fbo->base.vma_node);
 512        atomic_set(&fbo->base.cpu_writers, 0);
 513
 514        kref_init(&fbo->base.list_kref);
 515        kref_init(&fbo->base.kref);
 516        fbo->base.destroy = &ttm_transfered_destroy;
 517        fbo->base.acc_size = 0;
 518        fbo->base.resv = &fbo->base.ttm_resv;
 519        reservation_object_init(fbo->base.resv);
 520        ret = reservation_object_trylock(fbo->base.resv);
 521        WARN_ON(!ret);
 522
 523        *new_obj = &fbo->base;
 524        return 0;
 525}
 526
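/**
 * ttm_io_prot - Derive the kernel page protection to use for a mapping
 * with the given TTM caching flags.
 *
 * @caching_flags: TTM_PL_FLAG_* caching flags of the memory region.
 * @tmp: Base page protection (typically PAGE_KERNEL) to adjust.
 *
 * Returns @tmp unchanged for cached memory; otherwise a write-combined or
 * uncached variant of it, depending on the flags and the architecture.
 */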
 527pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
 528{
 529        /* Cached mappings need no adjustment */
 530        if (caching_flags & TTM_PL_FLAG_CACHED)
 531                return tmp;
 532
 533#if defined(__i386__) || defined(__x86_64__)
 534        if (caching_flags & TTM_PL_FLAG_WC)
 535                tmp = pgprot_writecombine(tmp);
 536        else if (boot_cpu_data.x86 > 3)
 537                tmp = pgprot_noncached(tmp);
 538#endif
 539#if defined(__ia64__) || defined(__arm__) || defined(__aarch64__) || \
 540    defined(__powerpc__)
 541        if (caching_flags & TTM_PL_FLAG_WC)
 542                tmp = pgprot_writecombine(tmp);
 543        else
 544                tmp = pgprot_noncached(tmp);
 545#endif
 546#if defined(__sparc__) || defined(__mips__)
 547        tmp = pgprot_noncached(tmp);
 548#endif
 549        return tmp;
 550}
 551EXPORT_SYMBOL(ttm_io_prot);
 552
 553static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
 554                          unsigned long offset,
 555                          unsigned long size,
 556                          struct ttm_bo_kmap_obj *map)
 557{
 558        struct ttm_mem_reg *mem = &bo->mem;
 559
 560        if (bo->mem.bus.addr) {
 561                map->bo_kmap_type = ttm_bo_map_premapped;
 562                map->virtual = (void *)(((u8 *)bo->mem.bus.addr) + offset);
 563        } else {
 564                map->bo_kmap_type = ttm_bo_map_iomap;
 565                if (mem->placement & TTM_PL_FLAG_WC)
 566                        map->virtual = ioremap_wc(bo->mem.bus.base + bo->mem.bus.offset + offset,
 567                                                  size);
 568                else
 569                        map->virtual = ioremap_nocache(bo->mem.bus.base + bo->mem.bus.offset + offset,
 570                                                       size);
 571        }
 572        return (!map->virtual) ? -ENOMEM : 0;
 573}
 574
 575static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
 576                           unsigned long start_page,
 577                           unsigned long num_pages,
 578                           struct ttm_bo_kmap_obj *map)
 579{
 580        struct ttm_mem_reg *mem = &bo->mem;
 581        struct ttm_operation_ctx ctx = {
 582                .interruptible = false,
 583                .no_wait_gpu = false
 584        };
 585        struct ttm_tt *ttm = bo->ttm;
 586        pgprot_t prot;
 587        int ret;
 588
 589        BUG_ON(!ttm);
 590
 591        ret = ttm_tt_populate(ttm, &ctx);
 592        if (ret)
 593                return ret;
 594
 595        if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED)) {
 596                /*
 597                 * We're mapping a single page, and the desired
 598                 * page protection is consistent with the bo.
 599                 */
 600
 601                map->bo_kmap_type = ttm_bo_map_kmap;
 602                map->page = ttm->pages[start_page];
 603                map->virtual = kmap(map->page);
 604        } else {
 605                /*
 606                 * We need to use vmap to get the desired page protection
 607                 * or to make the buffer object look contiguous.
 608                 */
 609                prot = ttm_io_prot(mem->placement, PAGE_KERNEL);
 610                map->bo_kmap_type = ttm_bo_map_vmap;
 611                map->virtual = vmap(ttm->pages + start_page, num_pages,
 612                                    0, prot);
 613        }
 614        return (!map->virtual) ? -ENOMEM : 0;
 615}
 616
 617int ttm_bo_kmap(struct ttm_buffer_object *bo,
 618                unsigned long start_page, unsigned long num_pages,
 619                struct ttm_bo_kmap_obj *map)
 620{
 621        struct ttm_mem_type_manager *man =
 622                &bo->bdev->man[bo->mem.mem_type];
 623        unsigned long offset, size;
 624        int ret;
 625
 626        map->virtual = NULL;
 627        map->bo = bo;
 628        if (num_pages > bo->num_pages)
 629                return -EINVAL;
 630        if (start_page > bo->num_pages)
 631                return -EINVAL;
 632#if 0
 633        if (num_pages > 1 && !capable(CAP_SYS_ADMIN))
 634                return -EPERM;
 635#endif
 636        (void) ttm_mem_io_lock(man, false);
 637        ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
 638        ttm_mem_io_unlock(man);
 639        if (ret)
 640                return ret;
 641        if (!bo->mem.bus.is_iomem) {
 642                return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
 643        } else {
 644                offset = start_page << PAGE_SHIFT;
 645                size = num_pages << PAGE_SHIFT;
 646                return ttm_bo_ioremap(bo, offset, size, map);
 647        }
 648}
 649EXPORT_SYMBOL(ttm_bo_kmap);
 650
 651void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
 652{
 653        struct ttm_buffer_object *bo = map->bo;
 654        struct ttm_mem_type_manager *man =
 655                &bo->bdev->man[bo->mem.mem_type];
 656
 657        if (!map->virtual)
 658                return;
 659        switch (map->bo_kmap_type) {
 660        case ttm_bo_map_iomap:
 661                iounmap(map->virtual);
 662                break;
 663        case ttm_bo_map_vmap:
 664                vunmap(map->virtual);
 665                break;
 666        case ttm_bo_map_kmap:
 667                kunmap(map->page);
 668                break;
 669        case ttm_bo_map_premapped:
 670                break;
 671        default:
 672                BUG();
 673        }
 674        (void) ttm_mem_io_lock(man, false);
 675        ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
 676        ttm_mem_io_unlock(man);
 677        map->virtual = NULL;
 678        map->page = NULL;
 679}
 680EXPORT_SYMBOL(ttm_bo_kunmap);
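
/*
 * Illustrative sketch (not part of the upstream file): mapping a whole,
 * already reserved buffer object for CPU access and clearing it, using the
 * ttm_kmap_obj_virtual() accessor from ttm_bo_api.h. The helper name
 * ttm_example_clear_bo() is hypothetical.
 */
#if 0
static int ttm_example_clear_bo(struct ttm_buffer_object *bo)
{
        struct ttm_bo_kmap_obj map;
        bool is_iomem;
        void *virtual;
        int ret;

        ret = ttm_bo_kmap(bo, 0, bo->num_pages, &map);
        if (ret)
                return ret;

        virtual = ttm_kmap_obj_virtual(&map, &is_iomem);
        if (is_iomem)
                memset_io((void __iomem *)virtual, 0,
                          bo->num_pages << PAGE_SHIFT);
        else
                memset(virtual, 0, bo->num_pages << PAGE_SHIFT);

        ttm_bo_kunmap(&map);
        return 0;
}
#endif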
 681
 682int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
 683                              struct dma_fence *fence,
 684                              bool evict,
 685                              struct ttm_mem_reg *new_mem)
 686{
 687        struct ttm_bo_device *bdev = bo->bdev;
 688        struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
 689        struct ttm_mem_reg *old_mem = &bo->mem;
 690        int ret;
 691        struct ttm_buffer_object *ghost_obj;
 692
 693        reservation_object_add_excl_fence(bo->resv, fence);
 694        if (evict) {
 695                ret = ttm_bo_wait(bo, false, false);
 696                if (ret)
 697                        return ret;
 698
 699                if (man->flags & TTM_MEMTYPE_FLAG_FIXED) {
 700                        ttm_tt_destroy(bo->ttm);
 701                        bo->ttm = NULL;
 702                }
 703                ttm_bo_free_old_node(bo);
 704        } else {
 705                /**
 706                 * This should help pipeline ordinary buffer moves.
 707                 *
 708                 * Hang old buffer memory on a new buffer object,
 709                 * and leave it to be released when the GPU
 710                 * operation has completed.
 711                 */
 712
 713                dma_fence_put(bo->moving);
 714                bo->moving = dma_fence_get(fence);
 715
 716                ret = ttm_buffer_object_transfer(bo, &ghost_obj);
 717                if (ret)
 718                        return ret;
 719
 720                reservation_object_add_excl_fence(ghost_obj->resv, fence);
 721
 722                /**
 723                 * If we're not moving to fixed memory, the TTM object
  724                 * needs to stay alive. Otherwise hang it on the ghost
 725                 * bo to be unbound and destroyed.
 726                 */
 727
 728                if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED))
 729                        ghost_obj->ttm = NULL;
 730                else
 731                        bo->ttm = NULL;
 732
 733                ttm_bo_unreserve(ghost_obj);
 734                ttm_bo_put(ghost_obj);
 735        }
 736
 737        *old_mem = *new_mem;
 738        new_mem->mm_node = NULL;
 739
 740        return 0;
 741}
 742EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);
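
/*
 * Illustrative sketch (not part of the upstream file): a driver's move
 * callback typically emits the copy on the GPU, obtains a dma_fence for it
 * and hands that fence to ttm_bo_move_accel_cleanup(). The names
 * my_driver_move() and my_driver_copy_buffer() are hypothetical.
 */
#if 0
static int my_driver_move(struct ttm_buffer_object *bo, bool evict,
                          struct ttm_operation_ctx *ctx,
                          struct ttm_mem_reg *new_mem)
{
        struct dma_fence *fence;
        int ret;

        /* Queue the blit and get back the fence that signals completion. */
        fence = my_driver_copy_buffer(bo, &bo->mem, new_mem);
        if (IS_ERR(fence))
                return PTR_ERR(fence);

        ret = ttm_bo_move_accel_cleanup(bo, fence, evict, new_mem);
        dma_fence_put(fence);
        return ret;
}
#endif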
 743
 744int ttm_bo_pipeline_move(struct ttm_buffer_object *bo,
 745                         struct dma_fence *fence, bool evict,
 746                         struct ttm_mem_reg *new_mem)
 747{
 748        struct ttm_bo_device *bdev = bo->bdev;
 749        struct ttm_mem_reg *old_mem = &bo->mem;
 750
 751        struct ttm_mem_type_manager *from = &bdev->man[old_mem->mem_type];
 752        struct ttm_mem_type_manager *to = &bdev->man[new_mem->mem_type];
 753
 754        int ret;
 755
 756        reservation_object_add_excl_fence(bo->resv, fence);
 757
 758        if (!evict) {
 759                struct ttm_buffer_object *ghost_obj;
 760
 761                /**
 762                 * This should help pipeline ordinary buffer moves.
 763                 *
 764                 * Hang old buffer memory on a new buffer object,
 765                 * and leave it to be released when the GPU
 766                 * operation has completed.
 767                 */
 768
 769                dma_fence_put(bo->moving);
 770                bo->moving = dma_fence_get(fence);
 771
 772                ret = ttm_buffer_object_transfer(bo, &ghost_obj);
 773                if (ret)
 774                        return ret;
 775
 776                reservation_object_add_excl_fence(ghost_obj->resv, fence);
 777
 778                /**
 779                 * If we're not moving to fixed memory, the TTM object
  780                 * needs to stay alive. Otherwise hang it on the ghost
 781                 * bo to be unbound and destroyed.
 782                 */
 783
 784                if (!(to->flags & TTM_MEMTYPE_FLAG_FIXED))
 785                        ghost_obj->ttm = NULL;
 786                else
 787                        bo->ttm = NULL;
 788
 789                ttm_bo_unreserve(ghost_obj);
 790                ttm_bo_put(ghost_obj);
 791
 792        } else if (from->flags & TTM_MEMTYPE_FLAG_FIXED) {
 793
 794                /**
  795                 * The BO doesn't have a TTM that we need to bind/unbind.
  796                 * Just remember this eviction and free up the allocation.
 797                 */
 798
 799                spin_lock(&from->move_lock);
 800                if (!from->move || dma_fence_is_later(fence, from->move)) {
 801                        dma_fence_put(from->move);
 802                        from->move = dma_fence_get(fence);
 803                }
 804                spin_unlock(&from->move_lock);
 805
 806                ttm_bo_free_old_node(bo);
 807
 808                dma_fence_put(bo->moving);
 809                bo->moving = dma_fence_get(fence);
 810
 811        } else {
 812                /**
 813                 * Last resort, wait for the move to be completed.
 814                 *
  815                 * Should never happen in practice.
 816                 */
 817
 818                ret = ttm_bo_wait(bo, false, false);
 819                if (ret)
 820                        return ret;
 821
 822                if (to->flags & TTM_MEMTYPE_FLAG_FIXED) {
 823                        ttm_tt_destroy(bo->ttm);
 824                        bo->ttm = NULL;
 825                }
 826                ttm_bo_free_old_node(bo);
 827        }
 828
 829        *old_mem = *new_mem;
 830        new_mem->mm_node = NULL;
 831
 832        return 0;
 833}
 834EXPORT_SYMBOL(ttm_bo_pipeline_move);
 835
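/*
 * ttm_bo_pipeline_gutting() - hand the current placement, TTM and fences
 * over to a ghost object and reset @bo to an empty TTM_PL_SYSTEM placement,
 * so the old backing store is only released once the ghost object's fences
 * have signaled.
 */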
 836int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo)
 837{
 838        struct ttm_buffer_object *ghost;
 839        int ret;
 840
 841        ret = ttm_buffer_object_transfer(bo, &ghost);
 842        if (ret)
 843                return ret;
 844
 845        ret = reservation_object_copy_fences(ghost->resv, bo->resv);
 846        /* Last resort, wait for the BO to be idle when we are OOM */
 847        if (ret)
 848                ttm_bo_wait(bo, false, false);
 849
 850        memset(&bo->mem, 0, sizeof(bo->mem));
 851        bo->mem.mem_type = TTM_PL_SYSTEM;
 852        bo->ttm = NULL;
 853
 854        ttm_bo_unreserve(ghost);
 855        ttm_bo_put(ghost);
 856
 857        return 0;
 858}
 859