linux/drivers/gpu/drm/ttm/ttm_bo_util.c
/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
 *
 * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/drm_vma_manager.h>
#include <linux/io.h>
#include <linux/highmem.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/reservation.h>

struct ttm_transfer_obj {
        struct ttm_buffer_object base;
        struct ttm_buffer_object *bo;
};

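/**
 * ttm_bo_free_old_node - Free the memory node backing a buffer object's
 * current placement.
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 */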
void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
{
        ttm_bo_mem_put(bo, &bo->mem);
}

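/**
 * ttm_bo_move_ttm - Move a buffer object by rebinding its TTM.
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @ctx: Operation context controlling waits (interruptible, no_wait_gpu).
 * @new_mem: The new memory region to move to.
 *
 * If the old placement is not system memory, waits for the buffer to
 * become idle, unbinds the TTM and frees the old memory node. The TTM
 * caching is then adjusted to match @new_mem, and the TTM is bound to
 * @new_mem unless it is system memory. On success @bo->mem is updated
 * and @new_mem->mm_node is consumed.
 *
 * Returns:
 * 0 on success, a negative error code otherwise.
 */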
int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
                    struct ttm_operation_ctx *ctx,
                    struct ttm_mem_reg *new_mem)
{
        struct ttm_tt *ttm = bo->ttm;
        struct ttm_mem_reg *old_mem = &bo->mem;
        int ret;

        if (old_mem->mem_type != TTM_PL_SYSTEM) {
                ret = ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu);

                if (unlikely(ret != 0)) {
                        if (ret != -ERESTARTSYS)
                                pr_err("Failed to expire sync object before unbinding TTM\n");
                        return ret;
                }

                ttm_tt_unbind(ttm);
                ttm_bo_free_old_node(bo);
                ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM,
                                TTM_PL_MASK_MEM);
                old_mem->mem_type = TTM_PL_SYSTEM;
        }

        ret = ttm_tt_set_placement_caching(ttm, new_mem->placement);
        if (unlikely(ret != 0))
                return ret;

        if (new_mem->mem_type != TTM_PL_SYSTEM) {
                ret = ttm_tt_bind(ttm, new_mem, ctx);
                if (unlikely(ret != 0))
                        return ret;
        }

        *old_mem = *new_mem;
        new_mem->mm_node = NULL;

        return 0;
}
EXPORT_SYMBOL(ttm_bo_move_ttm);

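/**
 * ttm_mem_io_lock - Take the io_reserve_mutex of a memory type manager.
 *
 * @man: The memory type manager.
 * @interruptible: Sleep interruptibly while waiting for the mutex.
 *
 * A no-op when the manager uses the io reserve fastpath.
 *
 * Returns:
 * 0 if the lock was taken (or not needed), otherwise the error returned
 * by mutex_lock_interruptible() when the wait was interrupted.
 */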
int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible)
{
        if (likely(man->io_reserve_fastpath))
                return 0;

        if (interruptible)
                return mutex_lock_interruptible(&man->io_reserve_mutex);

        mutex_lock(&man->io_reserve_mutex);
        return 0;
}
EXPORT_SYMBOL(ttm_mem_io_lock);

void ttm_mem_io_unlock(struct ttm_mem_type_manager *man)
{
        if (likely(man->io_reserve_fastpath))
                return;

        mutex_unlock(&man->io_reserve_mutex);
}
EXPORT_SYMBOL(ttm_mem_io_unlock);

static int ttm_mem_io_evict(struct ttm_mem_type_manager *man)
{
        struct ttm_buffer_object *bo;

        if (!man->use_io_reserve_lru || list_empty(&man->io_reserve_lru))
                return -EAGAIN;

        bo = list_first_entry(&man->io_reserve_lru,
                              struct ttm_buffer_object,
                              io_reserve_lru);
        list_del_init(&bo->io_reserve_lru);
        ttm_bo_unmap_virtual_locked(bo);

        return 0;
}

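/**
 * ttm_mem_io_reserve - Reserve the io resources backing a memory region.
 *
 * @bdev: The buffer object device.
 * @mem: The memory region to reserve io space for.
 *
 * Calls the driver's io_mem_reserve hook. Outside the fastpath the
 * reservation is refcounted, and when the hook returns -EAGAIN a buffer
 * is evicted from the manager's io_reserve_lru and the call is retried.
 *
 * Returns:
 * 0 on success or if the driver has no io_mem_reserve hook, otherwise
 * the error returned by the driver.
 */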
int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
                       struct ttm_mem_reg *mem)
{
        struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
        int ret = 0;

        if (!bdev->driver->io_mem_reserve)
                return 0;
        if (likely(man->io_reserve_fastpath))
                return bdev->driver->io_mem_reserve(bdev, mem);

        if (bdev->driver->io_mem_reserve &&
            mem->bus.io_reserved_count++ == 0) {
retry:
                ret = bdev->driver->io_mem_reserve(bdev, mem);
                if (ret == -EAGAIN) {
                        ret = ttm_mem_io_evict(man);
                        if (ret == 0)
                                goto retry;
                }
        }
        return ret;
}
EXPORT_SYMBOL(ttm_mem_io_reserve);

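/**
 * ttm_mem_io_free - Release io resources reserved by ttm_mem_io_reserve().
 *
 * @bdev: The buffer object device.
 * @mem: The memory region whose io reservation is dropped.
 *
 * Drops one io reservation reference and calls the driver's io_mem_free
 * hook once the count reaches zero. A no-op when the manager uses the io
 * reserve fastpath.
 */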
void ttm_mem_io_free(struct ttm_bo_device *bdev,
                     struct ttm_mem_reg *mem)
{
        struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];

        if (likely(man->io_reserve_fastpath))
                return;

        if (bdev->driver->io_mem_reserve &&
            --mem->bus.io_reserved_count == 0 &&
            bdev->driver->io_mem_free)
                bdev->driver->io_mem_free(bdev, mem);
}
EXPORT_SYMBOL(ttm_mem_io_free);

int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo)
{
        struct ttm_mem_reg *mem = &bo->mem;
        int ret;

        if (!mem->bus.io_reserved_vm) {
                struct ttm_mem_type_manager *man =
                        &bo->bdev->man[mem->mem_type];

                ret = ttm_mem_io_reserve(bo->bdev, mem);
                if (unlikely(ret != 0))
                        return ret;
                mem->bus.io_reserved_vm = true;
                if (man->use_io_reserve_lru)
                        list_add_tail(&bo->io_reserve_lru,
                                      &man->io_reserve_lru);
        }
        return 0;
}

void ttm_mem_io_free_vm(struct ttm_buffer_object *bo)
{
        struct ttm_mem_reg *mem = &bo->mem;

        if (mem->bus.io_reserved_vm) {
                mem->bus.io_reserved_vm = false;
                list_del_init(&bo->io_reserve_lru);
                ttm_mem_io_free(bo->bdev, mem);
        }
}

static int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
                        void **virtual)
{
        struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
        int ret;
        void *addr;

        *virtual = NULL;
        (void) ttm_mem_io_lock(man, false);
        ret = ttm_mem_io_reserve(bdev, mem);
        ttm_mem_io_unlock(man);
        if (ret || !mem->bus.is_iomem)
                return ret;

        if (mem->bus.addr) {
                addr = mem->bus.addr;
        } else {
                if (mem->placement & TTM_PL_FLAG_WC)
                        addr = ioremap_wc(mem->bus.base + mem->bus.offset, mem->bus.size);
                else
                        addr = ioremap_nocache(mem->bus.base + mem->bus.offset, mem->bus.size);
                if (!addr) {
                        (void) ttm_mem_io_lock(man, false);
                        ttm_mem_io_free(bdev, mem);
                        ttm_mem_io_unlock(man);
                        return -ENOMEM;
                }
        }
        *virtual = addr;
        return 0;
}

static void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
                         void *virtual)
{
        struct ttm_mem_type_manager *man;

        man = &bdev->man[mem->mem_type];

        if (virtual && mem->bus.addr == NULL)
                iounmap(virtual);
        (void) ttm_mem_io_lock(man, false);
        ttm_mem_io_free(bdev, mem);
        ttm_mem_io_unlock(man);
}

static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
{
        uint32_t *dstP =
            (uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
        uint32_t *srcP =
            (uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));

        int i;
        for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
                iowrite32(ioread32(srcP++), dstP++);
        return 0;
}

#ifdef CONFIG_X86
#define __ttm_kmap_atomic_prot(__page, __prot) kmap_atomic_prot(__page, __prot)
#define __ttm_kunmap_atomic(__addr) kunmap_atomic(__addr)
#else
#define __ttm_kmap_atomic_prot(__page, __prot) vmap(&__page, 1, 0,  __prot)
#define __ttm_kunmap_atomic(__addr) vunmap(__addr)
#endif

/**
 * ttm_kmap_atomic_prot - Efficient kernel map of a single page with
 * specified page protection.
 *
 * @page: The page to map.
 * @prot: The page protection.
 *
 * This function maps a TTM page using the kmap_atomic api if available,
 * otherwise falls back to vmap. The user must make sure that the
 * specified page does not have an aliased mapping with a different caching
 * policy unless the architecture explicitly allows it. Also mapping and
 * unmapping using this api must be correctly nested. Unmapping should
 * occur in the reverse order of mapping.
 */
void *ttm_kmap_atomic_prot(struct page *page, pgprot_t prot)
{
        if (pgprot_val(prot) == pgprot_val(PAGE_KERNEL))
                return kmap_atomic(page);
        else
                return __ttm_kmap_atomic_prot(page, prot);
}
EXPORT_SYMBOL(ttm_kmap_atomic_prot);

/**
 * ttm_kunmap_atomic_prot - Unmap a page that was mapped using
 * ttm_kmap_atomic_prot.
 *
 * @addr: The virtual address from the map.
 * @prot: The page protection.
 */
void ttm_kunmap_atomic_prot(void *addr, pgprot_t prot)
{
        if (pgprot_val(prot) == pgprot_val(PAGE_KERNEL))
                kunmap_atomic(addr);
        else
                __ttm_kunmap_atomic(addr);
}
EXPORT_SYMBOL(ttm_kunmap_atomic_prot);

static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
                                unsigned long page,
                                pgprot_t prot)
{
        struct page *d = ttm->pages[page];
        void *dst;

        if (!d)
                return -ENOMEM;

        src = (void *)((unsigned long)src + (page << PAGE_SHIFT));
        dst = ttm_kmap_atomic_prot(d, prot);
        if (!dst)
                return -ENOMEM;

        memcpy_fromio(dst, src, PAGE_SIZE);

        ttm_kunmap_atomic_prot(dst, prot);

        return 0;
}

static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
                                unsigned long page,
                                pgprot_t prot)
{
        struct page *s = ttm->pages[page];
        void *src;

        if (!s)
                return -ENOMEM;

        dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
        src = ttm_kmap_atomic_prot(s, prot);
        if (!src)
                return -ENOMEM;

        memcpy_toio(dst, src, PAGE_SIZE);

        ttm_kunmap_atomic_prot(src, prot);

        return 0;
}

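/**
 * ttm_bo_move_memcpy - Fallback move implemented as a CPU copy.
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @ctx: Operation context controlling waits (interruptible, no_wait_gpu).
 * @new_mem: The new memory region to move to.
 *
 * Maps the old and new memory regions and copies the contents page by
 * page, copying backwards when the regions overlap. If the old placement
 * holds no data, the destination is cleared instead. On success the old
 * memory node is freed and, for fixed memory destinations, the TTM is
 * destroyed.
 *
 * Returns:
 * 0 on success, a negative error code otherwise. On error the old memory
 * node is kept.
 */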
int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
                       struct ttm_operation_ctx *ctx,
                       struct ttm_mem_reg *new_mem)
{
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
        struct ttm_tt *ttm = bo->ttm;
        struct ttm_mem_reg *old_mem = &bo->mem;
        struct ttm_mem_reg old_copy = *old_mem;
        void *old_iomap;
        void *new_iomap;
        int ret;
        unsigned long i;
        unsigned long page;
        unsigned long add = 0;
        int dir;

        ret = ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu);
        if (ret)
                return ret;

        ret = ttm_mem_reg_ioremap(bdev, old_mem, &old_iomap);
        if (ret)
                return ret;
        ret = ttm_mem_reg_ioremap(bdev, new_mem, &new_iomap);
        if (ret)
                goto out;

        /*
         * Single TTM move. NOP.
         */
        if (old_iomap == NULL && new_iomap == NULL)
                goto out2;

        /*
         * Don't move nonexistent data. Clear destination instead.
         */
        if (old_iomap == NULL &&
            (ttm == NULL || (ttm->state == tt_unpopulated &&
                             !(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)))) {
                memset_io(new_iomap, 0, new_mem->num_pages*PAGE_SIZE);
                goto out2;
        }

        /*
         * TTM might be null for moves within the same region.
         */
        if (ttm) {
                ret = ttm_tt_populate(ttm, ctx);
                if (ret)
                        goto out1;
        }

        add = 0;
        dir = 1;

        if ((old_mem->mem_type == new_mem->mem_type) &&
            (new_mem->start < old_mem->start + old_mem->size)) {
                dir = -1;
                add = new_mem->num_pages - 1;
        }

        for (i = 0; i < new_mem->num_pages; ++i) {
                page = i * dir + add;
                if (old_iomap == NULL) {
                        pgprot_t prot = ttm_io_prot(old_mem->placement,
                                                    PAGE_KERNEL);
                        ret = ttm_copy_ttm_io_page(ttm, new_iomap, page,
                                                   prot);
                } else if (new_iomap == NULL) {
                        pgprot_t prot = ttm_io_prot(new_mem->placement,
                                                    PAGE_KERNEL);
                        ret = ttm_copy_io_ttm_page(ttm, old_iomap, page,
                                                   prot);
                } else {
                        ret = ttm_copy_io_page(new_iomap, old_iomap, page);
                }
                if (ret)
                        goto out1;
        }
        mb();
out2:
        old_copy = *old_mem;
        *old_mem = *new_mem;
        new_mem->mm_node = NULL;

        if (man->flags & TTM_MEMTYPE_FLAG_FIXED) {
                ttm_tt_destroy(ttm);
                bo->ttm = NULL;
        }

out1:
        ttm_mem_reg_iounmap(bdev, old_mem, new_iomap);
out:
        ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap);

        /*
         * On error, keep the mm node!
         */
        if (!ret)
                ttm_bo_mem_put(bo, &old_copy);
        return ret;
}
EXPORT_SYMBOL(ttm_bo_move_memcpy);

static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
{
        struct ttm_transfer_obj *fbo;

        fbo = container_of(bo, struct ttm_transfer_obj, base);
        ttm_bo_put(fbo->bo);
        kfree(fbo);
}

/**
 * ttm_buffer_object_transfer
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @new_obj: A pointer to a pointer to a newly created ttm_buffer_object,
 * holding the data of @bo with the old placement.
 *
 * This is a utility function that may be called after an accelerated move
 * has been scheduled. A new buffer object is created as a placeholder for
 * the old data while it's being copied. When that buffer object is idle,
 * it can be destroyed, releasing the space of the old placement.
 * Returns:
 * !0: Failure.
 */
static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
                                      struct ttm_buffer_object **new_obj)
{
        struct ttm_transfer_obj *fbo;
        int ret;

        fbo = kmalloc(sizeof(*fbo), GFP_KERNEL);
        if (!fbo)
                return -ENOMEM;

        fbo->base = *bo;
        fbo->base.mem.placement |= TTM_PL_FLAG_NO_EVICT;

        ttm_bo_get(bo);
        fbo->bo = bo;

        /**
         * Fix up members that we shouldn't copy directly:
         * TODO: Explicit member copy would probably be better here.
         */

        atomic_inc(&bo->bdev->glob->bo_count);
        INIT_LIST_HEAD(&fbo->base.ddestroy);
        INIT_LIST_HEAD(&fbo->base.lru);
        INIT_LIST_HEAD(&fbo->base.swap);
        INIT_LIST_HEAD(&fbo->base.io_reserve_lru);
        mutex_init(&fbo->base.wu_mutex);
        fbo->base.moving = NULL;
        drm_vma_node_reset(&fbo->base.vma_node);
        atomic_set(&fbo->base.cpu_writers, 0);

        kref_init(&fbo->base.list_kref);
        kref_init(&fbo->base.kref);
        fbo->base.destroy = &ttm_transfered_destroy;
        fbo->base.acc_size = 0;
        fbo->base.resv = &fbo->base.ttm_resv;
        reservation_object_init(fbo->base.resv);
        ret = reservation_object_trylock(fbo->base.resv);
        WARN_ON(!ret);

        *new_obj = &fbo->base;
        return 0;
}

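/**
 * ttm_io_prot - Compute the page protection for a given caching policy.
 *
 * @caching_flags: The TTM_PL_FLAG_* caching flags of the placement.
 * @tmp: The base page protection to adjust.
 *
 * Returns @tmp unchanged for cached placements, otherwise an
 * architecture-dependent write-combined or uncached variant of it.
 */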
pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
{
        /* Cached mappings need no adjustment */
        if (caching_flags & TTM_PL_FLAG_CACHED)
                return tmp;

#if defined(__i386__) || defined(__x86_64__)
        if (caching_flags & TTM_PL_FLAG_WC)
                tmp = pgprot_writecombine(tmp);
        else if (boot_cpu_data.x86 > 3)
                tmp = pgprot_noncached(tmp);
#endif
#if defined(__ia64__) || defined(__arm__) || defined(__aarch64__) || \
    defined(__powerpc__) || defined(__mips__)
        if (caching_flags & TTM_PL_FLAG_WC)
                tmp = pgprot_writecombine(tmp);
        else
                tmp = pgprot_noncached(tmp);
#endif
#if defined(__sparc__)
        tmp = pgprot_noncached(tmp);
#endif
        return tmp;
}
EXPORT_SYMBOL(ttm_io_prot);

static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
                          unsigned long offset,
                          unsigned long size,
                          struct ttm_bo_kmap_obj *map)
{
        struct ttm_mem_reg *mem = &bo->mem;

        if (bo->mem.bus.addr) {
                map->bo_kmap_type = ttm_bo_map_premapped;
                map->virtual = (void *)(((u8 *)bo->mem.bus.addr) + offset);
        } else {
                map->bo_kmap_type = ttm_bo_map_iomap;
                if (mem->placement & TTM_PL_FLAG_WC)
                        map->virtual = ioremap_wc(bo->mem.bus.base + bo->mem.bus.offset + offset,
                                                  size);
                else
                        map->virtual = ioremap_nocache(bo->mem.bus.base + bo->mem.bus.offset + offset,
                                                       size);
        }
        return (!map->virtual) ? -ENOMEM : 0;
}

static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
                           unsigned long start_page,
                           unsigned long num_pages,
                           struct ttm_bo_kmap_obj *map)
{
        struct ttm_mem_reg *mem = &bo->mem;
        struct ttm_operation_ctx ctx = {
                .interruptible = false,
                .no_wait_gpu = false
        };
        struct ttm_tt *ttm = bo->ttm;
        pgprot_t prot;
        int ret;

        BUG_ON(!ttm);

        ret = ttm_tt_populate(ttm, &ctx);
        if (ret)
                return ret;

        if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED)) {
                /*
                 * We're mapping a single page, and the desired
                 * page protection is consistent with the bo.
                 */

                map->bo_kmap_type = ttm_bo_map_kmap;
                map->page = ttm->pages[start_page];
                map->virtual = kmap(map->page);
        } else {
                /*
                 * We need to use vmap to get the desired page protection
                 * or to make the buffer object look contiguous.
                 */
                prot = ttm_io_prot(mem->placement, PAGE_KERNEL);
                map->bo_kmap_type = ttm_bo_map_vmap;
                map->virtual = vmap(ttm->pages + start_page, num_pages,
                                    0, prot);
        }
        return (!map->virtual) ? -ENOMEM : 0;
}

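/**
 * ttm_bo_kmap - Map part of a buffer object into kernel address space.
 *
 * @bo: The buffer object.
 * @start_page: The first page of the region to map.
 * @num_pages: Number of pages to map.
 * @map: The ttm_bo_kmap_obj describing the resulting mapping.
 *
 * Uses ioremap for io memory and kmap/vmap for system memory, recording
 * the mapping type in @map so that ttm_bo_kunmap() can undo it.
 *
 * Returns:
 * 0 on success, -EINVAL for an out-of-range request, or a negative error
 * code if the mapping could not be set up.
 */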
int ttm_bo_kmap(struct ttm_buffer_object *bo,
                unsigned long start_page, unsigned long num_pages,
                struct ttm_bo_kmap_obj *map)
{
        struct ttm_mem_type_manager *man =
                &bo->bdev->man[bo->mem.mem_type];
        unsigned long offset, size;
        int ret;

        map->virtual = NULL;
        map->bo = bo;
        if (num_pages > bo->num_pages)
                return -EINVAL;
        if (start_page > bo->num_pages)
                return -EINVAL;

        (void) ttm_mem_io_lock(man, false);
        ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
        ttm_mem_io_unlock(man);
        if (ret)
                return ret;
        if (!bo->mem.bus.is_iomem) {
                return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
        } else {
                offset = start_page << PAGE_SHIFT;
                size = num_pages << PAGE_SHIFT;
                return ttm_bo_ioremap(bo, offset, size, map);
        }
}
EXPORT_SYMBOL(ttm_bo_kmap);

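/**
 * ttm_bo_kunmap - Tear down a mapping created by ttm_bo_kmap().
 *
 * @map: The ttm_bo_kmap_obj returned by ttm_bo_kmap().
 *
 * Unmaps according to the recorded mapping type and drops the io
 * reservation taken when the mapping was set up.
 */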
void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
{
        struct ttm_buffer_object *bo = map->bo;
        struct ttm_mem_type_manager *man =
                &bo->bdev->man[bo->mem.mem_type];

        if (!map->virtual)
                return;
        switch (map->bo_kmap_type) {
        case ttm_bo_map_iomap:
                iounmap(map->virtual);
                break;
        case ttm_bo_map_vmap:
                vunmap(map->virtual);
                break;
        case ttm_bo_map_kmap:
                kunmap(map->page);
                break;
        case ttm_bo_map_premapped:
                break;
        default:
                BUG();
        }
        (void) ttm_mem_io_lock(man, false);
        ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
        ttm_mem_io_unlock(man);
        map->virtual = NULL;
        map->page = NULL;
}
EXPORT_SYMBOL(ttm_bo_kunmap);

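/**
 * ttm_bo_move_accel_cleanup - Clean up after an accelerated (GPU) move.
 *
 * @bo: The buffer object that was moved.
 * @fence: The fence that signals completion of the move.
 * @evict: Whether this move is an eviction.
 * @new_mem: The new memory region.
 *
 * Attaches @fence as the exclusive fence of @bo. For evictions, waits for
 * the move to finish before releasing the old resources. Otherwise the old
 * placement is handed to a ghost buffer object that is released once
 * @fence signals, so the move can be pipelined.
 *
 * Returns:
 * 0 on success, a negative error code otherwise.
 */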
int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
                              struct dma_fence *fence,
                              bool evict,
                              struct ttm_mem_reg *new_mem)
{
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
        struct ttm_mem_reg *old_mem = &bo->mem;
        int ret;
        struct ttm_buffer_object *ghost_obj;

        reservation_object_add_excl_fence(bo->resv, fence);
        if (evict) {
                ret = ttm_bo_wait(bo, false, false);
                if (ret)
                        return ret;

                if (man->flags & TTM_MEMTYPE_FLAG_FIXED) {
                        ttm_tt_destroy(bo->ttm);
                        bo->ttm = NULL;
                }
                ttm_bo_free_old_node(bo);
        } else {
                /**
                 * This should help pipeline ordinary buffer moves.
                 *
                 * Hang old buffer memory on a new buffer object,
                 * and leave it to be released when the GPU
                 * operation has completed.
                 */

                dma_fence_put(bo->moving);
                bo->moving = dma_fence_get(fence);

                ret = ttm_buffer_object_transfer(bo, &ghost_obj);
                if (ret)
                        return ret;

                reservation_object_add_excl_fence(ghost_obj->resv, fence);

                /**
                 * If we're not moving to fixed memory, the TTM object
                 * needs to stay alive. Otherwise hang it on the ghost
                 * bo to be unbound and destroyed.
                 */

                if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED))
                        ghost_obj->ttm = NULL;
                else
                        bo->ttm = NULL;

                ttm_bo_unreserve(ghost_obj);
                ttm_bo_put(ghost_obj);
        }

        *old_mem = *new_mem;
        new_mem->mm_node = NULL;

        return 0;
}
EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);

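/**
 * ttm_bo_pipeline_move - Set up a pipelined (asynchronous) move.
 *
 * @bo: The buffer object that is being moved.
 * @fence: The fence that signals completion of the move.
 * @evict: Whether this move is an eviction.
 * @new_mem: The new memory region.
 *
 * Like ttm_bo_move_accel_cleanup(), but evictions from fixed memory are
 * also pipelined by attaching @fence to the source memory type manager
 * instead of waiting for the move to complete.
 *
 * Returns:
 * 0 on success, a negative error code otherwise.
 */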
int ttm_bo_pipeline_move(struct ttm_buffer_object *bo,
                         struct dma_fence *fence, bool evict,
                         struct ttm_mem_reg *new_mem)
{
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_mem_reg *old_mem = &bo->mem;

        struct ttm_mem_type_manager *from = &bdev->man[old_mem->mem_type];
        struct ttm_mem_type_manager *to = &bdev->man[new_mem->mem_type];

        int ret;

        reservation_object_add_excl_fence(bo->resv, fence);

        if (!evict) {
                struct ttm_buffer_object *ghost_obj;

                /**
                 * This should help pipeline ordinary buffer moves.
                 *
                 * Hang old buffer memory on a new buffer object,
                 * and leave it to be released when the GPU
                 * operation has completed.
                 */

                dma_fence_put(bo->moving);
                bo->moving = dma_fence_get(fence);

                ret = ttm_buffer_object_transfer(bo, &ghost_obj);
                if (ret)
                        return ret;

                reservation_object_add_excl_fence(ghost_obj->resv, fence);

                /**
                 * If we're not moving to fixed memory, the TTM object
                 * needs to stay alive. Otherwise hang it on the ghost
                 * bo to be unbound and destroyed.
                 */

                if (!(to->flags & TTM_MEMTYPE_FLAG_FIXED))
                        ghost_obj->ttm = NULL;
                else
                        bo->ttm = NULL;

                ttm_bo_unreserve(ghost_obj);
                ttm_bo_put(ghost_obj);

        } else if (from->flags & TTM_MEMTYPE_FLAG_FIXED) {

                /**
                 * BO doesn't have a TTM we need to bind/unbind. Just remember
                 * this eviction and free up the allocation.
                 */

                spin_lock(&from->move_lock);
                if (!from->move || dma_fence_is_later(fence, from->move)) {
                        dma_fence_put(from->move);
                        from->move = dma_fence_get(fence);
                }
                spin_unlock(&from->move_lock);

                ttm_bo_free_old_node(bo);

                dma_fence_put(bo->moving);
                bo->moving = dma_fence_get(fence);

        } else {
                /**
                 * Last resort, wait for the move to be completed.
                 *
                 * Should never happen in practice.
                 */

                ret = ttm_bo_wait(bo, false, false);
                if (ret)
                        return ret;

                if (to->flags & TTM_MEMTYPE_FLAG_FIXED) {
                        ttm_tt_destroy(bo->ttm);
                        bo->ttm = NULL;
                }
                ttm_bo_free_old_node(bo);
        }

        *old_mem = *new_mem;
        new_mem->mm_node = NULL;

        return 0;
}
EXPORT_SYMBOL(ttm_bo_pipeline_move);

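/**
 * ttm_bo_pipeline_gutting - Discard a buffer object's backing store.
 *
 * @bo: The buffer object.
 *
 * Transfers the current placement and its fences to a ghost object, then
 * resets @bo to an empty system-memory placement without a TTM. If the
 * fences cannot be copied, the buffer is waited on instead.
 *
 * Returns:
 * 0 on success, a negative error code otherwise.
 */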
int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo)
{
        struct ttm_buffer_object *ghost;
        int ret;

        ret = ttm_buffer_object_transfer(bo, &ghost);
        if (ret)
                return ret;

        ret = reservation_object_copy_fences(ghost->resv, bo->resv);
        /* Last resort, wait for the BO to be idle when we are OOM */
        if (ret)
                ttm_bo_wait(bo, false, false);

        memset(&bo->mem, 0, sizeof(bo->mem));
        bo->mem.mem_type = TTM_PL_SYSTEM;
        bo->ttm = NULL;

        ttm_bo_unreserve(ghost);
        ttm_bo_put(ghost);

        return 0;
}