linux/drivers/gpu/drm/ttm/ttm_bo_util.c
/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
 *
 * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/drm_cache.h>
#include <drm/drm_vma_manager.h>
#include <linux/dma-buf-map.h>
#include <linux/io.h>
#include <linux/highmem.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/dma-resv.h>

struct ttm_transfer_obj {
        struct ttm_buffer_object base;
        struct ttm_buffer_object *bo;
};

int ttm_mem_io_reserve(struct ttm_device *bdev,
                       struct ttm_resource *mem)
{
        if (mem->bus.offset || mem->bus.addr)
                return 0;

        mem->bus.is_iomem = false;
        if (!bdev->funcs->io_mem_reserve)
                return 0;

        return bdev->funcs->io_mem_reserve(bdev, mem);
}

void ttm_mem_io_free(struct ttm_device *bdev,
                     struct ttm_resource *mem)
{
        if (!mem)
                return;

        if (!mem->bus.offset && !mem->bus.addr)
                return;

        if (bdev->funcs->io_mem_free)
                bdev->funcs->io_mem_free(bdev, mem);

        mem->bus.offset = 0;
        mem->bus.addr = NULL;
}

/**
 * ttm_move_memcpy - Helper to perform a memcpy ttm move operation.
 * @bo: The struct ttm_buffer_object.
 * @num_pages: The number of pages to copy.
 * @dst_iter: A struct ttm_kmap_iter representing the destination resource.
 * @src_iter: A struct ttm_kmap_iter representing the source resource.
 *
 * This function is intended to be able to run asynchronously under a
 * dma-fence if desired.
 * (A driver-side usage sketch follows ttm_bo_move_memcpy() below.)
 */
void ttm_move_memcpy(struct ttm_buffer_object *bo,
                     u32 num_pages,
                     struct ttm_kmap_iter *dst_iter,
                     struct ttm_kmap_iter *src_iter)
{
        const struct ttm_kmap_iter_ops *dst_ops = dst_iter->ops;
        const struct ttm_kmap_iter_ops *src_ops = src_iter->ops;
        struct ttm_tt *ttm = bo->ttm;
        struct dma_buf_map src_map, dst_map;
        pgoff_t i;

        /* Single TTM move. NOP */
        if (dst_ops->maps_tt && src_ops->maps_tt)
                return;

        /* Don't move nonexistent data. Clear destination instead. */
        if (src_ops->maps_tt && (!ttm || !ttm_tt_is_populated(ttm))) {
                if (ttm && !(ttm->page_flags & TTM_PAGE_FLAG_ZERO_ALLOC))
                        return;

                for (i = 0; i < num_pages; ++i) {
                        dst_ops->map_local(dst_iter, &dst_map, i);
                        if (dst_map.is_iomem)
                                memset_io(dst_map.vaddr_iomem, 0, PAGE_SIZE);
                        else
                                memset(dst_map.vaddr, 0, PAGE_SIZE);
                        if (dst_ops->unmap_local)
                                dst_ops->unmap_local(dst_iter, &dst_map);
                }
                return;
        }

        for (i = 0; i < num_pages; ++i) {
                dst_ops->map_local(dst_iter, &dst_map, i);
                src_ops->map_local(src_iter, &src_map, i);

                drm_memcpy_from_wc(&dst_map, &src_map, PAGE_SIZE);

                if (src_ops->unmap_local)
                        src_ops->unmap_local(src_iter, &src_map);
                if (dst_ops->unmap_local)
                        dst_ops->unmap_local(dst_iter, &dst_map);
        }
}
EXPORT_SYMBOL(ttm_move_memcpy);

int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
                       struct ttm_operation_ctx *ctx,
                       struct ttm_resource *dst_mem)
{
        struct ttm_device *bdev = bo->bdev;
        struct ttm_resource_manager *dst_man =
                ttm_manager_type(bo->bdev, dst_mem->mem_type);
        struct ttm_tt *ttm = bo->ttm;
        struct ttm_resource *src_mem = bo->resource;
        struct ttm_resource_manager *src_man =
                ttm_manager_type(bdev, src_mem->mem_type);
        union {
                struct ttm_kmap_iter_tt tt;
                struct ttm_kmap_iter_linear_io io;
        } _dst_iter, _src_iter;
        struct ttm_kmap_iter *dst_iter, *src_iter;
        int ret = 0;

        if (ttm && ((ttm->page_flags & TTM_PAGE_FLAG_SWAPPED) ||
                    dst_man->use_tt)) {
                ret = ttm_tt_populate(bdev, ttm, ctx);
                if (ret)
                        return ret;
        }

        dst_iter = ttm_kmap_iter_linear_io_init(&_dst_iter.io, bdev, dst_mem);
        if (PTR_ERR(dst_iter) == -EINVAL && dst_man->use_tt)
                dst_iter = ttm_kmap_iter_tt_init(&_dst_iter.tt, bo->ttm);
        if (IS_ERR(dst_iter))
                return PTR_ERR(dst_iter);

        src_iter = ttm_kmap_iter_linear_io_init(&_src_iter.io, bdev, src_mem);
        if (PTR_ERR(src_iter) == -EINVAL && src_man->use_tt)
                src_iter = ttm_kmap_iter_tt_init(&_src_iter.tt, bo->ttm);
        if (IS_ERR(src_iter)) {
                ret = PTR_ERR(src_iter);
                goto out_src_iter;
        }

        ttm_move_memcpy(bo, dst_mem->num_pages, dst_iter, src_iter);

        if (!src_iter->ops->maps_tt)
                ttm_kmap_iter_linear_io_fini(&_src_iter.io, bdev, src_mem);
        ttm_bo_move_sync_cleanup(bo, dst_mem);

out_src_iter:
        if (!dst_iter->ops->maps_tt)
                ttm_kmap_iter_linear_io_fini(&_dst_iter.io, bdev, dst_mem);

        return ret;
}
EXPORT_SYMBOL(ttm_bo_move_memcpy);
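
/*
 * Usage sketch (illustrative only, not part of this file): a driver's
 * struct ttm_device_funcs.move() callback can fall back to
 * ttm_bo_move_memcpy() when no hardware copy engine is usable. The
 * "mydrv" names are placeholders, and the callback signature below is
 * assumed to match the current TTM move() hook.
 *
 *      static int mydrv_bo_move(struct ttm_buffer_object *bo, bool evict,
 *                               struct ttm_operation_ctx *ctx,
 *                               struct ttm_resource *new_mem,
 *                               struct ttm_place *hop)
 *      {
 *              int ret;
 *
 *              // Hypothetical hardware blit; returns -ENODEV if unusable.
 *              ret = mydrv_copy_with_engine(bo, ctx, new_mem);
 *              if (ret != -ENODEV)
 *                      return ret;
 *
 *              // CPU fallback; also assigns new_mem to the bo on success.
 *              return ttm_bo_move_memcpy(bo, ctx, new_mem);
 *      }
 */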

static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
{
        struct ttm_transfer_obj *fbo;

        fbo = container_of(bo, struct ttm_transfer_obj, base);
        dma_resv_fini(&fbo->base.base._resv);
        ttm_bo_put(fbo->bo);
        kfree(fbo);
}

/**
 * ttm_buffer_object_transfer - transfer the current placement of a bo
 * to a new ghost object.
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @new_obj: A pointer to a pointer to a newly created ttm_buffer_object,
 * holding the data of @bo with the old placement.
 *
 * This is a utility function that may be called after an accelerated move
 * has been scheduled. A new buffer object is created as a placeholder for
 * the old data while it's being copied. When that buffer object is idle,
 * it can be destroyed, releasing the space of the old placement.
 * Returns:
 * 0: Success.
 * !0: Failure.
 */
static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
                                      struct ttm_buffer_object **new_obj)
{
        struct ttm_transfer_obj *fbo;
        int ret;

        fbo = kmalloc(sizeof(*fbo), GFP_KERNEL);
        if (!fbo)
                return -ENOMEM;

        fbo->base = *bo;

        ttm_bo_get(bo);
        fbo->bo = bo;

        /*
         * Fix up members that we shouldn't copy directly:
         * TODO: Explicit member copy would probably be better here.
         */

        atomic_inc(&ttm_glob.bo_count);
        INIT_LIST_HEAD(&fbo->base.ddestroy);
        INIT_LIST_HEAD(&fbo->base.lru);
        fbo->base.moving = NULL;
        drm_vma_node_reset(&fbo->base.base.vma_node);

        kref_init(&fbo->base.kref);
        fbo->base.destroy = &ttm_transfered_destroy;
        fbo->base.pin_count = 0;
        if (bo->type != ttm_bo_type_sg)
                fbo->base.base.resv = &fbo->base.base._resv;

        dma_resv_init(&fbo->base.base._resv);
        fbo->base.base.dev = NULL;
        ret = dma_resv_trylock(&fbo->base.base._resv);
        WARN_ON(!ret);

        ttm_bo_move_to_lru_tail_unlocked(&fbo->base);

        *new_obj = &fbo->base;
        return 0;
}
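
/*
 * Condensed sketch of the ghost-object dance performed by the helpers
 * below (see ttm_bo_move_to_ghost()): the old placement is handed to a
 * throw-away bo that carries the move fence, so the old space is only
 * released once the copy has finished. "move_fence" stands for the
 * fence returned by the driver's copy job.
 *
 *      struct ttm_buffer_object *ghost;
 *      int ret;
 *
 *      ret = ttm_buffer_object_transfer(bo, &ghost);
 *      if (!ret) {
 *              dma_resv_add_excl_fence(&ghost->base._resv, move_fence);
 *              dma_resv_unlock(&ghost->base._resv);
 *              ttm_bo_put(ghost);      // released once the fence signals
 *      }
 */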

pgprot_t ttm_io_prot(struct ttm_buffer_object *bo, struct ttm_resource *res,
                     pgprot_t tmp)
{
        struct ttm_resource_manager *man;
        enum ttm_caching caching;

        man = ttm_manager_type(bo->bdev, res->mem_type);
        caching = man->use_tt ? bo->ttm->caching : res->bus.caching;

        return ttm_prot_from_caching(caching, tmp);
}
EXPORT_SYMBOL(ttm_io_prot);

static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
                          unsigned long offset,
                          unsigned long size,
                          struct ttm_bo_kmap_obj *map)
{
        struct ttm_resource *mem = bo->resource;

        if (bo->resource->bus.addr) {
                map->bo_kmap_type = ttm_bo_map_premapped;
                map->virtual = ((u8 *)bo->resource->bus.addr) + offset;
        } else {
                resource_size_t res = bo->resource->bus.offset + offset;

                map->bo_kmap_type = ttm_bo_map_iomap;
                if (mem->bus.caching == ttm_write_combined)
                        map->virtual = ioremap_wc(res, size);
#ifdef CONFIG_X86
                else if (mem->bus.caching == ttm_cached)
                        map->virtual = ioremap_cache(res, size);
#endif
                else
                        map->virtual = ioremap(res, size);
        }
        return (!map->virtual) ? -ENOMEM : 0;
}

static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
                           unsigned long start_page,
                           unsigned long num_pages,
                           struct ttm_bo_kmap_obj *map)
{
        struct ttm_resource *mem = bo->resource;
        struct ttm_operation_ctx ctx = {
                .interruptible = false,
                .no_wait_gpu = false
        };
        struct ttm_tt *ttm = bo->ttm;
        pgprot_t prot;
        int ret;

        BUG_ON(!ttm);

        ret = ttm_tt_populate(bo->bdev, ttm, &ctx);
        if (ret)
                return ret;

        if (num_pages == 1 && ttm->caching == ttm_cached) {
                /*
                 * We're mapping a single page, and the desired
                 * page protection is consistent with the bo.
                 */

                map->bo_kmap_type = ttm_bo_map_kmap;
                map->page = ttm->pages[start_page];
                map->virtual = kmap(map->page);
        } else {
                /*
                 * We need to use vmap to get the desired page protection
                 * or to make the buffer object look contiguous.
                 */
                prot = ttm_io_prot(bo, mem, PAGE_KERNEL);
                map->bo_kmap_type = ttm_bo_map_vmap;
                map->virtual = vmap(ttm->pages + start_page, num_pages,
                                    0, prot);
        }
        return (!map->virtual) ? -ENOMEM : 0;
}

int ttm_bo_kmap(struct ttm_buffer_object *bo,
                unsigned long start_page, unsigned long num_pages,
                struct ttm_bo_kmap_obj *map)
{
        unsigned long offset, size;
        int ret;

        map->virtual = NULL;
        map->bo = bo;
        if (num_pages > bo->resource->num_pages)
                return -EINVAL;
        if ((start_page + num_pages) > bo->resource->num_pages)
                return -EINVAL;

        ret = ttm_mem_io_reserve(bo->bdev, bo->resource);
        if (ret)
                return ret;
        if (!bo->resource->bus.is_iomem) {
                return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
        } else {
                offset = start_page << PAGE_SHIFT;
                size = num_pages << PAGE_SHIFT;
                return ttm_bo_ioremap(bo, offset, size, map);
        }
}
EXPORT_SYMBOL(ttm_bo_kmap);

void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
{
        if (!map->virtual)
                return;
        switch (map->bo_kmap_type) {
        case ttm_bo_map_iomap:
                iounmap(map->virtual);
                break;
        case ttm_bo_map_vmap:
                vunmap(map->virtual);
                break;
        case ttm_bo_map_kmap:
                kunmap(map->page);
                break;
        case ttm_bo_map_premapped:
                break;
        default:
                BUG();
        }
        ttm_mem_io_free(map->bo->bdev, map->bo->resource);
        map->virtual = NULL;
        map->page = NULL;
}
EXPORT_SYMBOL(ttm_bo_kunmap);
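
/*
 * Usage sketch for the kmap interface (illustrative only): map a single
 * page of a reserved bo and honour the is_iomem flag reported by
 * ttm_kmap_obj_virtual(). "data" and "len" are placeholders supplied by
 * the caller.
 *
 *      struct ttm_bo_kmap_obj map;
 *      bool is_iomem;
 *      void *virtual;
 *      int ret;
 *
 *      ret = ttm_bo_kmap(bo, 0, 1, &map);
 *      if (ret)
 *              return ret;
 *
 *      virtual = ttm_kmap_obj_virtual(&map, &is_iomem);
 *      if (is_iomem)
 *              memcpy_toio((void __iomem *)virtual, data, len);
 *      else
 *              memcpy(virtual, data, len);
 *
 *      ttm_bo_kunmap(&map);
 */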

int ttm_bo_vmap(struct ttm_buffer_object *bo, struct dma_buf_map *map)
{
        struct ttm_resource *mem = bo->resource;
        int ret;

        ret = ttm_mem_io_reserve(bo->bdev, mem);
        if (ret)
                return ret;

        if (mem->bus.is_iomem) {
                void __iomem *vaddr_iomem;

                if (mem->bus.addr)
                        vaddr_iomem = (void __iomem *)mem->bus.addr;
                else if (mem->bus.caching == ttm_write_combined)
                        vaddr_iomem = ioremap_wc(mem->bus.offset,
                                                 bo->base.size);
#ifdef CONFIG_X86
                else if (mem->bus.caching == ttm_cached)
                        vaddr_iomem = ioremap_cache(mem->bus.offset,
                                                    bo->base.size);
#endif
                else
                        vaddr_iomem = ioremap(mem->bus.offset, bo->base.size);

                if (!vaddr_iomem)
                        return -ENOMEM;

                dma_buf_map_set_vaddr_iomem(map, vaddr_iomem);

        } else {
                struct ttm_operation_ctx ctx = {
                        .interruptible = false,
                        .no_wait_gpu = false
                };
                struct ttm_tt *ttm = bo->ttm;
                pgprot_t prot;
                void *vaddr;

                ret = ttm_tt_populate(bo->bdev, ttm, &ctx);
                if (ret)
                        return ret;

                /*
                 * We need to use vmap to get the desired page protection
                 * or to make the buffer object look contiguous.
                 */
                prot = ttm_io_prot(bo, mem, PAGE_KERNEL);
                vaddr = vmap(ttm->pages, ttm->num_pages, 0, prot);
                if (!vaddr)
                        return -ENOMEM;

                dma_buf_map_set_vaddr(map, vaddr);
        }

        return 0;
}
EXPORT_SYMBOL(ttm_bo_vmap);

void ttm_bo_vunmap(struct ttm_buffer_object *bo, struct dma_buf_map *map)
{
        struct ttm_resource *mem = bo->resource;

        if (dma_buf_map_is_null(map))
                return;

        if (!map->is_iomem)
                vunmap(map->vaddr);
        else if (!mem->bus.addr)
                iounmap(map->vaddr_iomem);
        dma_buf_map_clear(map);

        ttm_mem_io_free(bo->bdev, bo->resource);
}
EXPORT_SYMBOL(ttm_bo_vunmap);
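
/*
 * Usage sketch for the vmap interface (illustrative only): map the whole
 * bo and use the struct dma_buf_map fields so the same code handles both
 * iomem and system memory. "data" and "size" are placeholders.
 *
 *      struct dma_buf_map map;
 *      int ret;
 *
 *      ret = ttm_bo_vmap(bo, &map);
 *      if (ret)
 *              return ret;
 *
 *      if (map.is_iomem)
 *              memcpy_toio(map.vaddr_iomem, data, size);
 *      else
 *              memcpy(map.vaddr, data, size);
 *
 *      ttm_bo_vunmap(bo, &map);
 */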

static int ttm_bo_wait_free_node(struct ttm_buffer_object *bo,
                                 bool dst_use_tt)
{
        int ret;

        ret = ttm_bo_wait(bo, false, false);
        if (ret)
                return ret;

        if (!dst_use_tt)
                ttm_bo_tt_destroy(bo);
        ttm_resource_free(bo, &bo->resource);
        return 0;
}

static int ttm_bo_move_to_ghost(struct ttm_buffer_object *bo,
                                struct dma_fence *fence,
                                bool dst_use_tt)
{
        struct ttm_buffer_object *ghost_obj;
        int ret;

        /*
         * This should help pipeline ordinary buffer moves.
         *
         * Hang old buffer memory on a new buffer object,
         * and leave it to be released when the GPU
         * operation has completed.
         */

        dma_fence_put(bo->moving);
        bo->moving = dma_fence_get(fence);

        ret = ttm_buffer_object_transfer(bo, &ghost_obj);
        if (ret)
                return ret;

        dma_resv_add_excl_fence(&ghost_obj->base._resv, fence);

        /*
         * If we're not moving to fixed memory, the TTM object
         * needs to stay alive. Otherwise hang it on the ghost
         * bo to be unbound and destroyed.
         */

        if (dst_use_tt)
                ghost_obj->ttm = NULL;
        else
                bo->ttm = NULL;
        bo->resource = NULL;

        dma_resv_unlock(&ghost_obj->base._resv);
        ttm_bo_put(ghost_obj);
        return 0;
}

static void ttm_bo_move_pipeline_evict(struct ttm_buffer_object *bo,
                                       struct dma_fence *fence)
{
        struct ttm_device *bdev = bo->bdev;
        struct ttm_resource_manager *from;

        from = ttm_manager_type(bdev, bo->resource->mem_type);

        /*
         * The BO doesn't have a TTM that we need to bind/unbind. Just
         * remember this eviction and free up the allocation.
         */
        spin_lock(&from->move_lock);
        if (!from->move || dma_fence_is_later(fence, from->move)) {
                dma_fence_put(from->move);
                from->move = dma_fence_get(fence);
        }
        spin_unlock(&from->move_lock);

        ttm_resource_free(bo, &bo->resource);

        dma_fence_put(bo->moving);
        bo->moving = dma_fence_get(fence);
}

int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
                              struct dma_fence *fence,
                              bool evict,
                              bool pipeline,
                              struct ttm_resource *new_mem)
{
        struct ttm_device *bdev = bo->bdev;
        struct ttm_resource_manager *from = ttm_manager_type(bdev, bo->resource->mem_type);
        struct ttm_resource_manager *man = ttm_manager_type(bdev, new_mem->mem_type);
        int ret = 0;

        dma_resv_add_excl_fence(bo->base.resv, fence);
        if (!evict)
                ret = ttm_bo_move_to_ghost(bo, fence, man->use_tt);
        else if (!from->use_tt && pipeline)
                ttm_bo_move_pipeline_evict(bo, fence);
        else
                ret = ttm_bo_wait_free_node(bo, man->use_tt);

        if (ret)
                return ret;

        ttm_bo_assign_mem(bo, new_mem);

        return 0;
}
EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);
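
/*
 * Usage sketch (illustrative only): a driver that has queued a copy-engine
 * job for the move hands the resulting fence to
 * ttm_bo_move_accel_cleanup() and lets TTM pipeline the release of the
 * old placement. mydrv_copy_buffer() is a placeholder for the driver's
 * blit submission; the helper takes its own fence references, so the
 * caller drops its reference afterwards.
 *
 *      struct dma_fence *fence;
 *      int ret;
 *
 *      fence = mydrv_copy_buffer(bo, bo->resource, new_mem);
 *      if (IS_ERR(fence))
 *              return PTR_ERR(fence);
 *
 *      ret = ttm_bo_move_accel_cleanup(bo, fence, evict, true, new_mem);
 *      dma_fence_put(fence);
 *      return ret;
 */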

/**
 * ttm_bo_pipeline_gutting - purge the contents of a bo
 * @bo: The buffer object
 *
 * Purge the contents of a bo, async if the bo is not idle.
 * After a successful call, the bo is left unpopulated in
 * system placement. The function may wait uninterruptibly
 * for idle on OOM.
 *
 * Return: 0 if successful, negative error code on failure.
 */
int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo)
{
        static const struct ttm_place sys_mem = { .mem_type = TTM_PL_SYSTEM };
        struct ttm_buffer_object *ghost;
        struct ttm_resource *sys_res;
        struct ttm_tt *ttm;
        int ret;

        ret = ttm_resource_alloc(bo, &sys_mem, &sys_res);
        if (ret)
                return ret;

        /* If already idle, no need for ghost object dance. */
        ret = ttm_bo_wait(bo, false, true);
        if (ret != -EBUSY) {
                if (!bo->ttm) {
                        /* See comment below about clearing. */
                        ret = ttm_tt_create(bo, true);
                        if (ret)
                                goto error_free_sys_mem;
                } else {
                        ttm_tt_unpopulate(bo->bdev, bo->ttm);
                        if (bo->type == ttm_bo_type_device)
                                ttm_tt_mark_for_clear(bo->ttm);
                }
                ttm_resource_free(bo, &bo->resource);
                ttm_bo_assign_mem(bo, sys_res);
                return 0;
        }

        /*
         * We need an unpopulated ttm_tt after giving our current one,
         * if any, to the ghost object. And we can't afford to fail
         * creating one *after* the operation. If the bo subsequently gets
         * resurrected, make sure it's cleared (if ttm_bo_type_device)
         * to avoid leaking sensitive information to user-space.
         */

        ttm = bo->ttm;
        bo->ttm = NULL;
        ret = ttm_tt_create(bo, true);
        swap(bo->ttm, ttm);
        if (ret)
                goto error_free_sys_mem;

        ret = ttm_buffer_object_transfer(bo, &ghost);
        if (ret)
                goto error_destroy_tt;

        ret = dma_resv_copy_fences(&ghost->base._resv, bo->base.resv);
        /* Last resort, wait for the BO to be idle when we are OOM */
        if (ret)
                ttm_bo_wait(bo, false, false);

        dma_resv_unlock(&ghost->base._resv);
        ttm_bo_put(ghost);
        bo->ttm = ttm;
        bo->resource = NULL;
        ttm_bo_assign_mem(bo, sys_res);
        return 0;

error_destroy_tt:
        ttm_tt_destroy(bo->bdev, ttm);

error_free_sys_mem:
        ttm_resource_free(bo, &sys_res);
        return ret;
}
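
/*
 * Usage sketch (illustrative only): a driver implementing a purge /
 * "don't need" hint can drop a possibly-busy bo's backing store with
 * ttm_bo_pipeline_gutting(). The bo must be reserved;
 * mydrv_bo_is_purgeable() is a placeholder for the driver's own policy.
 *
 *      int ret = 0;
 *
 *      dma_resv_lock(bo->base.resv, NULL);
 *      if (mydrv_bo_is_purgeable(bo))
 *              ret = ttm_bo_pipeline_gutting(bo);
 *      dma_resv_unlock(bo->base.resv);
 *      return ret;
 */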