linux/drivers/gpu/drm/ttm/ttm_bo.c
   1/* SPDX-License-Identifier: GPL-2.0 OR MIT */
   2/**************************************************************************
   3 *
   4 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
   5 * All Rights Reserved.
   6 *
   7 * Permission is hereby granted, free of charge, to any person obtaining a
   8 * copy of this software and associated documentation files (the
   9 * "Software"), to deal in the Software without restriction, including
  10 * without limitation the rights to use, copy, modify, merge, publish,
  11 * distribute, sub license, and/or sell copies of the Software, and to
  12 * permit persons to whom the Software is furnished to do so, subject to
  13 * the following conditions:
  14 *
  15 * The above copyright notice and this permission notice (including the
  16 * next paragraph) shall be included in all copies or substantial portions
  17 * of the Software.
  18 *
  19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  21 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
  22 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
  23 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
  24 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
  25 * USE OR OTHER DEALINGS IN THE SOFTWARE.
  26 *
  27 **************************************************************************/
  28/*
  29 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
  30 */
  31
  32#define pr_fmt(fmt) "[TTM] " fmt
  33
  34#include <drm/ttm/ttm_module.h>
  35#include <drm/ttm/ttm_bo_driver.h>
  36#include <drm/ttm/ttm_placement.h>
  37#include <linux/jiffies.h>
  38#include <linux/slab.h>
  39#include <linux/sched.h>
  40#include <linux/mm.h>
  41#include <linux/file.h>
  42#include <linux/module.h>
  43#include <linux/atomic.h>
  44#include <linux/reservation.h>
  45
  46static void ttm_bo_global_kobj_release(struct kobject *kobj);
  47
  48/**
  49 * ttm_global_mutex - protecting the global BO state
  50 */
  51DEFINE_MUTEX(ttm_global_mutex);
  52unsigned ttm_bo_glob_use_count;
  53struct ttm_bo_global ttm_bo_glob;
  54
  55static struct attribute ttm_bo_count = {
  56        .name = "bo_count",
  57        .mode = S_IRUGO
  58};
  59
  60/* default destructor */
  61static void ttm_bo_default_destroy(struct ttm_buffer_object *bo)
  62{
  63        kfree(bo);
  64}
  65
  66static inline int ttm_mem_type_from_place(const struct ttm_place *place,
  67                                          uint32_t *mem_type)
  68{
  69        int pos;
  70
  71        pos = ffs(place->flags & TTM_PL_MASK_MEM);
  72        if (unlikely(!pos))
  73                return -EINVAL;
  74
  75        *mem_type = pos - 1;
  76        return 0;
  77}
  78
  79static void ttm_mem_type_debug(struct ttm_bo_device *bdev, struct drm_printer *p,
  80                               int mem_type)
  81{
  82        struct ttm_mem_type_manager *man = &bdev->man[mem_type];
  83
  84        drm_printf(p, "    has_type: %d\n", man->has_type);
  85        drm_printf(p, "    use_type: %d\n", man->use_type);
  86        drm_printf(p, "    flags: 0x%08X\n", man->flags);
  87        drm_printf(p, "    gpu_offset: 0x%08llX\n", man->gpu_offset);
  88        drm_printf(p, "    size: %llu\n", man->size);
  89        drm_printf(p, "    available_caching: 0x%08X\n", man->available_caching);
  90        drm_printf(p, "    default_caching: 0x%08X\n", man->default_caching);
  91        if (mem_type != TTM_PL_SYSTEM)
  92                (*man->func->debug)(man, p);
  93}
  94
  95static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
  96                                        struct ttm_placement *placement)
  97{
  98        struct drm_printer p = drm_debug_printer(TTM_PFX);
  99        int i, ret, mem_type;
 100
 101        drm_printf(&p, "No space for %p (%lu pages, %luK, %luM)\n",
 102                   bo, bo->mem.num_pages, bo->mem.size >> 10,
 103                   bo->mem.size >> 20);
 104        for (i = 0; i < placement->num_placement; i++) {
 105                ret = ttm_mem_type_from_place(&placement->placement[i],
 106                                                &mem_type);
 107                if (ret)
 108                        return;
 109                drm_printf(&p, "  placement[%d]=0x%08X (%d)\n",
 110                           i, placement->placement[i].flags, mem_type);
 111                ttm_mem_type_debug(bo->bdev, &p, mem_type);
 112        }
 113}
 114
 115static ssize_t ttm_bo_global_show(struct kobject *kobj,
 116                                  struct attribute *attr,
 117                                  char *buffer)
 118{
 119        struct ttm_bo_global *glob =
 120                container_of(kobj, struct ttm_bo_global, kobj);
 121
 122        return snprintf(buffer, PAGE_SIZE, "%d\n",
 123                                atomic_read(&glob->bo_count));
 124}
 125
 126static struct attribute *ttm_bo_global_attrs[] = {
 127        &ttm_bo_count,
 128        NULL
 129};
 130
 131static const struct sysfs_ops ttm_bo_global_ops = {
 132        .show = &ttm_bo_global_show
 133};
 134
 135static struct kobj_type ttm_bo_glob_kobj_type  = {
 136        .release = &ttm_bo_global_kobj_release,
 137        .sysfs_ops = &ttm_bo_global_ops,
 138        .default_attrs = ttm_bo_global_attrs
 139};
 140
 141
 142static inline uint32_t ttm_bo_type_flags(unsigned type)
 143{
 144        return 1 << (type);
 145}
 146
 147static void ttm_bo_release_list(struct kref *list_kref)
 148{
 149        struct ttm_buffer_object *bo =
 150            container_of(list_kref, struct ttm_buffer_object, list_kref);
 151        struct ttm_bo_device *bdev = bo->bdev;
 152        size_t acc_size = bo->acc_size;
 153
 154        BUG_ON(kref_read(&bo->list_kref));
 155        BUG_ON(kref_read(&bo->kref));
 156        BUG_ON(atomic_read(&bo->cpu_writers));
 157        BUG_ON(bo->mem.mm_node != NULL);
 158        BUG_ON(!list_empty(&bo->lru));
 159        BUG_ON(!list_empty(&bo->ddestroy));
 160        ttm_tt_destroy(bo->ttm);
 161        atomic_dec(&bo->bdev->glob->bo_count);
 162        dma_fence_put(bo->moving);
 163        reservation_object_fini(&bo->ttm_resv);
 164        mutex_destroy(&bo->wu_mutex);
 165        bo->destroy(bo);
 166        ttm_mem_global_free(bdev->glob->mem_glob, acc_size);
 167}
 168
 169static void ttm_bo_add_mem_to_lru(struct ttm_buffer_object *bo,
 170                                  struct ttm_mem_reg *mem)
 171{
 172        struct ttm_bo_device *bdev = bo->bdev;
 173        struct ttm_mem_type_manager *man;
 174
 175        reservation_object_assert_held(bo->resv);
 176
 177        if (!list_empty(&bo->lru))
 178                return;
 179
 180        if (mem->placement & TTM_PL_FLAG_NO_EVICT)
 181                return;
 182
 183        man = &bdev->man[mem->mem_type];
 184        list_add_tail(&bo->lru, &man->lru[bo->priority]);
 185        kref_get(&bo->list_kref);
 186
 187        if (bo->ttm && !(bo->ttm->page_flags &
 188                         (TTM_PAGE_FLAG_SG | TTM_PAGE_FLAG_SWAPPED))) {
 189                list_add_tail(&bo->swap, &bdev->glob->swap_lru[bo->priority]);
 190                kref_get(&bo->list_kref);
 191        }
 192}
 193
 194void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
 195{
 196        ttm_bo_add_mem_to_lru(bo, &bo->mem);
 197}
 198EXPORT_SYMBOL(ttm_bo_add_to_lru);
 199
 200static void ttm_bo_ref_bug(struct kref *list_kref)
 201{
 202        BUG();
 203}
 204
 205void ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
 206{
 207        struct ttm_bo_device *bdev = bo->bdev;
 208        bool notify = false;
 209
 210        if (!list_empty(&bo->swap)) {
 211                list_del_init(&bo->swap);
 212                kref_put(&bo->list_kref, ttm_bo_ref_bug);
 213                notify = true;
 214        }
 215        if (!list_empty(&bo->lru)) {
 216                list_del_init(&bo->lru);
 217                kref_put(&bo->list_kref, ttm_bo_ref_bug);
 218                notify = true;
 219        }
 220
 221        if (notify && bdev->driver->del_from_lru_notify)
 222                bdev->driver->del_from_lru_notify(bo);
 223}
 224
 225void ttm_bo_del_sub_from_lru(struct ttm_buffer_object *bo)
 226{
 227        struct ttm_bo_global *glob = bo->bdev->glob;
 228
 229        spin_lock(&glob->lru_lock);
 230        ttm_bo_del_from_lru(bo);
 231        spin_unlock(&glob->lru_lock);
 232}
 233EXPORT_SYMBOL(ttm_bo_del_sub_from_lru);
 234
 235static void ttm_bo_bulk_move_set_pos(struct ttm_lru_bulk_move_pos *pos,
 236                                     struct ttm_buffer_object *bo)
 237{
 238        if (!pos->first)
 239                pos->first = bo;
 240        pos->last = bo;
 241}
 242
 243void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo,
 244                             struct ttm_lru_bulk_move *bulk)
 245{
 246        reservation_object_assert_held(bo->resv);
 247
 248        ttm_bo_del_from_lru(bo);
 249        ttm_bo_add_to_lru(bo);
 250
 251        if (bulk && !(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
 252                switch (bo->mem.mem_type) {
 253                case TTM_PL_TT:
 254                        ttm_bo_bulk_move_set_pos(&bulk->tt[bo->priority], bo);
 255                        break;
 256
 257                case TTM_PL_VRAM:
 258                        ttm_bo_bulk_move_set_pos(&bulk->vram[bo->priority], bo);
 259                        break;
 260                }
 261                if (bo->ttm && !(bo->ttm->page_flags &
 262                                 (TTM_PAGE_FLAG_SG | TTM_PAGE_FLAG_SWAPPED)))
 263                        ttm_bo_bulk_move_set_pos(&bulk->swap[bo->priority], bo);
 264        }
 265}
 266EXPORT_SYMBOL(ttm_bo_move_to_lru_tail);
 267
 268void ttm_bo_bulk_move_lru_tail(struct ttm_lru_bulk_move *bulk)
 269{
 270        unsigned i;
 271
 272        for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
 273                struct ttm_lru_bulk_move_pos *pos = &bulk->tt[i];
 274                struct ttm_mem_type_manager *man;
 275
 276                if (!pos->first)
 277                        continue;
 278
 279                reservation_object_assert_held(pos->first->resv);
 280                reservation_object_assert_held(pos->last->resv);
 281
 282                man = &pos->first->bdev->man[TTM_PL_TT];
 283                list_bulk_move_tail(&man->lru[i], &pos->first->lru,
 284                                    &pos->last->lru);
 285        }
 286
 287        for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
 288                struct ttm_lru_bulk_move_pos *pos = &bulk->vram[i];
 289                struct ttm_mem_type_manager *man;
 290
 291                if (!pos->first)
 292                        continue;
 293
 294                reservation_object_assert_held(pos->first->resv);
 295                reservation_object_assert_held(pos->last->resv);
 296
 297                man = &pos->first->bdev->man[TTM_PL_VRAM];
 298                list_bulk_move_tail(&man->lru[i], &pos->first->lru,
 299                                    &pos->last->lru);
 300        }
 301
 302        for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
 303                struct ttm_lru_bulk_move_pos *pos = &bulk->swap[i];
 304                struct list_head *lru;
 305
 306                if (!pos->first)
 307                        continue;
 308
 309                reservation_object_assert_held(pos->first->resv);
 310                reservation_object_assert_held(pos->last->resv);
 311
 312                lru = &pos->first->bdev->glob->swap_lru[i];
 313                list_bulk_move_tail(lru, &pos->first->swap, &pos->last->swap);
 314        }
 315}
 316EXPORT_SYMBOL(ttm_bo_bulk_move_lru_tail);
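/*
 * Editor's note (illustrative sketch, not part of the original file): a
 * hypothetical driver helper showing how ttm_bo_move_to_lru_tail() and
 * ttm_bo_bulk_move_lru_tail() are meant to be combined.  The driver-side
 * name "my_bump_bos_to_lru_tail" is made up; the locking rules follow the
 * code above: glob->lru_lock held around the LRU manipulation and each
 * bo->resv held by the caller.
 *
 *	static void my_bump_bos_to_lru_tail(struct ttm_bo_global *glob,
 *					    struct ttm_buffer_object **bos,
 *					    unsigned int count)
 *	{
 *		struct ttm_lru_bulk_move bulk;
 *		unsigned int i;
 *
 *		memset(&bulk, 0, sizeof(bulk));
 *
 *		spin_lock(&glob->lru_lock);
 *		for (i = 0; i < count; ++i)
 *			ttm_bo_move_to_lru_tail(bos[i], &bulk);
 *		ttm_bo_bulk_move_lru_tail(&bulk);
 *		spin_unlock(&glob->lru_lock);
 *	}
 */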
 317
 318static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
 319                                  struct ttm_mem_reg *mem, bool evict,
 320                                  struct ttm_operation_ctx *ctx)
 321{
 322        struct ttm_bo_device *bdev = bo->bdev;
 323        bool old_is_pci = ttm_mem_reg_is_pci(bdev, &bo->mem);
 324        bool new_is_pci = ttm_mem_reg_is_pci(bdev, mem);
 325        struct ttm_mem_type_manager *old_man = &bdev->man[bo->mem.mem_type];
 326        struct ttm_mem_type_manager *new_man = &bdev->man[mem->mem_type];
 327        int ret = 0;
 328
 329        if (old_is_pci || new_is_pci ||
 330            ((mem->placement & bo->mem.placement & TTM_PL_MASK_CACHING) == 0)) {
 331                ret = ttm_mem_io_lock(old_man, true);
 332                if (unlikely(ret != 0))
 333                        goto out_err;
 334                ttm_bo_unmap_virtual_locked(bo);
 335                ttm_mem_io_unlock(old_man);
 336        }
 337
 338        /*
 339         * Create and bind a ttm if required.
 340         */
 341
 342        if (!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
 343                if (bo->ttm == NULL) {
 344                        bool zero = !(old_man->flags & TTM_MEMTYPE_FLAG_FIXED);
 345                        ret = ttm_tt_create(bo, zero);
 346                        if (ret)
 347                                goto out_err;
 348                }
 349
 350                ret = ttm_tt_set_placement_caching(bo->ttm, mem->placement);
 351                if (ret)
 352                        goto out_err;
 353
 354                if (mem->mem_type != TTM_PL_SYSTEM) {
 355                        ret = ttm_tt_bind(bo->ttm, mem, ctx);
 356                        if (ret)
 357                                goto out_err;
 358                }
 359
 360                if (bo->mem.mem_type == TTM_PL_SYSTEM) {
 361                        if (bdev->driver->move_notify)
 362                                bdev->driver->move_notify(bo, evict, mem);
 363                        bo->mem = *mem;
 364                        mem->mm_node = NULL;
 365                        goto moved;
 366                }
 367        }
 368
 369        if (bdev->driver->move_notify)
 370                bdev->driver->move_notify(bo, evict, mem);
 371
 372        if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
 373            !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED))
 374                ret = ttm_bo_move_ttm(bo, ctx, mem);
 375        else if (bdev->driver->move)
 376                ret = bdev->driver->move(bo, evict, ctx, mem);
 377        else
 378                ret = ttm_bo_move_memcpy(bo, ctx, mem);
 379
 380        if (ret) {
 381                if (bdev->driver->move_notify) {
 382                        swap(*mem, bo->mem);
 383                        bdev->driver->move_notify(bo, false, mem);
 384                        swap(*mem, bo->mem);
 385                }
 386
 387                goto out_err;
 388        }
 389
 390moved:
 391        if (bo->evicted) {
 392                if (bdev->driver->invalidate_caches) {
 393                        ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement);
 394                        if (ret)
 395                                pr_err("Can not flush read caches\n");
 396                }
 397                bo->evicted = false;
 398        }
 399
 400        if (bo->mem.mm_node)
 401                bo->offset = (bo->mem.start << PAGE_SHIFT) +
 402                    bdev->man[bo->mem.mem_type].gpu_offset;
 403        else
 404                bo->offset = 0;
 405
 406        ctx->bytes_moved += bo->num_pages << PAGE_SHIFT;
 407        return 0;
 408
 409out_err:
 410        new_man = &bdev->man[bo->mem.mem_type];
 411        if (new_man->flags & TTM_MEMTYPE_FLAG_FIXED) {
 412                ttm_tt_destroy(bo->ttm);
 413                bo->ttm = NULL;
 414        }
 415
 416        return ret;
 417}
 418
 419/**
  420 * Called with bo::reserved held.
  421 * Will release GPU memory type usage on destruction.
  422 * This is the place to put in driver specific hooks to release
  423 * driver private resources.
  424 * The caller is expected to release the bo::reserved lock afterwards.
 425 */
 426
 427static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
 428{
 429        if (bo->bdev->driver->move_notify)
 430                bo->bdev->driver->move_notify(bo, false, NULL);
 431
 432        ttm_tt_destroy(bo->ttm);
 433        bo->ttm = NULL;
 434        ttm_bo_mem_put(bo, &bo->mem);
 435}
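/*
 * Editor's note (illustrative sketch, not part of the original file): the
 * move_notify() call above is made with new_mem == NULL on destruction, which
 * is the hook drivers use to drop placement-dependent private state.  A
 * hypothetical callback ("my_bo_move_notify", "struct my_bo" and its fields
 * are made-up names) might look like:
 *
 *	static void my_bo_move_notify(struct ttm_buffer_object *bo,
 *				      bool evict,
 *				      struct ttm_mem_reg *new_mem)
 *	{
 *		struct my_bo *mbo = container_of(bo, struct my_bo, tbo);
 *
 *		// new_mem == NULL means the BO is being destroyed
 *		if (!new_mem) {
 *			mbo->gpu_addr_valid = false;
 *			return;
 *		}
 *
 *		// otherwise record the new placement for later use
 *		mbo->gpu_addr_valid = (new_mem->mem_type != TTM_PL_SYSTEM);
 *	}
 */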
 436
 437static int ttm_bo_individualize_resv(struct ttm_buffer_object *bo)
 438{
 439        int r;
 440
 441        if (bo->resv == &bo->ttm_resv)
 442                return 0;
 443
 444        BUG_ON(!reservation_object_trylock(&bo->ttm_resv));
 445
 446        r = reservation_object_copy_fences(&bo->ttm_resv, bo->resv);
 447        if (r)
 448                reservation_object_unlock(&bo->ttm_resv);
 449
 450        return r;
 451}
 452
 453static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo)
 454{
 455        struct reservation_object_list *fobj;
 456        struct dma_fence *fence;
 457        int i;
 458
 459        fobj = reservation_object_get_list(&bo->ttm_resv);
 460        fence = reservation_object_get_excl(&bo->ttm_resv);
 461        if (fence && !fence->ops->signaled)
 462                dma_fence_enable_sw_signaling(fence);
 463
 464        for (i = 0; fobj && i < fobj->shared_count; ++i) {
 465                fence = rcu_dereference_protected(fobj->shared[i],
 466                                        reservation_object_held(bo->resv));
 467
 468                if (!fence->ops->signaled)
 469                        dma_fence_enable_sw_signaling(fence);
 470        }
 471}
 472
 473static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
 474{
 475        struct ttm_bo_device *bdev = bo->bdev;
 476        struct ttm_bo_global *glob = bdev->glob;
 477        int ret;
 478
 479        ret = ttm_bo_individualize_resv(bo);
 480        if (ret) {
 481                /* Last resort, if we fail to allocate memory for the
 482                 * fences block for the BO to become idle
 483                 */
 484                reservation_object_wait_timeout_rcu(bo->resv, true, false,
 485                                                    30 * HZ);
 486                spin_lock(&glob->lru_lock);
 487                goto error;
 488        }
 489
 490        spin_lock(&glob->lru_lock);
 491        ret = reservation_object_trylock(bo->resv) ? 0 : -EBUSY;
 492        if (!ret) {
 493                if (reservation_object_test_signaled_rcu(&bo->ttm_resv, true)) {
 494                        ttm_bo_del_from_lru(bo);
 495                        spin_unlock(&glob->lru_lock);
 496                        if (bo->resv != &bo->ttm_resv)
 497                                reservation_object_unlock(&bo->ttm_resv);
 498
 499                        ttm_bo_cleanup_memtype_use(bo);
 500                        reservation_object_unlock(bo->resv);
 501                        return;
 502                }
 503
 504                ttm_bo_flush_all_fences(bo);
 505
 506                /*
 507                 * Make NO_EVICT bos immediately available to
 508                 * shrinkers, now that they are queued for
 509                 * destruction.
 510                 */
 511                if (bo->mem.placement & TTM_PL_FLAG_NO_EVICT) {
 512                        bo->mem.placement &= ~TTM_PL_FLAG_NO_EVICT;
 513                        ttm_bo_add_to_lru(bo);
 514                }
 515
 516                reservation_object_unlock(bo->resv);
 517        }
 518        if (bo->resv != &bo->ttm_resv)
 519                reservation_object_unlock(&bo->ttm_resv);
 520
 521error:
 522        kref_get(&bo->list_kref);
 523        list_add_tail(&bo->ddestroy, &bdev->ddestroy);
 524        spin_unlock(&glob->lru_lock);
 525
 526        schedule_delayed_work(&bdev->wq,
 527                              ((HZ / 100) < 1) ? 1 : HZ / 100);
 528}
 529
 530/**
  531 * ttm_bo_cleanup_refs
  532 * If bo idle, remove from delayed- and lru lists, and unref.
  533 * If not idle, do nothing.
  534 *
  535 * Must be called with lru_lock and reservation held; this function
  536 * will drop the lru lock and optionally the reservation lock before returning.
  537 *
  538 * @interruptible:        Any sleeps should occur interruptibly.
  539 * @no_wait_gpu:          Never wait for gpu. Return -EBUSY instead.
  540 * @unlock_resv:          Unlock the reservation lock as well.
  541 */
 542
 543static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
 544                               bool interruptible, bool no_wait_gpu,
 545                               bool unlock_resv)
 546{
 547        struct ttm_bo_global *glob = bo->bdev->glob;
 548        struct reservation_object *resv;
 549        int ret;
 550
 551        if (unlikely(list_empty(&bo->ddestroy)))
 552                resv = bo->resv;
 553        else
 554                resv = &bo->ttm_resv;
 555
 556        if (reservation_object_test_signaled_rcu(resv, true))
 557                ret = 0;
 558        else
 559                ret = -EBUSY;
 560
 561        if (ret && !no_wait_gpu) {
 562                long lret;
 563
 564                if (unlock_resv)
 565                        reservation_object_unlock(bo->resv);
 566                spin_unlock(&glob->lru_lock);
 567
 568                lret = reservation_object_wait_timeout_rcu(resv, true,
 569                                                           interruptible,
 570                                                           30 * HZ);
 571
 572                if (lret < 0)
 573                        return lret;
 574                else if (lret == 0)
 575                        return -EBUSY;
 576
 577                spin_lock(&glob->lru_lock);
 578                if (unlock_resv && !reservation_object_trylock(bo->resv)) {
 579                        /*
  580                         * We raced and lost; someone else holds the reservation now,
  581                         * and is probably busy in ttm_bo_cleanup_memtype_use.
  582                         *
  583                         * Even if that is not the case, any delayed destruction would
  584                         * succeed now that we have finished waiting, so just return
  585                         * success here.
 586                         */
 587                        spin_unlock(&glob->lru_lock);
 588                        return 0;
 589                }
 590                ret = 0;
 591        }
 592
 593        if (ret || unlikely(list_empty(&bo->ddestroy))) {
 594                if (unlock_resv)
 595                        reservation_object_unlock(bo->resv);
 596                spin_unlock(&glob->lru_lock);
 597                return ret;
 598        }
 599
 600        ttm_bo_del_from_lru(bo);
 601        list_del_init(&bo->ddestroy);
 602        kref_put(&bo->list_kref, ttm_bo_ref_bug);
 603
 604        spin_unlock(&glob->lru_lock);
 605        ttm_bo_cleanup_memtype_use(bo);
 606
 607        if (unlock_resv)
 608                reservation_object_unlock(bo->resv);
 609
 610        return 0;
 611}
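/*
 * Editor's note (illustrative, not part of the original file): a condensed
 * sketch of the calling convention documented above.  Both in-file callers,
 * ttm_bo_delayed_delete() and ttm_mem_evict_first(), follow this shape:
 *
 *	spin_lock(&glob->lru_lock);
 *	if (reservation_object_trylock(bo->resv)) {
 *		// lru_lock and reservation held on entry; the call drops the
 *		// lru_lock and, since unlock_resv is true, the reservation too
 *		ttm_bo_cleanup_refs(bo, false, true, true);
 *	} else {
 *		spin_unlock(&glob->lru_lock);
 *	}
 */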
 612
 613/**
 614 * Traverse the delayed list, and call ttm_bo_cleanup_refs on all
 615 * encountered buffers.
 616 */
 617static bool ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
 618{
 619        struct ttm_bo_global *glob = bdev->glob;
 620        struct list_head removed;
 621        bool empty;
 622
 623        INIT_LIST_HEAD(&removed);
 624
 625        spin_lock(&glob->lru_lock);
 626        while (!list_empty(&bdev->ddestroy)) {
 627                struct ttm_buffer_object *bo;
 628
 629                bo = list_first_entry(&bdev->ddestroy, struct ttm_buffer_object,
 630                                      ddestroy);
 631                kref_get(&bo->list_kref);
 632                list_move_tail(&bo->ddestroy, &removed);
 633
 634                if (remove_all || bo->resv != &bo->ttm_resv) {
 635                        spin_unlock(&glob->lru_lock);
 636                        reservation_object_lock(bo->resv, NULL);
 637
 638                        spin_lock(&glob->lru_lock);
 639                        ttm_bo_cleanup_refs(bo, false, !remove_all, true);
 640
 641                } else if (reservation_object_trylock(bo->resv)) {
 642                        ttm_bo_cleanup_refs(bo, false, !remove_all, true);
 643                } else {
 644                        spin_unlock(&glob->lru_lock);
 645                }
 646
 647                kref_put(&bo->list_kref, ttm_bo_release_list);
 648                spin_lock(&glob->lru_lock);
 649        }
 650        list_splice_tail(&removed, &bdev->ddestroy);
 651        empty = list_empty(&bdev->ddestroy);
 652        spin_unlock(&glob->lru_lock);
 653
 654        return empty;
 655}
 656
 657static void ttm_bo_delayed_workqueue(struct work_struct *work)
 658{
 659        struct ttm_bo_device *bdev =
 660            container_of(work, struct ttm_bo_device, wq.work);
 661
 662        if (!ttm_bo_delayed_delete(bdev, false))
 663                schedule_delayed_work(&bdev->wq,
 664                                      ((HZ / 100) < 1) ? 1 : HZ / 100);
 665}
 666
 667static void ttm_bo_release(struct kref *kref)
 668{
 669        struct ttm_buffer_object *bo =
 670            container_of(kref, struct ttm_buffer_object, kref);
 671        struct ttm_bo_device *bdev = bo->bdev;
 672        struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];
 673
 674        drm_vma_offset_remove(&bdev->vma_manager, &bo->vma_node);
 675        ttm_mem_io_lock(man, false);
 676        ttm_mem_io_free_vm(bo);
 677        ttm_mem_io_unlock(man);
 678        ttm_bo_cleanup_refs_or_queue(bo);
 679        kref_put(&bo->list_kref, ttm_bo_release_list);
 680}
 681
 682void ttm_bo_put(struct ttm_buffer_object *bo)
 683{
 684        kref_put(&bo->kref, ttm_bo_release);
 685}
 686EXPORT_SYMBOL(ttm_bo_put);
 687
 688int ttm_bo_lock_delayed_workqueue(struct ttm_bo_device *bdev)
 689{
 690        return cancel_delayed_work_sync(&bdev->wq);
 691}
 692EXPORT_SYMBOL(ttm_bo_lock_delayed_workqueue);
 693
 694void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev, int resched)
 695{
 696        if (resched)
 697                schedule_delayed_work(&bdev->wq,
 698                                      ((HZ / 100) < 1) ? 1 : HZ / 100);
 699}
 700EXPORT_SYMBOL(ttm_bo_unlock_delayed_workqueue);
 701
 702static int ttm_bo_evict(struct ttm_buffer_object *bo,
 703                        struct ttm_operation_ctx *ctx)
 704{
 705        struct ttm_bo_device *bdev = bo->bdev;
 706        struct ttm_mem_reg evict_mem;
 707        struct ttm_placement placement;
 708        int ret = 0;
 709
 710        reservation_object_assert_held(bo->resv);
 711
 712        placement.num_placement = 0;
 713        placement.num_busy_placement = 0;
 714        bdev->driver->evict_flags(bo, &placement);
 715
 716        if (!placement.num_placement && !placement.num_busy_placement) {
 717                ret = ttm_bo_pipeline_gutting(bo);
 718                if (ret)
 719                        return ret;
 720
 721                return ttm_tt_create(bo, false);
 722        }
 723
 724        evict_mem = bo->mem;
 725        evict_mem.mm_node = NULL;
 726        evict_mem.bus.io_reserved_vm = false;
 727        evict_mem.bus.io_reserved_count = 0;
 728
 729        ret = ttm_bo_mem_space(bo, &placement, &evict_mem, ctx);
 730        if (ret) {
 731                if (ret != -ERESTARTSYS) {
 732                        pr_err("Failed to find memory space for buffer 0x%p eviction\n",
 733                               bo);
 734                        ttm_bo_mem_space_debug(bo, &placement);
 735                }
 736                goto out;
 737        }
 738
 739        ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, ctx);
 740        if (unlikely(ret)) {
 741                if (ret != -ERESTARTSYS)
 742                        pr_err("Buffer eviction failed\n");
 743                ttm_bo_mem_put(bo, &evict_mem);
 744                goto out;
 745        }
 746        bo->evicted = true;
 747out:
 748        return ret;
 749}
 750
 751bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
 752                              const struct ttm_place *place)
 753{
 754        /* Don't evict this BO if it's outside of the
 755         * requested placement range
 756         */
 757        if (place->fpfn >= (bo->mem.start + bo->mem.size) ||
 758            (place->lpfn && place->lpfn <= bo->mem.start))
 759                return false;
 760
 761        return true;
 762}
 763EXPORT_SYMBOL(ttm_bo_eviction_valuable);
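/*
 * Editor's note (illustrative sketch, not part of the original file): drivers
 * can override the eviction_valuable() hook and still fall back to this
 * default for the common case.  A hypothetical override protecting BOs pinned
 * for scanout ("struct my_bo" and its field are made-up names):
 *
 *	static bool my_eviction_valuable(struct ttm_buffer_object *bo,
 *					 const struct ttm_place *place)
 *	{
 *		struct my_bo *mbo = container_of(bo, struct my_bo, tbo);
 *
 *		if (mbo->is_scanout)
 *			return false;
 *
 *		return ttm_bo_eviction_valuable(bo, place);
 *	}
 */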
 764
 765/**
  766 * Check whether the target bo is allowed to be evicted or swapped out, in two cases:
  767 *
  768 * a. if it shares the same reservation object as ctx->resv, that object is
  769 * assumed to already be locked, so it is not locked again; return true
  770 * directly when either the operation allows reserved eviction or the
  771 * target bo is already on the delayed free list;
 772 *
 773 * b. Otherwise, trylock it.
 774 */
 775static bool ttm_bo_evict_swapout_allowable(struct ttm_buffer_object *bo,
 776                        struct ttm_operation_ctx *ctx, bool *locked, bool *busy)
 777{
 778        bool ret = false;
 779
 780        if (bo->resv == ctx->resv) {
 781                reservation_object_assert_held(bo->resv);
 782                if (ctx->flags & TTM_OPT_FLAG_ALLOW_RES_EVICT
 783                    || !list_empty(&bo->ddestroy))
 784                        ret = true;
 785                *locked = false;
 786                if (busy)
 787                        *busy = false;
 788        } else {
 789                ret = reservation_object_trylock(bo->resv);
 790                *locked = ret;
 791                if (busy)
 792                        *busy = !ret;
 793        }
 794
 795        return ret;
 796}
 797
 798/**
 799 * ttm_mem_evict_wait_busy - wait for a busy BO to become available
 800 *
 801 * @busy_bo: BO which couldn't be locked with trylock
 802 * @ctx: operation context
 803 * @ticket: acquire ticket
 804 *
 805 * Try to lock a busy buffer object to avoid failing eviction.
 806 */
 807static int ttm_mem_evict_wait_busy(struct ttm_buffer_object *busy_bo,
 808                                   struct ttm_operation_ctx *ctx,
 809                                   struct ww_acquire_ctx *ticket)
 810{
 811        int r;
 812
 813        if (!busy_bo || !ticket)
 814                return -EBUSY;
 815
 816        if (ctx->interruptible)
 817                r = reservation_object_lock_interruptible(busy_bo->resv,
 818                                                          ticket);
 819        else
 820                r = reservation_object_lock(busy_bo->resv, ticket);
 821
 822        /*
 823         * TODO: It would be better to keep the BO locked until allocation is at
 824         * least tried one more time, but that would mean a much larger rework
 825         * of TTM.
 826         */
 827        if (!r)
 828                reservation_object_unlock(busy_bo->resv);
 829
 830        return r == -EDEADLK ? -EBUSY : r;
 831}
 832
 833static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
 834                               uint32_t mem_type,
 835                               const struct ttm_place *place,
 836                               struct ttm_operation_ctx *ctx,
 837                               struct ww_acquire_ctx *ticket)
 838{
 839        struct ttm_buffer_object *bo = NULL, *busy_bo = NULL;
 840        struct ttm_bo_global *glob = bdev->glob;
 841        struct ttm_mem_type_manager *man = &bdev->man[mem_type];
 842        bool locked = false;
 843        unsigned i;
 844        int ret;
 845
 846        spin_lock(&glob->lru_lock);
 847        for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
 848                list_for_each_entry(bo, &man->lru[i], lru) {
 849                        bool busy;
 850
 851                        if (!ttm_bo_evict_swapout_allowable(bo, ctx, &locked,
 852                                                            &busy)) {
 853                                if (busy && !busy_bo &&
 854                                    bo->resv->lock.ctx != ticket)
 855                                        busy_bo = bo;
 856                                continue;
 857                        }
 858
 859                        if (place && !bdev->driver->eviction_valuable(bo,
 860                                                                      place)) {
 861                                if (locked)
 862                                        reservation_object_unlock(bo->resv);
 863                                continue;
 864                        }
 865                        break;
 866                }
 867
 868                /* If the inner loop terminated early, we have our candidate */
 869                if (&bo->lru != &man->lru[i])
 870                        break;
 871
 872                bo = NULL;
 873        }
 874
 875        if (!bo) {
 876                if (busy_bo)
 877                        ttm_bo_get(busy_bo);
 878                spin_unlock(&glob->lru_lock);
 879                ret = ttm_mem_evict_wait_busy(busy_bo, ctx, ticket);
 880                if (busy_bo)
 881                        ttm_bo_put(busy_bo);
 882                return ret;
 883        }
 884
 885        kref_get(&bo->list_kref);
 886
 887        if (!list_empty(&bo->ddestroy)) {
 888                ret = ttm_bo_cleanup_refs(bo, ctx->interruptible,
 889                                          ctx->no_wait_gpu, locked);
 890                kref_put(&bo->list_kref, ttm_bo_release_list);
 891                return ret;
 892        }
 893
 894        ttm_bo_del_from_lru(bo);
 895        spin_unlock(&glob->lru_lock);
 896
 897        ret = ttm_bo_evict(bo, ctx);
 898        if (locked) {
 899                ttm_bo_unreserve(bo);
 900        } else {
 901                spin_lock(&glob->lru_lock);
 902                ttm_bo_add_to_lru(bo);
 903                spin_unlock(&glob->lru_lock);
 904        }
 905
 906        kref_put(&bo->list_kref, ttm_bo_release_list);
 907        return ret;
 908}
 909
 910void ttm_bo_mem_put(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem)
 911{
 912        struct ttm_mem_type_manager *man = &bo->bdev->man[mem->mem_type];
 913
 914        if (mem->mm_node)
 915                (*man->func->put_node)(man, mem);
 916}
 917EXPORT_SYMBOL(ttm_bo_mem_put);
 918
 919/**
 920 * Add the last move fence to the BO and reserve a new shared slot.
 921 */
 922static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
 923                                 struct ttm_mem_type_manager *man,
 924                                 struct ttm_mem_reg *mem)
 925{
 926        struct dma_fence *fence;
 927        int ret;
 928
 929        spin_lock(&man->move_lock);
 930        fence = dma_fence_get(man->move);
 931        spin_unlock(&man->move_lock);
 932
 933        if (fence) {
 934                reservation_object_add_shared_fence(bo->resv, fence);
 935
 936                ret = reservation_object_reserve_shared(bo->resv, 1);
 937                if (unlikely(ret)) {
 938                        dma_fence_put(fence);
 939                        return ret;
 940                }
 941
 942                dma_fence_put(bo->moving);
 943                bo->moving = fence;
 944        }
 945
 946        return 0;
 947}
 948
 949/**
 950 * Repeatedly evict memory from the LRU for @mem_type until we create enough
 951 * space, or we've evicted everything and there isn't enough space.
 952 */
 953static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
 954                                  const struct ttm_place *place,
 955                                  struct ttm_mem_reg *mem,
 956                                  struct ttm_operation_ctx *ctx)
 957{
 958        struct ttm_bo_device *bdev = bo->bdev;
 959        struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
 960        int ret;
 961
 962        do {
 963                ret = (*man->func->get_node)(man, bo, place, mem);
 964                if (unlikely(ret != 0))
 965                        return ret;
 966                if (mem->mm_node)
 967                        break;
 968                ret = ttm_mem_evict_first(bdev, mem->mem_type, place, ctx,
 969                                          bo->resv->lock.ctx);
 970                if (unlikely(ret != 0))
 971                        return ret;
 972        } while (1);
 973
 974        return ttm_bo_add_move_fence(bo, man, mem);
 975}
 976
 977static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man,
 978                                      uint32_t cur_placement,
 979                                      uint32_t proposed_placement)
 980{
 981        uint32_t caching = proposed_placement & TTM_PL_MASK_CACHING;
 982        uint32_t result = proposed_placement & ~TTM_PL_MASK_CACHING;
 983
 984        /**
 985         * Keep current caching if possible.
 986         */
 987
 988        if ((cur_placement & caching) != 0)
 989                result |= (cur_placement & caching);
 990        else if ((man->default_caching & caching) != 0)
 991                result |= man->default_caching;
 992        else if ((TTM_PL_FLAG_CACHED & caching) != 0)
 993                result |= TTM_PL_FLAG_CACHED;
 994        else if ((TTM_PL_FLAG_WC & caching) != 0)
 995                result |= TTM_PL_FLAG_WC;
 996        else if ((TTM_PL_FLAG_UNCACHED & caching) != 0)
 997                result |= TTM_PL_FLAG_UNCACHED;
 998
 999        return result;
1000}
1001
1002static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
1003                                 uint32_t mem_type,
1004                                 const struct ttm_place *place,
1005                                 uint32_t *masked_placement)
1006{
1007        uint32_t cur_flags = ttm_bo_type_flags(mem_type);
1008
1009        if ((cur_flags & place->flags & TTM_PL_MASK_MEM) == 0)
1010                return false;
1011
1012        if ((place->flags & man->available_caching) == 0)
1013                return false;
1014
1015        cur_flags |= (place->flags & man->available_caching);
1016
1017        *masked_placement = cur_flags;
1018        return true;
1019}
1020
1021/**
1022 * ttm_bo_mem_placement - check if placement is compatible
1023 * @bo: BO to find memory for
1024 * @place: where to search
1025 * @mem: the memory object to fill in
1026 * @ctx: operation context
1027 *
1028 * Check if placement is compatible and fill in mem structure.
 1029 * Returns -EBUSY if the placement won't work, another negative error code
 1030 * on failure, or 0 when the placement can be used.
1031 */
1032static int ttm_bo_mem_placement(struct ttm_buffer_object *bo,
1033                                const struct ttm_place *place,
1034                                struct ttm_mem_reg *mem,
1035                                struct ttm_operation_ctx *ctx)
1036{
1037        struct ttm_bo_device *bdev = bo->bdev;
1038        uint32_t mem_type = TTM_PL_SYSTEM;
1039        struct ttm_mem_type_manager *man;
1040        uint32_t cur_flags = 0;
1041        int ret;
1042
1043        ret = ttm_mem_type_from_place(place, &mem_type);
1044        if (ret)
1045                return ret;
1046
1047        man = &bdev->man[mem_type];
1048        if (!man->has_type || !man->use_type)
1049                return -EBUSY;
1050
1051        if (!ttm_bo_mt_compatible(man, mem_type, place, &cur_flags))
1052                return -EBUSY;
1053
1054        cur_flags = ttm_bo_select_caching(man, bo->mem.placement, cur_flags);
1055        /*
1056         * Use the access and other non-mapping-related flag bits from
1057         * the memory placement flags to the current flags
1058         */
1059        ttm_flag_masked(&cur_flags, place->flags, ~TTM_PL_MASK_MEMTYPE);
1060
1061        mem->mem_type = mem_type;
1062        mem->placement = cur_flags;
1063
1064        if (bo->mem.mem_type < mem_type && !list_empty(&bo->lru)) {
1065                spin_lock(&bo->bdev->glob->lru_lock);
1066                ttm_bo_del_from_lru(bo);
1067                ttm_bo_add_mem_to_lru(bo, mem);
1068                spin_unlock(&bo->bdev->glob->lru_lock);
1069        }
1070
1071        return 0;
1072}
1073
1074/**
1075 * Creates space for memory region @mem according to its type.
1076 *
1077 * This function first searches for free space in compatible memory types in
1078 * the priority order defined by the driver.  If free space isn't found, then
1079 * ttm_bo_mem_force_space is attempted in priority order to evict and find
1080 * space.
1081 */
1082int ttm_bo_mem_space(struct ttm_buffer_object *bo,
1083                        struct ttm_placement *placement,
1084                        struct ttm_mem_reg *mem,
1085                        struct ttm_operation_ctx *ctx)
1086{
1087        struct ttm_bo_device *bdev = bo->bdev;
1088        bool type_found = false;
1089        int i, ret;
1090
1091        ret = reservation_object_reserve_shared(bo->resv, 1);
1092        if (unlikely(ret))
1093                return ret;
1094
1095        mem->mm_node = NULL;
1096        for (i = 0; i < placement->num_placement; ++i) {
1097                const struct ttm_place *place = &placement->placement[i];
1098                struct ttm_mem_type_manager *man;
1099
1100                ret = ttm_bo_mem_placement(bo, place, mem, ctx);
1101                if (ret == -EBUSY)
1102                        continue;
1103                if (ret)
1104                        goto error;
1105
1106                type_found = true;
1107                mem->mm_node = NULL;
1108                if (mem->mem_type == TTM_PL_SYSTEM)
1109                        return 0;
1110
1111                man = &bdev->man[mem->mem_type];
1112                ret = (*man->func->get_node)(man, bo, place, mem);
1113                if (unlikely(ret))
1114                        goto error;
1115
1116                if (mem->mm_node) {
1117                        ret = ttm_bo_add_move_fence(bo, man, mem);
1118                        if (unlikely(ret)) {
1119                                (*man->func->put_node)(man, mem);
1120                                goto error;
1121                        }
1122                        return 0;
1123                }
1124        }
1125
1126        for (i = 0; i < placement->num_busy_placement; ++i) {
1127                const struct ttm_place *place = &placement->busy_placement[i];
1128
1129                ret = ttm_bo_mem_placement(bo, place, mem, ctx);
1130                if (ret == -EBUSY)
1131                        continue;
1132                if (ret)
1133                        goto error;
1134
1135                type_found = true;
1136                mem->mm_node = NULL;
1137                if (mem->mem_type == TTM_PL_SYSTEM)
1138                        return 0;
1139
1140                ret = ttm_bo_mem_force_space(bo, place, mem, ctx);
1141                if (ret == 0 && mem->mm_node)
1142                        return 0;
1143
1144                if (ret && ret != -EBUSY)
1145                        goto error;
1146        }
1147
1148        ret = -ENOMEM;
1149        if (!type_found) {
1150                pr_err(TTM_PFX "No compatible memory type found\n");
1151                ret = -EINVAL;
1152        }
1153
1154error:
1155        if (bo->mem.mem_type == TTM_PL_SYSTEM && !list_empty(&bo->lru)) {
1156                spin_lock(&bo->bdev->glob->lru_lock);
1157                ttm_bo_move_to_lru_tail(bo, NULL);
1158                spin_unlock(&bo->bdev->glob->lru_lock);
1159        }
1160
1161        return ret;
1162}
1163EXPORT_SYMBOL(ttm_bo_mem_space);
1164
1165static int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
1166                              struct ttm_placement *placement,
1167                              struct ttm_operation_ctx *ctx)
1168{
1169        int ret = 0;
1170        struct ttm_mem_reg mem;
1171
1172        reservation_object_assert_held(bo->resv);
1173
1174        mem.num_pages = bo->num_pages;
1175        mem.size = mem.num_pages << PAGE_SHIFT;
1176        mem.page_alignment = bo->mem.page_alignment;
1177        mem.bus.io_reserved_vm = false;
1178        mem.bus.io_reserved_count = 0;
1179        /*
1180         * Determine where to move the buffer.
1181         */
1182        ret = ttm_bo_mem_space(bo, placement, &mem, ctx);
1183        if (ret)
1184                goto out_unlock;
1185        ret = ttm_bo_handle_move_mem(bo, &mem, false, ctx);
1186out_unlock:
1187        if (ret && mem.mm_node)
1188                ttm_bo_mem_put(bo, &mem);
1189        return ret;
1190}
1191
1192static bool ttm_bo_places_compat(const struct ttm_place *places,
1193                                 unsigned num_placement,
1194                                 struct ttm_mem_reg *mem,
1195                                 uint32_t *new_flags)
1196{
1197        unsigned i;
1198
1199        for (i = 0; i < num_placement; i++) {
1200                const struct ttm_place *heap = &places[i];
1201
1202                if (mem->mm_node && (mem->start < heap->fpfn ||
1203                     (heap->lpfn != 0 && (mem->start + mem->num_pages) > heap->lpfn)))
1204                        continue;
1205
1206                *new_flags = heap->flags;
1207                if ((*new_flags & mem->placement & TTM_PL_MASK_CACHING) &&
1208                    (*new_flags & mem->placement & TTM_PL_MASK_MEM) &&
1209                    (!(*new_flags & TTM_PL_FLAG_CONTIGUOUS) ||
1210                     (mem->placement & TTM_PL_FLAG_CONTIGUOUS)))
1211                        return true;
1212        }
1213        return false;
1214}
1215
1216bool ttm_bo_mem_compat(struct ttm_placement *placement,
1217                       struct ttm_mem_reg *mem,
1218                       uint32_t *new_flags)
1219{
1220        if (ttm_bo_places_compat(placement->placement, placement->num_placement,
1221                                 mem, new_flags))
1222                return true;
1223
1224        if ((placement->busy_placement != placement->placement ||
1225             placement->num_busy_placement > placement->num_placement) &&
1226            ttm_bo_places_compat(placement->busy_placement,
1227                                 placement->num_busy_placement,
1228                                 mem, new_flags))
1229                return true;
1230
1231        return false;
1232}
1233EXPORT_SYMBOL(ttm_bo_mem_compat);
1234
1235int ttm_bo_validate(struct ttm_buffer_object *bo,
1236                    struct ttm_placement *placement,
1237                    struct ttm_operation_ctx *ctx)
1238{
1239        int ret;
1240        uint32_t new_flags;
1241
1242        reservation_object_assert_held(bo->resv);
1243        /*
1244         * Check whether we need to move buffer.
1245         */
1246        if (!ttm_bo_mem_compat(placement, &bo->mem, &new_flags)) {
1247                ret = ttm_bo_move_buffer(bo, placement, ctx);
1248                if (ret)
1249                        return ret;
1250        } else {
1251                /*
1252                 * Use the access and other non-mapping-related flag bits from
1253                 * the compatible memory placement flags to the active flags
1254                 */
1255                ttm_flag_masked(&bo->mem.placement, new_flags,
1256                                ~TTM_PL_MASK_MEMTYPE);
1257        }
1258        /*
1259         * We might need to add a TTM.
1260         */
1261        if (bo->mem.mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
1262                ret = ttm_tt_create(bo, true);
1263                if (ret)
1264                        return ret;
1265        }
1266        return 0;
1267}
1268EXPORT_SYMBOL(ttm_bo_validate);
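/*
 * Editor's note (illustrative sketch, not part of the original file): a
 * minimal, hypothetical use of ttm_bo_validate() that moves an already
 * reserved BO into write-combined VRAM.  Flag values are examples only;
 * bo->resv must be held by the caller, as asserted above.
 *
 *	static int my_move_to_vram(struct ttm_buffer_object *bo)
 *	{
 *		struct ttm_operation_ctx ctx = { .interruptible = true,
 *						 .no_wait_gpu = false };
 *		struct ttm_place place = {
 *			.fpfn = 0,
 *			.lpfn = 0,
 *			.flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_WC,
 *		};
 *		struct ttm_placement placement = {
 *			.num_placement = 1,
 *			.placement = &place,
 *			.num_busy_placement = 1,
 *			.busy_placement = &place,
 *		};
 *
 *		return ttm_bo_validate(bo, &placement, &ctx);
 *	}
 */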
1269
1270int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
1271                         struct ttm_buffer_object *bo,
1272                         unsigned long size,
1273                         enum ttm_bo_type type,
1274                         struct ttm_placement *placement,
1275                         uint32_t page_alignment,
1276                         struct ttm_operation_ctx *ctx,
1277                         size_t acc_size,
1278                         struct sg_table *sg,
1279                         struct reservation_object *resv,
1280                         void (*destroy) (struct ttm_buffer_object *))
1281{
1282        int ret = 0;
1283        unsigned long num_pages;
1284        struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
1285        bool locked;
1286
1287        ret = ttm_mem_global_alloc(mem_glob, acc_size, ctx);
1288        if (ret) {
1289                pr_err("Out of kernel memory\n");
1290                if (destroy)
1291                        (*destroy)(bo);
1292                else
1293                        kfree(bo);
1294                return -ENOMEM;
1295        }
1296
1297        num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
1298        if (num_pages == 0) {
1299                pr_err("Illegal buffer object size\n");
1300                if (destroy)
1301                        (*destroy)(bo);
1302                else
1303                        kfree(bo);
1304                ttm_mem_global_free(mem_glob, acc_size);
1305                return -EINVAL;
1306        }
1307        bo->destroy = destroy ? destroy : ttm_bo_default_destroy;
1308
1309        kref_init(&bo->kref);
1310        kref_init(&bo->list_kref);
1311        atomic_set(&bo->cpu_writers, 0);
1312        INIT_LIST_HEAD(&bo->lru);
1313        INIT_LIST_HEAD(&bo->ddestroy);
1314        INIT_LIST_HEAD(&bo->swap);
1315        INIT_LIST_HEAD(&bo->io_reserve_lru);
1316        mutex_init(&bo->wu_mutex);
1317        bo->bdev = bdev;
1318        bo->type = type;
1319        bo->num_pages = num_pages;
1320        bo->mem.size = num_pages << PAGE_SHIFT;
1321        bo->mem.mem_type = TTM_PL_SYSTEM;
1322        bo->mem.num_pages = bo->num_pages;
1323        bo->mem.mm_node = NULL;
1324        bo->mem.page_alignment = page_alignment;
1325        bo->mem.bus.io_reserved_vm = false;
1326        bo->mem.bus.io_reserved_count = 0;
1327        bo->moving = NULL;
1328        bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED);
1329        bo->acc_size = acc_size;
1330        bo->sg = sg;
1331        if (resv) {
1332                bo->resv = resv;
1333                reservation_object_assert_held(bo->resv);
1334        } else {
1335                bo->resv = &bo->ttm_resv;
1336        }
1337        reservation_object_init(&bo->ttm_resv);
1338        atomic_inc(&bo->bdev->glob->bo_count);
1339        drm_vma_node_reset(&bo->vma_node);
1340
1341        /*
1342         * For ttm_bo_type_device buffers, allocate
1343         * address space from the device.
1344         */
1345        if (bo->type == ttm_bo_type_device ||
1346            bo->type == ttm_bo_type_sg)
1347                ret = drm_vma_offset_add(&bdev->vma_manager, &bo->vma_node,
1348                                         bo->mem.num_pages);
1349
1350        /* passed reservation objects should already be locked,
1351         * since otherwise lockdep will be angered in radeon.
1352         */
1353        if (!resv) {
1354                locked = reservation_object_trylock(bo->resv);
1355                WARN_ON(!locked);
1356        }
1357
1358        if (likely(!ret))
1359                ret = ttm_bo_validate(bo, placement, ctx);
1360
1361        if (unlikely(ret)) {
1362                if (!resv)
1363                        ttm_bo_unreserve(bo);
1364
1365                ttm_bo_put(bo);
1366                return ret;
1367        }
1368
1369        if (resv && !(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
1370                spin_lock(&bdev->glob->lru_lock);
1371                ttm_bo_add_to_lru(bo);
1372                spin_unlock(&bdev->glob->lru_lock);
1373        }
1374
1375        return ret;
1376}
1377EXPORT_SYMBOL(ttm_bo_init_reserved);
1378
1379int ttm_bo_init(struct ttm_bo_device *bdev,
1380                struct ttm_buffer_object *bo,
1381                unsigned long size,
1382                enum ttm_bo_type type,
1383                struct ttm_placement *placement,
1384                uint32_t page_alignment,
1385                bool interruptible,
1386                size_t acc_size,
1387                struct sg_table *sg,
1388                struct reservation_object *resv,
1389                void (*destroy) (struct ttm_buffer_object *))
1390{
1391        struct ttm_operation_ctx ctx = { interruptible, false };
1392        int ret;
1393
1394        ret = ttm_bo_init_reserved(bdev, bo, size, type, placement,
1395                                   page_alignment, &ctx, acc_size,
1396                                   sg, resv, destroy);
1397        if (ret)
1398                return ret;
1399
1400        if (!resv)
1401                ttm_bo_unreserve(bo);
1402
1403        return 0;
1404}
1405EXPORT_SYMBOL(ttm_bo_init);
1406
1407size_t ttm_bo_acc_size(struct ttm_bo_device *bdev,
1408                       unsigned long bo_size,
1409                       unsigned struct_size)
1410{
1411        unsigned npages = (PAGE_ALIGN(bo_size)) >> PAGE_SHIFT;
1412        size_t size = 0;
1413
1414        size += ttm_round_pot(struct_size);
1415        size += ttm_round_pot(npages * sizeof(void *));
1416        size += ttm_round_pot(sizeof(struct ttm_tt));
1417        return size;
1418}
1419EXPORT_SYMBOL(ttm_bo_acc_size);
1420
1421size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev,
1422                           unsigned long bo_size,
1423                           unsigned struct_size)
1424{
1425        unsigned npages = (PAGE_ALIGN(bo_size)) >> PAGE_SHIFT;
1426        size_t size = 0;
1427
1428        size += ttm_round_pot(struct_size);
1429        size += ttm_round_pot(npages * (2*sizeof(void *) + sizeof(dma_addr_t)));
1430        size += ttm_round_pot(sizeof(struct ttm_dma_tt));
1431        return size;
1432}
1433EXPORT_SYMBOL(ttm_bo_dma_acc_size);
1434
1435int ttm_bo_create(struct ttm_bo_device *bdev,
1436                        unsigned long size,
1437                        enum ttm_bo_type type,
1438                        struct ttm_placement *placement,
1439                        uint32_t page_alignment,
1440                        bool interruptible,
1441                        struct ttm_buffer_object **p_bo)
1442{
1443        struct ttm_buffer_object *bo;
1444        size_t acc_size;
1445        int ret;
1446
1447        bo = kzalloc(sizeof(*bo), GFP_KERNEL);
1448        if (unlikely(bo == NULL))
1449                return -ENOMEM;
1450
1451        acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct ttm_buffer_object));
1452        ret = ttm_bo_init(bdev, bo, size, type, placement, page_alignment,
1453                          interruptible, acc_size,
1454                          NULL, NULL, NULL);
1455        if (likely(ret == 0))
1456                *p_bo = bo;
1457
1458        return ret;
1459}
1460EXPORT_SYMBOL(ttm_bo_create);
1461
1462static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
1463                                   unsigned mem_type)
1464{
1465        struct ttm_operation_ctx ctx = {
1466                .interruptible = false,
1467                .no_wait_gpu = false,
1468                .flags = TTM_OPT_FLAG_FORCE_ALLOC
1469        };
1470        struct ttm_mem_type_manager *man = &bdev->man[mem_type];
1471        struct ttm_bo_global *glob = bdev->glob;
1472        struct dma_fence *fence;
1473        int ret;
1474        unsigned i;
1475
1476        /*
1477         * Can't use standard list traversal since we're unlocking.
1478         */
1479
1480        spin_lock(&glob->lru_lock);
1481        for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
1482                while (!list_empty(&man->lru[i])) {
1483                        spin_unlock(&glob->lru_lock);
1484                        ret = ttm_mem_evict_first(bdev, mem_type, NULL, &ctx,
1485                                                  NULL);
1486                        if (ret)
1487                                return ret;
1488                        spin_lock(&glob->lru_lock);
1489                }
1490        }
1491        spin_unlock(&glob->lru_lock);
1492
1493        spin_lock(&man->move_lock);
1494        fence = dma_fence_get(man->move);
1495        spin_unlock(&man->move_lock);
1496
1497        if (fence) {
1498                ret = dma_fence_wait(fence, false);
1499                dma_fence_put(fence);
1500                if (ret)
1501                        return ret;
1502        }
1503
1504        return 0;
1505}
1506
1507int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
1508{
1509        struct ttm_mem_type_manager *man;
1510        int ret = -EINVAL;
1511
1512        if (mem_type >= TTM_NUM_MEM_TYPES) {
1513                pr_err("Illegal memory type %d\n", mem_type);
1514                return ret;
1515        }
1516        man = &bdev->man[mem_type];
1517
1518        if (!man->has_type) {
1519                pr_err("Trying to take down uninitialized memory manager type %u\n",
1520                       mem_type);
1521                return ret;
1522        }
1523
1524        man->use_type = false;
1525        man->has_type = false;
1526
1527        ret = 0;
1528        if (mem_type > 0) {
1529                ret = ttm_bo_force_list_clean(bdev, mem_type);
1530                if (ret) {
1531                        pr_err("Cleanup eviction failed\n");
1532                        return ret;
1533                }
1534
1535                ret = (*man->func->takedown)(man);
1536        }
1537
1538        dma_fence_put(man->move);
1539        man->move = NULL;
1540
1541        return ret;
1542}
1543EXPORT_SYMBOL(ttm_bo_clean_mm);
1544
1545int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type)
1546{
1547        struct ttm_mem_type_manager *man = &bdev->man[mem_type];
1548
1549        if (mem_type == 0 || mem_type >= TTM_NUM_MEM_TYPES) {
1550                pr_err("Illegal memory manager memory type %u\n", mem_type);
1551                return -EINVAL;
1552        }
1553
1554        if (!man->has_type) {
1555                pr_err("Memory type %u has not been initialized\n", mem_type);
1556                return 0;
1557        }
1558
1559        return ttm_bo_force_list_clean(bdev, mem_type);
1560}
1561EXPORT_SYMBOL(ttm_bo_evict_mm);
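
/*
 * Illustrative sketch (not upstream code): typical pairing of
 * ttm_bo_evict_mm() and ttm_bo_clean_mm() in a driver.  TTM_PL_VRAM is
 * used here as an example of a driver-initialized memory type.
 */
static inline int example_suspend_vram(struct ttm_bo_device *bdev)
{
        /* Evict every BO from VRAM; the buffers remain valid in system RAM. */
        return ttm_bo_evict_mm(bdev, TTM_PL_VRAM);
}

static inline void example_fini_vram(struct ttm_bo_device *bdev)
{
        /* Evict and tear the manager down; the type is unusable afterwards. */
        if (ttm_bo_clean_mm(bdev, TTM_PL_VRAM))
                pr_err("VRAM manager cleanup failed\n");
}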
1562
1563int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
1564                        unsigned long p_size)
1565{
1566        int ret;
1567        struct ttm_mem_type_manager *man;
1568        unsigned i;
1569
1570        BUG_ON(type >= TTM_NUM_MEM_TYPES);
1571        man = &bdev->man[type];
1572        BUG_ON(man->has_type);
1573        man->io_reserve_fastpath = true;
1574        man->use_io_reserve_lru = false;
1575        mutex_init(&man->io_reserve_mutex);
1576        spin_lock_init(&man->move_lock);
1577        INIT_LIST_HEAD(&man->io_reserve_lru);
1578
1579        ret = bdev->driver->init_mem_type(bdev, type, man);
1580        if (ret)
1581                return ret;
1582        man->bdev = bdev;
1583
1584        if (type != TTM_PL_SYSTEM) {
1585                ret = (*man->func->init)(man, p_size);
1586                if (ret)
1587                        return ret;
1588        }
1589        man->has_type = true;
1590        man->use_type = true;
1591        man->size = p_size;
1592
1593        for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
1594                INIT_LIST_HEAD(&man->lru[i]);
1595        man->move = NULL;
1596
1597        return 0;
1598}
1599EXPORT_SYMBOL(ttm_bo_init_mm);
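
/*
 * Illustrative sketch (not upstream code): registering a fixed-size VRAM
 * manager after device init.  The driver's init_mem_type() callback is
 * expected to fill in man->func (commonly &ttm_bo_manager_func) plus the
 * caching/placement flags and GPU offset for this type; the vram_pages
 * argument is a hypothetical driver value.
 */
static inline int example_register_vram(struct ttm_bo_device *bdev,
                                        unsigned long vram_pages)
{
        /* p_size is in pages for range managers such as ttm_bo_manager_func. */
        return ttm_bo_init_mm(bdev, TTM_PL_VRAM, vram_pages);
}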
1600
1601static void ttm_bo_global_kobj_release(struct kobject *kobj)
1602{
1603        struct ttm_bo_global *glob =
1604                container_of(kobj, struct ttm_bo_global, kobj);
1605
1606        __free_page(glob->dummy_read_page);
1607}
1608
1609static void ttm_bo_global_release(void)
1610{
1611        struct ttm_bo_global *glob = &ttm_bo_glob;
1612
1613        mutex_lock(&ttm_global_mutex);
1614        if (--ttm_bo_glob_use_count > 0)
1615                goto out;
1616
1617        kobject_del(&glob->kobj);
1618        kobject_put(&glob->kobj);
1619        ttm_mem_global_release(&ttm_mem_glob);
1620        memset(glob, 0, sizeof(*glob));
1621out:
1622        mutex_unlock(&ttm_global_mutex);
1623}
1624
1625static int ttm_bo_global_init(void)
1626{
1627        struct ttm_bo_global *glob = &ttm_bo_glob;
1628        int ret = 0;
1629        unsigned i;
1630
1631        mutex_lock(&ttm_global_mutex);
1632        if (++ttm_bo_glob_use_count > 1)
1633                goto out;
1634
1635        ret = ttm_mem_global_init(&ttm_mem_glob);
1636        if (ret)
1637                goto out;
1638
1639        spin_lock_init(&glob->lru_lock);
1640        glob->mem_glob = &ttm_mem_glob;
1641        glob->mem_glob->bo_glob = glob;
1642        glob->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);
1643
1644        if (unlikely(glob->dummy_read_page == NULL)) {
1645                ret = -ENOMEM;
1646                goto out;
1647        }
1648
1649        for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
1650                INIT_LIST_HEAD(&glob->swap_lru[i]);
1651        INIT_LIST_HEAD(&glob->device_list);
1652        atomic_set(&glob->bo_count, 0);
1653
1654        ret = kobject_init_and_add(
1655                &glob->kobj, &ttm_bo_glob_kobj_type, ttm_get_kobj(), "buffer_objects");
1656        if (unlikely(ret != 0))
1657                kobject_put(&glob->kobj);
1658out:
1659        mutex_unlock(&ttm_global_mutex);
1660        return ret;
1661}
1662
1663int ttm_bo_device_release(struct ttm_bo_device *bdev)
1664{
1665        int ret = 0;
1666        unsigned i = TTM_NUM_MEM_TYPES;
1667        struct ttm_mem_type_manager *man;
1668        struct ttm_bo_global *glob = bdev->glob;
1669
1670        while (i--) {
1671                man = &bdev->man[i];
1672                if (man->has_type) {
1673                        man->use_type = false;
1674                        if ((i != TTM_PL_SYSTEM) && ttm_bo_clean_mm(bdev, i)) {
1675                                ret = -EBUSY;
1676                                pr_err("DRM memory manager type %d is not clean\n",
1677                                       i);
1678                        }
1679                        man->has_type = false;
1680                }
1681        }
1682
1683        mutex_lock(&ttm_global_mutex);
1684        list_del(&bdev->device_list);
1685        mutex_unlock(&ttm_global_mutex);
1686
1687        cancel_delayed_work_sync(&bdev->wq);
1688
1689        if (ttm_bo_delayed_delete(bdev, true))
1690                pr_debug("Delayed destroy list was clean\n");
1691
1692        spin_lock(&glob->lru_lock);
1693        for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
1694                if (list_empty(&glob->swap_lru[i]))
1695                        pr_debug("Swap list %d was clean\n", i);
1696        spin_unlock(&glob->lru_lock);
1697
1698        drm_vma_offset_manager_destroy(&bdev->vma_manager);
1699
1700        if (!ret)
1701                ttm_bo_global_release();
1702
1703        return ret;
1704}
1705EXPORT_SYMBOL(ttm_bo_device_release);
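
/*
 * Illustrative sketch (not upstream code): driver unload ordering.  All
 * BOs must already have been destroyed; ttm_bo_device_release() then takes
 * down every remaining memory type except TTM_PL_SYSTEM and drops the
 * global BO state.  Cleaning a driver-added type explicitly first (as in
 * example_fini_vram() above) simply gives per-type error reporting.
 */
static inline void example_device_teardown(struct ttm_bo_device *bdev)
{
        if (ttm_bo_device_release(bdev))
                pr_err("Some memory managers were not clean at unload\n");
}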
1706
1707int ttm_bo_device_init(struct ttm_bo_device *bdev,
1708                       struct ttm_bo_driver *driver,
1709                       struct address_space *mapping,
1710                       bool need_dma32)
1711{
1712        struct ttm_bo_global *glob = &ttm_bo_glob;
1713        int ret;
1714
1715        ret = ttm_bo_global_init();
1716        if (ret)
1717                return ret;
1718
1719        bdev->driver = driver;
1720
1721        memset(bdev->man, 0, sizeof(bdev->man));
1722
1723        /*
1724         * Initialize the system memory buffer type.
1725         * Other types need to be driver / IOCTL initialized.
1726         */
1727        ret = ttm_bo_init_mm(bdev, TTM_PL_SYSTEM, 0);
1728        if (unlikely(ret != 0))
1729                goto out_no_sys;
1730
1731        drm_vma_offset_manager_init(&bdev->vma_manager,
1732                                    DRM_FILE_PAGE_OFFSET_START,
1733                                    DRM_FILE_PAGE_OFFSET_SIZE);
1734        INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
1735        INIT_LIST_HEAD(&bdev->ddestroy);
1736        bdev->dev_mapping = mapping;
1737        bdev->glob = glob;
1738        bdev->need_dma32 = need_dma32;
1739        mutex_lock(&ttm_global_mutex);
1740        list_add_tail(&bdev->device_list, &glob->device_list);
1741        mutex_unlock(&ttm_global_mutex);
1742
1743        return 0;
1744out_no_sys:
1745        ttm_bo_global_release();
1746        return ret;
1747}
1748EXPORT_SYMBOL(ttm_bo_device_init);
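
/*
 * Illustrative sketch (not upstream code): bringing up a ttm_bo_device at
 * driver load time.  The my_driver_funcs and vram_pages parameters are
 * hypothetical; the address space typically comes from the DRM device's
 * anonymous inode mapping.
 */
static inline int example_device_setup(struct ttm_bo_device *bdev,
                                       struct ttm_bo_driver *my_driver_funcs,
                                       struct address_space *mapping,
                                       unsigned long vram_pages,
                                       bool need_dma32)
{
        int ret;

        ret = ttm_bo_device_init(bdev, my_driver_funcs, mapping, need_dma32);
        if (ret)
                return ret;

        /* TTM_PL_SYSTEM is set up above; other domains are added here. */
        ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM, vram_pages);
        if (ret)
                ttm_bo_device_release(bdev);

        return ret;
}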
1749
1750/*
1751 * buffer object vm functions.
1752 */
1753
1754bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
1755{
1756        struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
1757
1758        if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
1759                if (mem->mem_type == TTM_PL_SYSTEM)
1760                        return false;
1761
1762                if (man->flags & TTM_MEMTYPE_FLAG_CMA)
1763                        return false;
1764
1765                if (mem->placement & TTM_PL_FLAG_CACHED)
1766                        return false;
1767        }
1768        return true;
1769}
1770
1771void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo)
1772{
1773        struct ttm_bo_device *bdev = bo->bdev;
1774
1775        drm_vma_node_unmap(&bo->vma_node, bdev->dev_mapping);
1776        ttm_mem_io_free_vm(bo);
1777}
1778
1779void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
1780{
1781        struct ttm_bo_device *bdev = bo->bdev;
1782        struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];
1783
1784        ttm_mem_io_lock(man, false);
1785        ttm_bo_unmap_virtual_locked(bo);
1786        ttm_mem_io_unlock(man);
1787}
1788
1789
1790EXPORT_SYMBOL(ttm_bo_unmap_virtual);
1791
1792int ttm_bo_wait(struct ttm_buffer_object *bo,
1793                bool interruptible, bool no_wait)
1794{
1795        long timeout = 15 * HZ;
1796
1797        if (no_wait) {
1798                if (reservation_object_test_signaled_rcu(bo->resv, true))
1799                        return 0;
1800                else
1801                        return -EBUSY;
1802        }
1803
1804        timeout = reservation_object_wait_timeout_rcu(bo->resv, true,
1805                                                      interruptible, timeout);
1806        if (timeout < 0)
1807                return timeout;
1808
1809        if (timeout == 0)
1810                return -EBUSY;
1811
1812        reservation_object_add_excl_fence(bo->resv, NULL);
1813        return 0;
1814}
1815EXPORT_SYMBOL(ttm_bo_wait);
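
/*
 * Illustrative sketch (not upstream code): waiting for a BO to become idle.
 * ttm_bo_wait() is normally called with the reservation held, since the
 * final reservation_object_add_excl_fence() requires it; the reserve /
 * unreserve bracket below reflects that assumption.
 */
static inline int example_wait_idle(struct ttm_buffer_object *bo,
                                    bool interruptible)
{
        int ret;

        ret = ttm_bo_reserve(bo, interruptible, false, NULL);
        if (ret)
                return ret;

        /* no_wait == false: block until every fence on bo->resv signals. */
        ret = ttm_bo_wait(bo, interruptible, false);
        ttm_bo_unreserve(bo);
        return ret;
}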
1816
1817int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
1818{
1819        int ret = 0;
1820
1821        /*
1822         * Using ttm_bo_reserve makes sure the lru lists are updated.
1823         */
1824
1825        ret = ttm_bo_reserve(bo, true, no_wait, NULL);
1826        if (unlikely(ret != 0))
1827                return ret;
1828        ret = ttm_bo_wait(bo, true, no_wait);
1829        if (likely(ret == 0))
1830                atomic_inc(&bo->cpu_writers);
1831        ttm_bo_unreserve(bo);
1832        return ret;
1833}
1834EXPORT_SYMBOL(ttm_bo_synccpu_write_grab);
1835
1836void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo)
1837{
1838        atomic_dec(&bo->cpu_writers);
1839}
1840EXPORT_SYMBOL(ttm_bo_synccpu_write_release);
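
/*
 * Illustrative sketch (not upstream code): bracketing a CPU write to a
 * BO's backing storage with the synccpu helpers, for example around a
 * ttm_bo_kmap() of the buffer.  Every successful grab must be paired with
 * exactly one release.
 */
static inline int example_cpu_write(struct ttm_buffer_object *bo)
{
        int ret;

        ret = ttm_bo_synccpu_write_grab(bo, false);
        if (ret)
                return ret;

        /* ... CPU writes to the buffer contents go here ... */

        ttm_bo_synccpu_write_release(bo);
        return 0;
}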
1841
1842/**
1843 * A buffer object shrink method that tries to swap out the first
1844 * buffer object on the bo_global::swap_lru list.
1845 */
1846int ttm_bo_swapout(struct ttm_bo_global *glob, struct ttm_operation_ctx *ctx)
1847{
1848        struct ttm_buffer_object *bo;
1849        int ret = -EBUSY;
1850        bool locked;
1851        unsigned i;
1852
1853        spin_lock(&glob->lru_lock);
1854        for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
1855                list_for_each_entry(bo, &glob->swap_lru[i], swap) {
1856                        if (ttm_bo_evict_swapout_allowable(bo, ctx, &locked,
1857                                                           NULL)) {
1858                                ret = 0;
1859                                break;
1860                        }
1861                }
1862                if (!ret)
1863                        break;
1864        }
1865
1866        if (ret) {
1867                spin_unlock(&glob->lru_lock);
1868                return ret;
1869        }
1870
1871        kref_get(&bo->list_kref);
1872
1873        if (!list_empty(&bo->ddestroy)) {
1874                ret = ttm_bo_cleanup_refs(bo, false, false, locked);
1875                kref_put(&bo->list_kref, ttm_bo_release_list);
1876                return ret;
1877        }
1878
1879        ttm_bo_del_from_lru(bo);
1880        spin_unlock(&glob->lru_lock);
1881
1882        /**
1883         * Move to system cached
1884         */
1885
1886        if (bo->mem.mem_type != TTM_PL_SYSTEM ||
1887            bo->ttm->caching_state != tt_cached) {
1888                struct ttm_operation_ctx ctx = { false, false };
1889                struct ttm_mem_reg evict_mem;
1890
1891                evict_mem = bo->mem;
1892                evict_mem.mm_node = NULL;
1893                evict_mem.placement = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
1894                evict_mem.mem_type = TTM_PL_SYSTEM;
1895
1896                ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, &ctx);
1897                if (unlikely(ret != 0))
1898                        goto out;
1899        }
1900
1901        /**
1902         * Make sure BO is idle.
1903         */
1904
1905        ret = ttm_bo_wait(bo, false, false);
1906        if (unlikely(ret != 0))
1907                goto out;
1908
1909        ttm_bo_unmap_virtual(bo);
1910
1911        /**
1912         * Swap out. Buffer will be swapped in again as soon as
1913         * anyone tries to access a ttm page.
1914         */
1915
1916        if (bo->bdev->driver->swap_notify)
1917                bo->bdev->driver->swap_notify(bo);
1918
1919        ret = ttm_tt_swapout(bo->ttm, bo->persistent_swap_storage);
1920out:
1921
1922        /**
1924         * Unreserve without putting on LRU to avoid swapping out an
1925         * already swapped buffer.
1926         */
1927        if (locked)
1928                reservation_object_unlock(bo->resv);
1929        kref_put(&bo->list_kref, ttm_bo_release_list);
1930        return ret;
1931}
1932EXPORT_SYMBOL(ttm_bo_swapout);
1933
1934void ttm_bo_swapout_all(struct ttm_bo_device *bdev)
1935{
1936        struct ttm_operation_ctx ctx = {
1937                .interruptible = false,
1938                .no_wait_gpu = false
1939        };
1940
1941        while (ttm_bo_swapout(bdev->glob, &ctx) == 0)
1942                ;
1943}
1944EXPORT_SYMBOL(ttm_bo_swapout_all);
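
/*
 * Illustrative sketch (not upstream code): some drivers call
 * ttm_bo_swapout_all() from their freeze/hibernation path so that every
 * swappable BO ends up in shmem before the image is written; the buffers
 * are faulted back in on first use after resume.
 */
static inline void example_freeze(struct ttm_bo_device *bdev)
{
        ttm_bo_swapout_all(bdev);
}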
1945
1946/**
1947 * ttm_bo_wait_unreserved - interruptible wait for a buffer object to become
1948 * unreserved
1949 *
1950 * @bo: Pointer to buffer
1951 */
1952int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo)
1953{
1954        int ret;
1955
1956        /*
1957         * In the absence of a wait_unlocked API, use the
1958         * bo::wu_mutex to avoid triggering livelocks due to
1959         * concurrent use of this function. Note that this use of
1960         * bo::wu_mutex can go away if we change locking order to
1961         * mmap_sem -> bo::reserve.
1962         */
1963        ret = mutex_lock_interruptible(&bo->wu_mutex);
1964        if (unlikely(ret != 0))
1965                return -ERESTARTSYS;
1966        if (!ww_mutex_is_locked(&bo->resv->lock))
1967                goto out_unlock;
1968        ret = reservation_object_lock_interruptible(bo->resv, NULL);
1969        if (ret == -EINTR)
1970                ret = -ERESTARTSYS;
1971        if (unlikely(ret != 0))
1972                goto out_unlock;
1973        reservation_object_unlock(bo->resv);
1974
1975out_unlock:
1976        mutex_unlock(&bo->wu_mutex);
1977        return ret;
1978}
1979
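/*
 * Illustrative sketch (not upstream code): how a CPU fault handler such as
 * ttm_bo_vm_fault() in ttm_bo_vm.c uses ttm_bo_wait_unreserved().  When
 * the BO is already reserved, the handler can drop mmap_sem, wait for the
 * reservation to be released and ask the core MM to retry the fault.  The
 * flag handling below is simplified (FAULT_FLAG_RETRY_NOWAIT is ignored).
 */
static inline vm_fault_t example_fault_retry(struct ttm_buffer_object *bo,
                                             struct vm_fault *vmf)
{
        if (vmf->flags & FAULT_FLAG_ALLOW_RETRY) {
                ttm_bo_get(bo);                      /* keep the BO alive */
                up_read(&vmf->vma->vm_mm->mmap_sem); /* drop mmap_sem     */
                (void)ttm_bo_wait_unreserved(bo);
                ttm_bo_put(bo);
                return VM_FAULT_RETRY;
        }

        return VM_FAULT_NOPAGE;
}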