linux/drivers/gpu/drm/i915/i915_vma.c
/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/sched/mm.h>
#include <drm/drm_gem.h>

#include "display/intel_frontbuffer.h"

#include "gt/intel_engine.h"
#include "gt/intel_engine_heartbeat.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_requests.h"

#include "i915_drv.h"
#include "i915_globals.h"
#include "i915_sw_fence_work.h"
#include "i915_trace.h"
#include "i915_vma.h"

static struct i915_global_vma {
        struct i915_global base;
        struct kmem_cache *slab_vmas;
} global;

struct i915_vma *i915_vma_alloc(void)
{
        return kmem_cache_zalloc(global.slab_vmas, GFP_KERNEL);
}

void i915_vma_free(struct i915_vma *vma)
{
        return kmem_cache_free(global.slab_vmas, vma);
}

#if IS_ENABLED(CONFIG_DRM_I915_ERRLOG_GEM) && IS_ENABLED(CONFIG_DRM_DEBUG_MM)

#include <linux/stackdepot.h>

static void vma_print_allocator(struct i915_vma *vma, const char *reason)
{
        unsigned long *entries;
        unsigned int nr_entries;
        char buf[512];

        if (!vma->node.stack) {
                DRM_DEBUG_DRIVER("vma.node [%08llx + %08llx] %s: unknown owner\n",
                                 vma->node.start, vma->node.size, reason);
                return;
        }

        nr_entries = stack_depot_fetch(vma->node.stack, &entries);
        stack_trace_snprint(buf, sizeof(buf), entries, nr_entries, 0);
        DRM_DEBUG_DRIVER("vma.node [%08llx + %08llx] %s: inserted at %s\n",
                         vma->node.start, vma->node.size, reason, buf);
}

#else

static void vma_print_allocator(struct i915_vma *vma, const char *reason)
{
}

#endif

static inline struct i915_vma *active_to_vma(struct i915_active *ref)
{
        return container_of(ref, typeof(struct i915_vma), active);
}

static int __i915_vma_active(struct i915_active *ref)
{
        return i915_vma_tryget(active_to_vma(ref)) ? 0 : -ENOENT;
}

__i915_active_call
static void __i915_vma_retire(struct i915_active *ref)
{
        i915_vma_put(active_to_vma(ref));
}

static struct i915_vma *
vma_create(struct drm_i915_gem_object *obj,
           struct i915_address_space *vm,
           const struct i915_ggtt_view *view)
{
        struct i915_vma *pos = ERR_PTR(-E2BIG);
        struct i915_vma *vma;
        struct rb_node *rb, **p;

        /* The aliasing_ppgtt should never be used directly! */
        GEM_BUG_ON(vm == &vm->gt->ggtt->alias->vm);

        vma = i915_vma_alloc();
        if (vma == NULL)
                return ERR_PTR(-ENOMEM);

        kref_init(&vma->ref);
        mutex_init(&vma->pages_mutex);
        vma->vm = i915_vm_get(vm);
        vma->ops = &vm->vma_ops;
        vma->obj = obj;
        vma->resv = obj->base.resv;
        vma->size = obj->base.size;
        vma->display_alignment = I915_GTT_MIN_ALIGNMENT;

        i915_active_init(&vma->active, __i915_vma_active, __i915_vma_retire);

        /* Declare ourselves safe for use inside shrinkers */
        if (IS_ENABLED(CONFIG_LOCKDEP)) {
                fs_reclaim_acquire(GFP_KERNEL);
                might_lock(&vma->active.mutex);
                fs_reclaim_release(GFP_KERNEL);
        }

        INIT_LIST_HEAD(&vma->closed_link);

        if (view && view->type != I915_GGTT_VIEW_NORMAL) {
                vma->ggtt_view = *view;
                if (view->type == I915_GGTT_VIEW_PARTIAL) {
                        GEM_BUG_ON(range_overflows_t(u64,
                                                     view->partial.offset,
                                                     view->partial.size,
                                                     obj->base.size >> PAGE_SHIFT));
                        vma->size = view->partial.size;
                        vma->size <<= PAGE_SHIFT;
                        GEM_BUG_ON(vma->size > obj->base.size);
                } else if (view->type == I915_GGTT_VIEW_ROTATED) {
                        vma->size = intel_rotation_info_size(&view->rotated);
                        vma->size <<= PAGE_SHIFT;
                } else if (view->type == I915_GGTT_VIEW_REMAPPED) {
                        vma->size = intel_remapped_info_size(&view->remapped);
                        vma->size <<= PAGE_SHIFT;
                }
        }

        if (unlikely(vma->size > vm->total))
                goto err_vma;

        GEM_BUG_ON(!IS_ALIGNED(vma->size, I915_GTT_PAGE_SIZE));

        spin_lock(&obj->vma.lock);

        if (i915_is_ggtt(vm)) {
                if (unlikely(overflows_type(vma->size, u32)))
                        goto err_unlock;

                vma->fence_size = i915_gem_fence_size(vm->i915, vma->size,
                                                      i915_gem_object_get_tiling(obj),
                                                      i915_gem_object_get_stride(obj));
                if (unlikely(vma->fence_size < vma->size || /* overflow */
                             vma->fence_size > vm->total))
                        goto err_unlock;

                GEM_BUG_ON(!IS_ALIGNED(vma->fence_size, I915_GTT_MIN_ALIGNMENT));

                vma->fence_alignment = i915_gem_fence_alignment(vm->i915, vma->size,
                                                                i915_gem_object_get_tiling(obj),
                                                                i915_gem_object_get_stride(obj));
                GEM_BUG_ON(!is_power_of_2(vma->fence_alignment));

                __set_bit(I915_VMA_GGTT_BIT, __i915_vma_flags(vma));
        }

        rb = NULL;
        p = &obj->vma.tree.rb_node;
        while (*p) {
                long cmp;

                rb = *p;
                pos = rb_entry(rb, struct i915_vma, obj_node);

                /*
                 * If the view already exists in the tree, another thread
                 * already created a matching vma, so return the older instance
                 * and dispose of ours.
                 */
                cmp = i915_vma_compare(pos, vm, view);
                if (cmp < 0)
                        p = &rb->rb_right;
                else if (cmp > 0)
                        p = &rb->rb_left;
                else
                        goto err_unlock;
        }
        rb_link_node(&vma->obj_node, rb, p);
        rb_insert_color(&vma->obj_node, &obj->vma.tree);

        if (i915_vma_is_ggtt(vma))
                /*
                 * We put the GGTT vma at the start of the vma-list, followed
                 * by the ppGTT vma. This allows us to break early when
                 * iterating over only the GGTT vma for an object, see
                 * for_each_ggtt_vma()
                 */
                list_add(&vma->obj_link, &obj->vma.list);
        else
                list_add_tail(&vma->obj_link, &obj->vma.list);

        spin_unlock(&obj->vma.lock);

        return vma;

err_unlock:
        spin_unlock(&obj->vma.lock);
err_vma:
        i915_vm_put(vm);
        i915_vma_free(vma);
        return pos;
}

static struct i915_vma *
vma_lookup(struct drm_i915_gem_object *obj,
           struct i915_address_space *vm,
           const struct i915_ggtt_view *view)
{
        struct rb_node *rb;

        rb = obj->vma.tree.rb_node;
        while (rb) {
                struct i915_vma *vma = rb_entry(rb, struct i915_vma, obj_node);
                long cmp;

                cmp = i915_vma_compare(vma, vm, view);
                if (cmp == 0)
                        return vma;

                if (cmp < 0)
                        rb = rb->rb_right;
                else
                        rb = rb->rb_left;
        }

        return NULL;
}

/**
 * i915_vma_instance - return the singleton instance of the VMA
 * @obj: parent &struct drm_i915_gem_object to be mapped
 * @vm: address space in which the mapping is located
 * @view: additional mapping requirements
 *
 * i915_vma_instance() looks up an existing VMA of the @obj in the @vm with
 * the same @view characteristics. If a match is not found, one is created.
 * Once created, the VMA is kept until either the object is freed, or the
 * address space is closed.
 *
 * Returns the vma, or an error pointer.
 */
struct i915_vma *
i915_vma_instance(struct drm_i915_gem_object *obj,
                  struct i915_address_space *vm,
                  const struct i915_ggtt_view *view)
{
        struct i915_vma *vma;

        GEM_BUG_ON(view && !i915_is_ggtt(vm));
        GEM_BUG_ON(!atomic_read(&vm->open));

        spin_lock(&obj->vma.lock);
        vma = vma_lookup(obj, vm, view);
        spin_unlock(&obj->vma.lock);

        /* vma_create() will resolve the race if another creates the vma */
        if (unlikely(!vma))
                vma = vma_create(obj, vm, view);

        GEM_BUG_ON(!IS_ERR(vma) && i915_vma_compare(vma, vm, view));
        return vma;
}
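
/*
 * Usage sketch (illustrative only, not driver code): a typical caller looks
 * up the singleton vma for an object in the GGTT and then pins it. Error
 * unwind and locking are elided; "obj" and "ggtt" stand for a
 * caller-provided object and its GGTT.
 *
 *      struct i915_vma *vma;
 *      int err;
 *
 *      vma = i915_vma_instance(obj, &ggtt->vm, NULL);
 *      if (IS_ERR(vma))
 *              return PTR_ERR(vma);
 *
 *      err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
 *      if (err)
 *              return err;
 *      ...
 *      i915_vma_unpin(vma);
 */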

struct i915_vma_work {
        struct dma_fence_work base;
        struct i915_address_space *vm;
        struct i915_vm_pt_stash stash;
        struct i915_vma *vma;
        struct drm_i915_gem_object *pinned;
        struct i915_sw_dma_fence_cb cb;
        enum i915_cache_level cache_level;
        unsigned int flags;
};

static int __vma_bind(struct dma_fence_work *work)
{
        struct i915_vma_work *vw = container_of(work, typeof(*vw), base);
        struct i915_vma *vma = vw->vma;

        vma->ops->bind_vma(vw->vm, &vw->stash,
                           vma, vw->cache_level, vw->flags);
        return 0;
}

static void __vma_release(struct dma_fence_work *work)
{
        struct i915_vma_work *vw = container_of(work, typeof(*vw), base);

        if (vw->pinned) {
                __i915_gem_object_unpin_pages(vw->pinned);
                i915_gem_object_put(vw->pinned);
        }

        i915_vm_free_pt_stash(vw->vm, &vw->stash);
        i915_vm_put(vw->vm);
}

static const struct dma_fence_work_ops bind_ops = {
        .name = "bind",
        .work = __vma_bind,
        .release = __vma_release,
};

struct i915_vma_work *i915_vma_work(void)
{
        struct i915_vma_work *vw;

        vw = kzalloc(sizeof(*vw), GFP_KERNEL);
        if (!vw)
                return NULL;

        dma_fence_work_init(&vw->base, &bind_ops);
        vw->base.dma.error = -EAGAIN; /* disable the worker by default */

        return vw;
}
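
/*
 * The worker above is normally consumed by i915_vma_pin_ww(): the work is
 * preallocated outside vm->mutex, handed to i915_vma_bind(), and then
 * committed. A rough sketch of that flow (error paths elided; see
 * i915_vma_pin_ww() for the real sequence):
 *
 *      struct i915_vma_work *work;
 *
 *      work = i915_vma_work();
 *      if (!work)
 *              return -ENOMEM;
 *      work->vm = i915_vm_get(vma->vm);
 *      err = i915_vma_bind(vma, cache_level, bind_flags, work);
 *      dma_fence_work_commit_imm(&work->base);
 */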

int i915_vma_wait_for_bind(struct i915_vma *vma)
{
        int err = 0;

        if (rcu_access_pointer(vma->active.excl.fence)) {
                struct dma_fence *fence;

                rcu_read_lock();
                fence = dma_fence_get_rcu_safe(&vma->active.excl.fence);
                rcu_read_unlock();
                if (fence) {
                        err = dma_fence_wait(fence, MAX_SCHEDULE_TIMEOUT);
                        dma_fence_put(fence);
                }
        }

        return err;
}

/**
 * i915_vma_bind - Sets up PTEs for a VMA in its corresponding address space.
 * @vma: VMA to map
 * @cache_level: mapping cache level
 * @flags: flags like global or local mapping
 * @work: preallocated worker for allocating and binding the PTE
 *
 * DMA addresses are taken from the scatter-gather table of this object (or of
 * this VMA in case of non-default GGTT views) and PTE entries set up.
 * Note that DMA addresses are also the only part of the SG table we care about.
 */
int i915_vma_bind(struct i915_vma *vma,
                  enum i915_cache_level cache_level,
                  u32 flags,
                  struct i915_vma_work *work)
{
        u32 bind_flags;
        u32 vma_flags;

        GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
        GEM_BUG_ON(vma->size > vma->node.size);

        if (GEM_DEBUG_WARN_ON(range_overflows(vma->node.start,
                                              vma->node.size,
                                              vma->vm->total)))
                return -ENODEV;

        if (GEM_DEBUG_WARN_ON(!flags))
                return -EINVAL;

        bind_flags = flags;
        bind_flags &= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;

        vma_flags = atomic_read(&vma->flags);
        vma_flags &= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;

        bind_flags &= ~vma_flags;
        if (bind_flags == 0)
                return 0;

        GEM_BUG_ON(!vma->pages);

        trace_i915_vma_bind(vma, bind_flags);
        if (work && bind_flags & vma->vm->bind_async_flags) {
                struct dma_fence *prev;

                work->vma = vma;
                work->cache_level = cache_level;
                work->flags = bind_flags;

                /*
                 * Note we only want to chain up to the migration fence on
                 * the pages (not the object itself). As we don't track that,
                 * yet, we have to use the exclusive fence instead.
                 *
                 * Also note that we do not want to track the async vma as
                 * part of the obj->resv->excl_fence as it only affects
                 * execution and not content or object's backing store lifetime.
                 */
                prev = i915_active_set_exclusive(&vma->active, &work->base.dma);
                if (prev) {
                        __i915_sw_fence_await_dma_fence(&work->base.chain,
                                                        prev,
                                                        &work->cb);
                        dma_fence_put(prev);
                }

                work->base.dma.error = 0; /* enable the queue_work() */

                if (vma->obj) {
                        __i915_gem_object_pin_pages(vma->obj);
                        work->pinned = i915_gem_object_get(vma->obj);
                }
        } else {
                vma->ops->bind_vma(vma->vm, NULL, vma, cache_level, bind_flags);
        }

        atomic_or(bind_flags, &vma->flags);
        return 0;
}

void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
{
        void __iomem *ptr;
        int err;

        if (GEM_WARN_ON(!i915_vma_is_map_and_fenceable(vma))) {
                err = -ENODEV;
                goto err;
        }

        GEM_BUG_ON(!i915_vma_is_ggtt(vma));
        GEM_BUG_ON(!i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND));

        ptr = READ_ONCE(vma->iomap);
        if (ptr == NULL) {
                ptr = io_mapping_map_wc(&i915_vm_to_ggtt(vma->vm)->iomap,
                                        vma->node.start,
                                        vma->node.size);
                if (ptr == NULL) {
                        err = -ENOMEM;
                        goto err;
                }

                if (unlikely(cmpxchg(&vma->iomap, NULL, ptr))) {
                        io_mapping_unmap(ptr);
                        ptr = vma->iomap;
                }
        }

        __i915_vma_pin(vma);

        err = i915_vma_pin_fence(vma);
        if (err)
                goto err_unpin;

        i915_vma_set_ggtt_write(vma);

        /* NB Access through the GTT requires the device to be awake. */
        return ptr;

err_unpin:
        __i915_vma_unpin(vma);
err:
        return IO_ERR_PTR(err);
}

void i915_vma_flush_writes(struct i915_vma *vma)
{
        if (i915_vma_unset_ggtt_write(vma))
                intel_gt_flush_ggtt_writes(vma->vm->gt);
}

void i915_vma_unpin_iomap(struct i915_vma *vma)
{
        GEM_BUG_ON(vma->iomap == NULL);

        i915_vma_flush_writes(vma);

        i915_vma_unpin_fence(vma);
        i915_vma_unpin(vma);
}
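
/*
 * The two helpers above pair around GGTT access through the aperture.
 * A rough sketch of the expected pattern (error handling elided; "offset"
 * and "value" are illustrative):
 *
 *      void __iomem *map;
 *
 *      map = i915_vma_pin_iomap(vma);
 *      if (IS_ERR(map))
 *              return PTR_ERR(map);
 *      writel(value, map + offset);
 *      i915_vma_unpin_iomap(vma);
 *
 * Note the vma must already be bound in the mappable aperture, and the
 * device must be kept awake while the mapping is in use.
 */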

void i915_vma_unpin_and_release(struct i915_vma **p_vma, unsigned int flags)
{
        struct i915_vma *vma;
        struct drm_i915_gem_object *obj;

        vma = fetch_and_zero(p_vma);
        if (!vma)
                return;

        obj = vma->obj;
        GEM_BUG_ON(!obj);

        i915_vma_unpin(vma);

        if (flags & I915_VMA_RELEASE_MAP)
                i915_gem_object_unpin_map(obj);

        i915_gem_object_put(obj);
}

bool i915_vma_misplaced(const struct i915_vma *vma,
                        u64 size, u64 alignment, u64 flags)
{
        if (!drm_mm_node_allocated(&vma->node))
                return false;

        if (test_bit(I915_VMA_ERROR_BIT, __i915_vma_flags(vma)))
                return true;

        if (vma->node.size < size)
                return true;

        GEM_BUG_ON(alignment && !is_power_of_2(alignment));
        if (alignment && !IS_ALIGNED(vma->node.start, alignment))
                return true;

        if (flags & PIN_MAPPABLE && !i915_vma_is_map_and_fenceable(vma))
                return true;

        if (flags & PIN_OFFSET_BIAS &&
            vma->node.start < (flags & PIN_OFFSET_MASK))
                return true;

        if (flags & PIN_OFFSET_FIXED &&
            vma->node.start != (flags & PIN_OFFSET_MASK))
                return true;

        return false;
}
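
/*
 * Note for the PIN_OFFSET_* checks above: the offset is packed into the
 * flags word itself, masked by PIN_OFFSET_MASK. An illustrative request
 * for a fixed, page-aligned GGTT offset would look roughly like:
 *
 *      err = i915_vma_pin(vma, 0, 0,
 *                         PIN_GLOBAL | PIN_OFFSET_FIXED | offset);
 *
 * where "offset" must be page-aligned and fit within PIN_OFFSET_MASK.
 */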

void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
{
        bool mappable, fenceable;

        GEM_BUG_ON(!i915_vma_is_ggtt(vma));
        GEM_BUG_ON(!vma->fence_size);

        fenceable = (vma->node.size >= vma->fence_size &&
                     IS_ALIGNED(vma->node.start, vma->fence_alignment));

        mappable = vma->node.start + vma->fence_size <= i915_vm_to_ggtt(vma->vm)->mappable_end;

        if (mappable && fenceable)
                set_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
        else
                clear_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
}

bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long color)
{
        struct drm_mm_node *node = &vma->node;
        struct drm_mm_node *other;

        /*
         * On some machines we have to be careful when putting differing types
         * of snoopable memory together to avoid the prefetcher crossing memory
         * domains and dying. During vm initialisation, we decide whether or not
         * these constraints apply and set the drm_mm.color_adjust
         * appropriately.
         */
        if (!i915_vm_has_cache_coloring(vma->vm))
                return true;

        /* Only valid to be called on an already inserted vma */
        GEM_BUG_ON(!drm_mm_node_allocated(node));
        GEM_BUG_ON(list_empty(&node->node_list));

        other = list_prev_entry(node, node_list);
        if (i915_node_color_differs(other, color) &&
            !drm_mm_hole_follows(other))
                return false;

        other = list_next_entry(node, node_list);
        if (i915_node_color_differs(other, color) &&
            !drm_mm_hole_follows(node))
                return false;

        return true;
}
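
/*
 * Illustration of the coloring invariant checked above: neighbouring nodes
 * of differing cache "color" must be separated by a hole, e.g.
 *
 *      [ snooped | HOLE | uncached ]   -> valid
 *      [ snooped | uncached ]          -> invalid
 *
 * so that the prefetcher never walks straight from one snoop domain into
 * another. The vm's drm_mm.color_adjust callback arranges this at
 * insertion time.
 */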

/**
 * i915_vma_insert - finds a slot for the vma in its address space
 * @vma: the vma
 * @size: requested size in bytes (can be larger than the VMA)
 * @alignment: required alignment
 * @flags: mask of PIN_* flags to use
 *
 * First we try to allocate some free space that meets the requirements for
 * the VMA. Failing that, if the flags permit, it will evict an old VMA,
 * preferably the oldest idle entry, to make room for the new VMA.
 *
 * Returns:
 * 0 on success, negative error code otherwise.
 */
static int
i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
{
        unsigned long color;
        u64 start, end;
        int ret;

        GEM_BUG_ON(i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
        GEM_BUG_ON(drm_mm_node_allocated(&vma->node));

        size = max(size, vma->size);
        alignment = max(alignment, vma->display_alignment);
        if (flags & PIN_MAPPABLE) {
                size = max_t(typeof(size), size, vma->fence_size);
                alignment = max_t(typeof(alignment),
                                  alignment, vma->fence_alignment);
        }

        GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
        GEM_BUG_ON(!IS_ALIGNED(alignment, I915_GTT_MIN_ALIGNMENT));
        GEM_BUG_ON(!is_power_of_2(alignment));

        start = flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
        GEM_BUG_ON(!IS_ALIGNED(start, I915_GTT_PAGE_SIZE));

        end = vma->vm->total;
        if (flags & PIN_MAPPABLE)
                end = min_t(u64, end, i915_vm_to_ggtt(vma->vm)->mappable_end);
        if (flags & PIN_ZONE_4G)
                end = min_t(u64, end, (1ULL << 32) - I915_GTT_PAGE_SIZE);
        GEM_BUG_ON(!IS_ALIGNED(end, I915_GTT_PAGE_SIZE));

        /* If binding the object/GGTT view requires more space than the entire
         * aperture has, reject it early before evicting everything in a vain
         * attempt to find space.
         */
        if (size > end) {
                DRM_DEBUG("Attempting to bind an object larger than the aperture: request=%llu > %s aperture=%llu\n",
                          size, flags & PIN_MAPPABLE ? "mappable" : "total",
                          end);
                return -ENOSPC;
        }

        color = 0;
        if (vma->obj && i915_vm_has_cache_coloring(vma->vm))
                color = vma->obj->cache_level;

        if (flags & PIN_OFFSET_FIXED) {
                u64 offset = flags & PIN_OFFSET_MASK;
                if (!IS_ALIGNED(offset, alignment) ||
                    range_overflows(offset, size, end))
                        return -EINVAL;

                ret = i915_gem_gtt_reserve(vma->vm, &vma->node,
                                           size, offset, color,
                                           flags);
                if (ret)
                        return ret;
        } else {
                /*
                 * We only support huge gtt pages through the 48b PPGTT,
                 * however we also don't want to force any alignment for
                 * objects which need to be tightly packed into the low 32bits.
                 *
                 * Note that we assume that GGTT are limited to 4GiB for the
                 * foreseeable future. See also i915_ggtt_offset().
                 */
                if (upper_32_bits(end - 1) &&
                    vma->page_sizes.sg > I915_GTT_PAGE_SIZE) {
                        /*
                         * We can't mix 64K and 4K PTEs in the same page-table
                         * (2M block), and so to avoid the ugliness and
                         * complexity of coloring we opt for just aligning 64K
                         * objects to 2M.
                         */
                        u64 page_alignment =
                                rounddown_pow_of_two(vma->page_sizes.sg |
                                                     I915_GTT_PAGE_SIZE_2M);

                        /*
                         * Check we don't expand for the limited Global GTT
                         * (mappable aperture is even more precious!). This
                         * also checks that we exclude the aliasing-ppgtt.
                         */
                        GEM_BUG_ON(i915_vma_is_ggtt(vma));

                        alignment = max(alignment, page_alignment);

                        if (vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K)
                                size = round_up(size, I915_GTT_PAGE_SIZE_2M);
                }

                ret = i915_gem_gtt_insert(vma->vm, &vma->node,
                                          size, alignment, color,
                                          start, end, flags);
                if (ret)
                        return ret;

                GEM_BUG_ON(vma->node.start < start);
                GEM_BUG_ON(vma->node.start + vma->node.size > end);
        }
        GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
        GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, color));

        list_add_tail(&vma->vm_link, &vma->vm->bound_list);

        return 0;
}

static void
i915_vma_detach(struct i915_vma *vma)
{
        GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
        GEM_BUG_ON(i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));

        /*
         * And finally now the object is completely decoupled from this
         * vma, we can drop its hold on the backing storage and allow
         * it to be reaped by the shrinker.
         */
        list_del(&vma->vm_link);
}

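/*
 * "qad" below reads as quick-and-dirty: an optimistic, lockless attempt to
 * take an extra pin on an already-bound vma before falling back to
 * vm->mutex. (This gloss on the name is an assumption; the driver does not
 * spell it out.)
 */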
static bool try_qad_pin(struct i915_vma *vma, unsigned int flags)
{
        unsigned int bound;
        bool pinned = true;

        bound = atomic_read(&vma->flags);
        do {
                if (unlikely(flags & ~bound))
                        return false;

                if (unlikely(bound & (I915_VMA_OVERFLOW | I915_VMA_ERROR)))
                        return false;

                if (!(bound & I915_VMA_PIN_MASK))
                        goto unpinned;

                GEM_BUG_ON(((bound + 1) & I915_VMA_PIN_MASK) == 0);
        } while (!atomic_try_cmpxchg(&vma->flags, &bound, bound + 1));

        return true;

unpinned:
        /*
         * If pin_count==0, but we are bound, check under the lock to avoid
         * racing with a concurrent i915_vma_unbind().
         */
        mutex_lock(&vma->vm->mutex);
        do {
                if (unlikely(bound & (I915_VMA_OVERFLOW | I915_VMA_ERROR))) {
                        pinned = false;
                        break;
                }

                if (unlikely(flags & ~bound)) {
                        pinned = false;
                        break;
                }
        } while (!atomic_try_cmpxchg(&vma->flags, &bound, bound + 1));
        mutex_unlock(&vma->vm->mutex);

        return pinned;
}

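/*
 * Note on the scheme below: vma->pages_count is one atomic split in two.
 * The low bits count callers holding the vma's pages; the high bits,
 * shifted by I915_VMA_PAGES_BIAS, count bindings (see vma_unbind_pages()).
 * Each new binding taken in i915_vma_pin_ww() therefore adds
 * 1 | (1 << I915_VMA_PAGES_BIAS), i.e. the I915_VMA_PAGES_ACTIVE increment.
 */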
static int vma_get_pages(struct i915_vma *vma)
{
        int err = 0;

        if (atomic_add_unless(&vma->pages_count, 1, 0))
                return 0;

        /* Allocations ahoy! */
        if (mutex_lock_interruptible(&vma->pages_mutex))
                return -EINTR;

        if (!atomic_read(&vma->pages_count)) {
                if (vma->obj) {
                        err = i915_gem_object_pin_pages(vma->obj);
                        if (err)
                                goto unlock;
                }

                err = vma->ops->set_pages(vma);
                if (err) {
                        if (vma->obj)
                                i915_gem_object_unpin_pages(vma->obj);
                        goto unlock;
                }
        }
        atomic_inc(&vma->pages_count);

unlock:
        mutex_unlock(&vma->pages_mutex);

        return err;
}

static void __vma_put_pages(struct i915_vma *vma, unsigned int count)
{
        /* We allocate under vma_get_pages, so beware the shrinker */
        mutex_lock_nested(&vma->pages_mutex, SINGLE_DEPTH_NESTING);
        GEM_BUG_ON(atomic_read(&vma->pages_count) < count);
        if (atomic_sub_return(count, &vma->pages_count) == 0) {
                vma->ops->clear_pages(vma);
                GEM_BUG_ON(vma->pages);
                if (vma->obj)
                        i915_gem_object_unpin_pages(vma->obj);
        }
        mutex_unlock(&vma->pages_mutex);
}

static void vma_put_pages(struct i915_vma *vma)
{
        if (atomic_add_unless(&vma->pages_count, -1, 1))
                return;

        __vma_put_pages(vma, 1);
}

static void vma_unbind_pages(struct i915_vma *vma)
{
        unsigned int count;

        lockdep_assert_held(&vma->vm->mutex);

        /* The upper portion of pages_count is the number of bindings */
        count = atomic_read(&vma->pages_count);
        count >>= I915_VMA_PAGES_BIAS;
        GEM_BUG_ON(!count);

        __vma_put_pages(vma, count | count << I915_VMA_PAGES_BIAS);
}

int i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
                    u64 size, u64 alignment, u64 flags)
{
        struct i915_vma_work *work = NULL;
        intel_wakeref_t wakeref = 0;
        unsigned int bound;
        int err;

#ifdef CONFIG_PROVE_LOCKING
        if (debug_locks && lockdep_is_held(&vma->vm->i915->drm.struct_mutex))
                WARN_ON(!ww);
#endif

        BUILD_BUG_ON(PIN_GLOBAL != I915_VMA_GLOBAL_BIND);
        BUILD_BUG_ON(PIN_USER != I915_VMA_LOCAL_BIND);

        GEM_BUG_ON(!(flags & (PIN_USER | PIN_GLOBAL)));

        /* First try and grab the pin without rebinding the vma */
        if (try_qad_pin(vma, flags & I915_VMA_BIND_MASK))
                return 0;

        err = vma_get_pages(vma);
        if (err)
                return err;

        if (flags & PIN_GLOBAL)
                wakeref = intel_runtime_pm_get(&vma->vm->i915->runtime_pm);

        if (flags & vma->vm->bind_async_flags) {
                work = i915_vma_work();
                if (!work) {
                        err = -ENOMEM;
                        goto err_rpm;
                }

                work->vm = i915_vm_get(vma->vm);

                /* Allocate enough page directories to cover the PTEs we will use */
                if (vma->vm->allocate_va_range) {
                        err = i915_vm_alloc_pt_stash(vma->vm,
                                                     &work->stash,
                                                     vma->size);
                        if (err)
                                goto err_fence;

                        err = i915_vm_pin_pt_stash(vma->vm,
                                                   &work->stash);
                        if (err)
                                goto err_fence;
                }
        }

        /*
         * Differentiate between user/kernel vma inside the aliasing-ppgtt.
         *
         * We conflate the Global GTT with the user's vma when using the
         * aliasing-ppgtt, but it is still vitally important to try and
         * keep the use cases distinct. For example, userptr objects are
         * not allowed inside the Global GTT as that will cause lock
         * inversions when we have to evict them in the mmu_notifier callbacks -
         * but they are allowed to be part of the user ppGTT which can never
         * be mapped. As such we try to give the distinct users of the same
         * mutex, distinct lockclasses [equivalent to how we keep i915_ggtt
         * and i915_ppgtt separate].
         *
         * NB this may cause us to mask real lock inversions -- while the
         * code is safe today, lockdep may not be able to spot future
         * transgressions.
         */
        err = mutex_lock_interruptible_nested(&vma->vm->mutex,
                                              !(flags & PIN_GLOBAL));
        if (err)
                goto err_fence;

        /* No more allocations allowed now we hold vm->mutex */

        if (unlikely(i915_vma_is_closed(vma))) {
                err = -ENOENT;
                goto err_unlock;
        }

        bound = atomic_read(&vma->flags);
        if (unlikely(bound & I915_VMA_ERROR)) {
                err = -ENOMEM;
                goto err_unlock;
        }

        if (unlikely(!((bound + 1) & I915_VMA_PIN_MASK))) {
                err = -EAGAIN; /* pins are meant to be fairly temporary */
                goto err_unlock;
        }

        if (unlikely(!(flags & ~bound & I915_VMA_BIND_MASK))) {
                __i915_vma_pin(vma);
                goto err_unlock;
        }

        err = i915_active_acquire(&vma->active);
        if (err)
                goto err_unlock;

        if (!(bound & I915_VMA_BIND_MASK)) {
                err = i915_vma_insert(vma, size, alignment, flags);
                if (err)
                        goto err_active;

                if (i915_is_ggtt(vma->vm))
                        __i915_vma_set_map_and_fenceable(vma);
        }

        GEM_BUG_ON(!vma->pages);
        err = i915_vma_bind(vma,
                            vma->obj ? vma->obj->cache_level : 0,
                            flags, work);
        if (err)
                goto err_remove;

        /* There should only be at most 2 active bindings (user, global) */
        GEM_BUG_ON(bound + I915_VMA_PAGES_ACTIVE < bound);
        atomic_add(I915_VMA_PAGES_ACTIVE, &vma->pages_count);
        list_move_tail(&vma->vm_link, &vma->vm->bound_list);

        __i915_vma_pin(vma);
        GEM_BUG_ON(!i915_vma_is_pinned(vma));
        GEM_BUG_ON(!i915_vma_is_bound(vma, flags));
        GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));

err_remove:
        if (!i915_vma_is_bound(vma, I915_VMA_BIND_MASK)) {
                i915_vma_detach(vma);
                drm_mm_remove_node(&vma->node);
        }
err_active:
        i915_active_release(&vma->active);
err_unlock:
        mutex_unlock(&vma->vm->mutex);
err_fence:
        if (work)
                dma_fence_work_commit_imm(&work->base);
err_rpm:
        if (wakeref)
                intel_runtime_pm_put(&vma->vm->i915->runtime_pm, wakeref);
        vma_put_pages(vma);
        return err;
}

static void flush_idle_contexts(struct intel_gt *gt)
{
        struct intel_engine_cs *engine;
        enum intel_engine_id id;

        for_each_engine(engine, gt, id)
                intel_engine_flush_barriers(engine);

        intel_gt_wait_for_idle(gt, MAX_SCHEDULE_TIMEOUT);
}

int i915_ggtt_pin(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
                  u32 align, unsigned int flags)
{
        struct i915_address_space *vm = vma->vm;
        int err;

        GEM_BUG_ON(!i915_vma_is_ggtt(vma));

        do {
                err = i915_vma_pin_ww(vma, ww, 0, align, flags | PIN_GLOBAL);
                if (err != -ENOSPC) {
                        if (!err) {
                                err = i915_vma_wait_for_bind(vma);
                                if (err)
                                        i915_vma_unpin(vma);
                        }
                        return err;
                }

                /* Unlike i915_vma_pin, we don't take no for an answer! */
                flush_idle_contexts(vm->gt);
                if (mutex_lock_interruptible(&vm->mutex) == 0) {
                        i915_gem_evict_vm(vm);
                        mutex_unlock(&vm->mutex);
                }
        } while (1);
}

static void __vma_close(struct i915_vma *vma, struct intel_gt *gt)
{
        /*
         * We defer actually closing, unbinding and destroying the VMA until
         * the next idle point, or if the object is freed in the meantime. By
         * postponing the unbind, we allow for it to be resurrected by the
         * client, avoiding the work required to rebind the VMA. This is
         * advantageous for DRI, where the client/server pass objects
         * between themselves, temporarily opening a local VMA to the
         * object, and then closing it again. The same object is then reused
         * on the next frame (or two, depending on the depth of the swap queue)
         * causing us to rebind the VMA once more. This ends up being a lot
         * of wasted work for the steady state.
         */
        GEM_BUG_ON(i915_vma_is_closed(vma));
        list_add(&vma->closed_link, &gt->closed_vma);
}

void i915_vma_close(struct i915_vma *vma)
{
        struct intel_gt *gt = vma->vm->gt;
        unsigned long flags;

        if (i915_vma_is_ggtt(vma))
                return;

        GEM_BUG_ON(!atomic_read(&vma->open_count));
        if (atomic_dec_and_lock_irqsave(&vma->open_count,
                                        &gt->closed_lock,
                                        flags)) {
                __vma_close(vma, gt);
                spin_unlock_irqrestore(&gt->closed_lock, flags);
        }
}

static void __i915_vma_remove_closed(struct i915_vma *vma)
{
        struct intel_gt *gt = vma->vm->gt;

        spin_lock_irq(&gt->closed_lock);
        list_del_init(&vma->closed_link);
        spin_unlock_irq(&gt->closed_lock);
}

void i915_vma_reopen(struct i915_vma *vma)
{
        if (i915_vma_is_closed(vma))
                __i915_vma_remove_closed(vma);
}

void i915_vma_release(struct kref *ref)
{
        struct i915_vma *vma = container_of(ref, typeof(*vma), ref);

        if (drm_mm_node_allocated(&vma->node)) {
                mutex_lock(&vma->vm->mutex);
                atomic_and(~I915_VMA_PIN_MASK, &vma->flags);
                WARN_ON(__i915_vma_unbind(vma));
                mutex_unlock(&vma->vm->mutex);
                GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
        }
        GEM_BUG_ON(i915_vma_is_active(vma));

        if (vma->obj) {
                struct drm_i915_gem_object *obj = vma->obj;

                spin_lock(&obj->vma.lock);
                list_del(&vma->obj_link);
                if (!RB_EMPTY_NODE(&vma->obj_node))
                        rb_erase(&vma->obj_node, &obj->vma.tree);
                spin_unlock(&obj->vma.lock);
        }

        __i915_vma_remove_closed(vma);
        i915_vm_put(vma->vm);

        i915_active_fini(&vma->active);
        i915_vma_free(vma);
}

void i915_vma_parked(struct intel_gt *gt)
{
        struct i915_vma *vma, *next;
        LIST_HEAD(closed);

        spin_lock_irq(&gt->closed_lock);
        list_for_each_entry_safe(vma, next, &gt->closed_vma, closed_link) {
                struct drm_i915_gem_object *obj = vma->obj;
                struct i915_address_space *vm = vma->vm;

                /* XXX All to avoid keeping a reference on i915_vma itself */

                if (!kref_get_unless_zero(&obj->base.refcount))
                        continue;

                if (!i915_vm_tryopen(vm)) {
                        i915_gem_object_put(obj);
                        continue;
                }

                list_move(&vma->closed_link, &closed);
        }
        spin_unlock_irq(&gt->closed_lock);

        /* As the GT is held idle, no vma can be reopened as we destroy them */
        list_for_each_entry_safe(vma, next, &closed, closed_link) {
                struct drm_i915_gem_object *obj = vma->obj;
                struct i915_address_space *vm = vma->vm;

                INIT_LIST_HEAD(&vma->closed_link);
                __i915_vma_put(vma);

                i915_gem_object_put(obj);
                i915_vm_close(vm);
        }
}

static void __i915_vma_iounmap(struct i915_vma *vma)
{
        GEM_BUG_ON(i915_vma_is_pinned(vma));

        if (vma->iomap == NULL)
                return;

        io_mapping_unmap(vma->iomap);
        vma->iomap = NULL;
}

void i915_vma_revoke_mmap(struct i915_vma *vma)
{
        struct drm_vma_offset_node *node;
        u64 vma_offset;

        if (!i915_vma_has_userfault(vma))
                return;

        GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma));
        GEM_BUG_ON(!vma->obj->userfault_count);

        node = &vma->mmo->vma_node;
        vma_offset = vma->ggtt_view.partial.offset << PAGE_SHIFT;
        unmap_mapping_range(vma->vm->i915->drm.anon_inode->i_mapping,
                            drm_vma_node_offset_addr(node) + vma_offset,
                            vma->size,
                            1);

        i915_vma_unset_userfault(vma);
        if (!--vma->obj->userfault_count)
                list_del(&vma->obj->userfault_link);
}

static int
__i915_request_await_bind(struct i915_request *rq, struct i915_vma *vma)
{
        return __i915_request_await_exclusive(rq, &vma->active);
}

int __i915_vma_move_to_active(struct i915_vma *vma, struct i915_request *rq)
{
        int err;

        GEM_BUG_ON(!i915_vma_is_pinned(vma));

        /* Wait for the vma to be bound before we start! */
        err = __i915_request_await_bind(rq, vma);
        if (err)
                return err;

        return i915_active_add_request(&vma->active, rq);
}

int i915_vma_move_to_active(struct i915_vma *vma,
                            struct i915_request *rq,
                            unsigned int flags)
{
        struct drm_i915_gem_object *obj = vma->obj;
        int err;

        assert_object_held(obj);

        err = __i915_vma_move_to_active(vma, rq);
        if (unlikely(err))
                return err;

        if (flags & EXEC_OBJECT_WRITE) {
                struct intel_frontbuffer *front;

                front = __intel_frontbuffer_get(obj);
                if (unlikely(front)) {
                        if (intel_frontbuffer_invalidate(front, ORIGIN_CS))
                                i915_active_add_request(&front->write, rq);
                        intel_frontbuffer_put(front);
                }

                dma_resv_add_excl_fence(vma->resv, &rq->fence);
                obj->write_domain = I915_GEM_DOMAIN_RENDER;
                obj->read_domains = 0;
        } else {
                err = dma_resv_reserve_shared(vma->resv, 1);
                if (unlikely(err))
                        return err;

                dma_resv_add_shared_fence(vma->resv, &rq->fence);
                obj->write_domain = 0;
        }

        if (flags & EXEC_OBJECT_NEEDS_FENCE && vma->fence)
                i915_active_add_request(&vma->fence->active, rq);

        obj->read_domains |= I915_GEM_GPU_DOMAINS;
        obj->mm.dirty = true;

        GEM_BUG_ON(!i915_vma_is_active(vma));
        return 0;
}
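
/*
 * A rough sketch of how a submission path uses the helper above (ww
 * locking and error unwind elided): the request holds the vma active, so
 * the pin can be dropped once tracking succeeds.
 *
 *      err = i915_vma_pin(vma, 0, 0, PIN_USER);
 *      if (err)
 *              return err;
 *      err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
 *      i915_vma_unpin(vma);
 *      if (err)
 *              return err;
 */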

void __i915_vma_evict(struct i915_vma *vma)
{
        GEM_BUG_ON(i915_vma_is_pinned(vma));

        if (i915_vma_is_map_and_fenceable(vma)) {
                /* Force a pagefault for domain tracking on next user access */
                i915_vma_revoke_mmap(vma);

                /*
                 * Check that we have flushed all writes through the GGTT
                 * before the unbind; otherwise, due to the non-strict nature
                 * of those indirect writes, they may end up referencing the
                 * GGTT PTE after the unbind.
                 *
                 * Note that we may be concurrently poking at the GGTT_WRITE
                 * bit from set-domain, as we mark all GGTT vma associated
                 * with an object. We know this is for another vma, as we
                 * are currently unbinding this one -- so if this vma will be
                 * reused, it will be refaulted and have its dirty bit set
                 * before the next write.
                 */
                i915_vma_flush_writes(vma);

                /* release the fence reg _after_ flushing */
                i915_vma_revoke_fence(vma);

                __i915_vma_iounmap(vma);
                clear_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
        }
        GEM_BUG_ON(vma->fence);
        GEM_BUG_ON(i915_vma_has_userfault(vma));

        if (likely(atomic_read(&vma->vm->open))) {
                trace_i915_vma_unbind(vma);
                vma->ops->unbind_vma(vma->vm, vma);
        }
        atomic_and(~(I915_VMA_BIND_MASK | I915_VMA_ERROR | I915_VMA_GGTT_WRITE),
                   &vma->flags);

        i915_vma_detach(vma);
        vma_unbind_pages(vma);
}

int __i915_vma_unbind(struct i915_vma *vma)
{
        int ret;

        lockdep_assert_held(&vma->vm->mutex);

        if (!drm_mm_node_allocated(&vma->node))
                return 0;

        if (i915_vma_is_pinned(vma)) {
                vma_print_allocator(vma, "is pinned");
                return -EAGAIN;
        }

        /*
         * After confirming that no one else is pinning this vma, wait for
         * any laggards who may have crept in during the wait (through
         * a residual pin skipping the vm->mutex) to complete.
         */
        ret = i915_vma_sync(vma);
        if (ret)
                return ret;

        GEM_BUG_ON(i915_vma_is_active(vma));
        __i915_vma_evict(vma);

        drm_mm_remove_node(&vma->node); /* pairs with i915_vma_release() */
        return 0;
}

int i915_vma_unbind(struct i915_vma *vma)
{
        struct i915_address_space *vm = vma->vm;
        intel_wakeref_t wakeref = 0;
        int err;

        /* Optimistic wait before taking the mutex */
        err = i915_vma_sync(vma);
        if (err)
                return err;

        if (!drm_mm_node_allocated(&vma->node))
                return 0;

        if (i915_vma_is_pinned(vma)) {
                vma_print_allocator(vma, "is pinned");
                return -EAGAIN;
        }

        if (i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND))
                /* XXX not always required: nop_clear_range */
                wakeref = intel_runtime_pm_get(&vm->i915->runtime_pm);

        err = mutex_lock_interruptible_nested(&vma->vm->mutex, !wakeref);
        if (err)
                goto out_rpm;

        err = __i915_vma_unbind(vma);
        mutex_unlock(&vm->mutex);

out_rpm:
        if (wakeref)
                intel_runtime_pm_put(&vm->i915->runtime_pm, wakeref);
        return err;
}

struct i915_vma *i915_vma_make_unshrinkable(struct i915_vma *vma)
{
        i915_gem_object_make_unshrinkable(vma->obj);
        return vma;
}

void i915_vma_make_shrinkable(struct i915_vma *vma)
{
        i915_gem_object_make_shrinkable(vma->obj);
}

void i915_vma_make_purgeable(struct i915_vma *vma)
{
        i915_gem_object_make_purgeable(vma->obj);
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_vma.c"
#endif

static void i915_global_vma_shrink(void)
{
        kmem_cache_shrink(global.slab_vmas);
}

static void i915_global_vma_exit(void)
{
        kmem_cache_destroy(global.slab_vmas);
}

static struct i915_global_vma global = { {
        .shrink = i915_global_vma_shrink,
        .exit = i915_global_vma_exit,
} };

int __init i915_global_vma_init(void)
{
        global.slab_vmas = KMEM_CACHE(i915_vma, SLAB_HWCACHE_ALIGN);
        if (!global.slab_vmas)
                return -ENOMEM;

        i915_global_register(&global.base);
        return 0;
}