linux/drivers/gpu/drm/i915/i915_vma.c
/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include "i915_vma.h"

#include "i915_drv.h"
#include "intel_ringbuffer.h"
#include "intel_frontbuffer.h"

#include <drm/drm_gem.h>

static void
i915_vma_retire(struct i915_gem_active *active,
                struct drm_i915_gem_request *rq)
{
        const unsigned int idx = rq->engine->id;
        struct i915_vma *vma =
                container_of(active, struct i915_vma, last_read[idx]);
        struct drm_i915_gem_object *obj = vma->obj;

        GEM_BUG_ON(!i915_vma_has_active_engine(vma, idx));

        i915_vma_clear_active(vma, idx);
        if (i915_vma_is_active(vma))
                return;

        GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
        list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
        if (unlikely(i915_vma_is_closed(vma) && !i915_vma_is_pinned(vma)))
                WARN_ON(i915_vma_unbind(vma));

        GEM_BUG_ON(!i915_gem_object_is_active(obj));
        if (--obj->active_count)
                return;

        /* Bump our place on the bound list to keep it roughly in LRU order
         * so that we don't steal from recently used but inactive objects
         * (unless we are forced to ofc!)
         */
        if (obj->bind_count)
                list_move_tail(&obj->global_link, &rq->i915->mm.bound_list);

        obj->mm.dirty = true; /* be paranoid */

        if (i915_gem_object_has_active_reference(obj)) {
                i915_gem_object_clear_active_reference(obj);
                i915_gem_object_put(obj);
        }
}

static struct i915_vma *
vma_create(struct drm_i915_gem_object *obj,
           struct i915_address_space *vm,
           const struct i915_ggtt_view *view)
{
        struct i915_vma *vma;
        struct rb_node *rb, **p;
        int i;

        /* The aliasing_ppgtt should never be used directly! */
        GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->base);

        vma = kmem_cache_zalloc(vm->i915->vmas, GFP_KERNEL);
        if (vma == NULL)
                return ERR_PTR(-ENOMEM);

        for (i = 0; i < ARRAY_SIZE(vma->last_read); i++)
                init_request_active(&vma->last_read[i], i915_vma_retire);
        init_request_active(&vma->last_fence, NULL);
        vma->vm = vm;
        vma->obj = obj;
        vma->resv = obj->resv;
        vma->size = obj->base.size;
        vma->display_alignment = I915_GTT_MIN_ALIGNMENT;

        if (view && view->type != I915_GGTT_VIEW_NORMAL) {
                vma->ggtt_view = *view;
                if (view->type == I915_GGTT_VIEW_PARTIAL) {
                        GEM_BUG_ON(range_overflows_t(u64,
                                                     view->partial.offset,
                                                     view->partial.size,
                                                     obj->base.size >> PAGE_SHIFT));
                        vma->size = view->partial.size;
                        vma->size <<= PAGE_SHIFT;
                        GEM_BUG_ON(vma->size >= obj->base.size);
                } else if (view->type == I915_GGTT_VIEW_ROTATED) {
                        vma->size = intel_rotation_info_size(&view->rotated);
                        vma->size <<= PAGE_SHIFT;
                }
        }

        if (unlikely(vma->size > vm->total))
                goto err_vma;

        GEM_BUG_ON(!IS_ALIGNED(vma->size, I915_GTT_PAGE_SIZE));

        if (i915_is_ggtt(vm)) {
                if (unlikely(overflows_type(vma->size, u32)))
                        goto err_vma;

                vma->fence_size = i915_gem_fence_size(vm->i915, vma->size,
                                                      i915_gem_object_get_tiling(obj),
                                                      i915_gem_object_get_stride(obj));
                if (unlikely(vma->fence_size < vma->size || /* overflow */
                             vma->fence_size > vm->total))
                        goto err_vma;

                GEM_BUG_ON(!IS_ALIGNED(vma->fence_size, I915_GTT_MIN_ALIGNMENT));

                vma->fence_alignment = i915_gem_fence_alignment(vm->i915, vma->size,
                                                                i915_gem_object_get_tiling(obj),
                                                                i915_gem_object_get_stride(obj));
                GEM_BUG_ON(!is_power_of_2(vma->fence_alignment));

                vma->flags |= I915_VMA_GGTT;
                list_add(&vma->obj_link, &obj->vma_list);
        } else {
                i915_ppgtt_get(i915_vm_to_ppgtt(vm));
                list_add_tail(&vma->obj_link, &obj->vma_list);
        }

        rb = NULL;
        p = &obj->vma_tree.rb_node;
        while (*p) {
                struct i915_vma *pos;

                rb = *p;
                pos = rb_entry(rb, struct i915_vma, obj_node);
                if (i915_vma_compare(pos, vm, view) < 0)
                        p = &rb->rb_right;
                else
                        p = &rb->rb_left;
        }
        rb_link_node(&vma->obj_node, rb, p);
        rb_insert_color(&vma->obj_node, &obj->vma_tree);
        list_add(&vma->vm_link, &vm->unbound_list);

        return vma;

err_vma:
        kmem_cache_free(vm->i915->vmas, vma);
        return ERR_PTR(-E2BIG);
}

static struct i915_vma *
vma_lookup(struct drm_i915_gem_object *obj,
           struct i915_address_space *vm,
           const struct i915_ggtt_view *view)
{
        struct rb_node *rb;

        rb = obj->vma_tree.rb_node;
        while (rb) {
                struct i915_vma *vma = rb_entry(rb, struct i915_vma, obj_node);
                long cmp;

                cmp = i915_vma_compare(vma, vm, view);
                if (cmp == 0)
                        return vma;

                if (cmp < 0)
                        rb = rb->rb_right;
                else
                        rb = rb->rb_left;
        }

        return NULL;
}

/**
 * i915_vma_instance - return the singleton instance of the VMA
 * @obj: parent &struct drm_i915_gem_object to be mapped
 * @vm: address space in which the mapping is located
 * @view: additional mapping requirements
 *
 * i915_vma_instance() looks up an existing VMA of the @obj in the @vm with
 * the same @view characteristics. If a match is not found, one is created.
 * Once created, the VMA is kept until either the object is freed, or the
 * address space is closed.
 *
 * Must be called with struct_mutex held.
 *
 * Returns the vma, or an error pointer.
 */
struct i915_vma *
i915_vma_instance(struct drm_i915_gem_object *obj,
                  struct i915_address_space *vm,
                  const struct i915_ggtt_view *view)
{
        struct i915_vma *vma;

        lockdep_assert_held(&obj->base.dev->struct_mutex);
        GEM_BUG_ON(view && !i915_is_ggtt(vm));
        GEM_BUG_ON(vm->closed);

        vma = vma_lookup(obj, vm, view);
        if (!vma)
                vma = vma_create(obj, vm, view);

        GEM_BUG_ON(!IS_ERR(vma) && i915_vma_is_closed(vma));
        GEM_BUG_ON(!IS_ERR(vma) && i915_vma_compare(vma, vm, view));
        GEM_BUG_ON(!IS_ERR(vma) && vma_lookup(obj, vm, view) != vma);
        return vma;
}
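
/*
 * Illustrative sketch (not part of this file): a typical lookup of the
 * GGTT VMA for an object, under struct_mutex. The dev_priv/obj names and
 * the use of &dev_priv->ggtt.base as the address space are assumptions
 * for the example:
 *
 *        struct i915_vma *vma;
 *
 *        vma = i915_vma_instance(obj, &dev_priv->ggtt.base, NULL);
 *        if (IS_ERR(vma))
 *                return PTR_ERR(vma);
 *
 * Repeated calls with the same (obj, vm, view) triple return the same
 * VMA; creation only happens on the first lookup.
 */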

/**
 * i915_vma_bind - Sets up PTEs for a VMA in its corresponding address space.
 * @vma: VMA to map
 * @cache_level: mapping cache level
 * @flags: flags like global or local mapping
 *
 * DMA addresses are taken from the scatter-gather table of this object (or of
 * this VMA in case of non-default GGTT views) and the PTE entries are set up.
 * Note that the DMA addresses are also the only part of the SG table we care
 * about.
 */
int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
                  u32 flags)
{
        u32 bind_flags;
        u32 vma_flags;
        int ret;

        GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
        GEM_BUG_ON(vma->size > vma->node.size);

        if (GEM_WARN_ON(range_overflows(vma->node.start,
                                        vma->node.size,
                                        vma->vm->total)))
                return -ENODEV;

        if (GEM_WARN_ON(!flags))
                return -EINVAL;

        bind_flags = 0;
        if (flags & PIN_GLOBAL)
                bind_flags |= I915_VMA_GLOBAL_BIND;
        if (flags & PIN_USER)
                bind_flags |= I915_VMA_LOCAL_BIND;

        vma_flags = vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);
        if (flags & PIN_UPDATE)
                bind_flags |= vma_flags;
        else
                bind_flags &= ~vma_flags;
        if (bind_flags == 0)
                return 0;

        trace_i915_vma_bind(vma, bind_flags);
        ret = vma->vm->bind_vma(vma, cache_level, bind_flags);
        if (ret)
                return ret;

        vma->flags |= bind_flags;
        return 0;
}
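
/*
 * Illustrative sketch (not part of this file): a caller that has already
 * bound a vma and then changes the object's cache level can rewrite the
 * existing PTEs in place by passing PIN_UPDATE, e.g.
 *
 *        ret = i915_vma_bind(vma, cache_level, PIN_UPDATE);
 *        if (ret)
 *                return ret;
 *
 * With PIN_UPDATE, the currently bound domains are folded back into
 * bind_flags above, so already-present global/local bindings are
 * refreshed rather than skipped as "already bound".
 */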

void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
{
        void __iomem *ptr;

        /* Access through the GTT requires the device to be awake. */
        assert_rpm_wakelock_held(vma->vm->i915);

        lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
        if (WARN_ON(!i915_vma_is_map_and_fenceable(vma)))
                return IO_ERR_PTR(-ENODEV);

        GEM_BUG_ON(!i915_vma_is_ggtt(vma));
        GEM_BUG_ON((vma->flags & I915_VMA_GLOBAL_BIND) == 0);

        ptr = vma->iomap;
        if (ptr == NULL) {
                ptr = io_mapping_map_wc(&i915_vm_to_ggtt(vma->vm)->mappable,
                                        vma->node.start,
                                        vma->node.size);
                if (ptr == NULL)
                        return IO_ERR_PTR(-ENOMEM);

                vma->iomap = ptr;
        }

        __i915_vma_pin(vma);
        return ptr;
}
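
/*
 * Illustrative sketch (not part of this file): mapping a GGTT-bound VMA
 * for CPU writes through the aperture, assuming an rpm wakeref and
 * struct_mutex are already held (value/offset are example names):
 *
 *        void __iomem *ptr;
 *
 *        ptr = i915_vma_pin_iomap(vma);
 *        if (IS_ERR(ptr))
 *                return PTR_ERR(ptr);
 *
 *        writel(value, ptr + offset);
 *        i915_vma_unpin_iomap(vma);
 *
 * Note the WC mapping itself is cached in vma->iomap and only torn down
 * when the VMA is unbound; i915_vma_unpin_iomap() just drops the pin.
 */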

void i915_vma_unpin_and_release(struct i915_vma **p_vma)
{
        struct i915_vma *vma;
        struct drm_i915_gem_object *obj;

        vma = fetch_and_zero(p_vma);
        if (!vma)
                return;

        obj = vma->obj;

        i915_vma_unpin(vma);
        i915_vma_close(vma);

        __i915_gem_object_release_unless_active(obj);
}
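
/*
 * Illustrative sketch (not part of this file): this helper suits teardown
 * paths that own a pinned VMA through a pointer field, e.g. something like
 *
 *        i915_vma_unpin_and_release(&engine->scratch);
 *
 * The field is cleared first (fetch_and_zero) so no stale pointer to the
 * released VMA is left behind, and repeated calls become harmless no-ops.
 */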

bool i915_vma_misplaced(const struct i915_vma *vma,
                        u64 size, u64 alignment, u64 flags)
{
        if (!drm_mm_node_allocated(&vma->node))
                return false;

        if (vma->node.size < size)
                return true;

        GEM_BUG_ON(alignment && !is_power_of_2(alignment));
        if (alignment && !IS_ALIGNED(vma->node.start, alignment))
                return true;

        if (flags & PIN_MAPPABLE && !i915_vma_is_map_and_fenceable(vma))
                return true;

        if (flags & PIN_OFFSET_BIAS &&
            vma->node.start < (flags & PIN_OFFSET_MASK))
                return true;

        if (flags & PIN_OFFSET_FIXED &&
            vma->node.start != (flags & PIN_OFFSET_MASK))
                return true;

        return false;
}
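
/*
 * Illustrative sketch (not part of this file): the PIN_OFFSET_* flags carry
 * their page-aligned operand in the same u64, masked by PIN_OFFSET_MASK.
 * A caller requesting a minimum start offset might build flags as (bias is
 * an example name):
 *
 *        u64 flags = PIN_OFFSET_BIAS | (bias & PIN_OFFSET_MASK);
 *
 *        if (i915_vma_misplaced(vma, size, alignment, flags))
 *                ret = i915_vma_unbind(vma);
 *
 * i.e. a misplaced VMA is unbound so that a subsequent pin can place it
 * somewhere that does satisfy the constraints.
 */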

void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
{
        bool mappable, fenceable;

        GEM_BUG_ON(!i915_vma_is_ggtt(vma));
        GEM_BUG_ON(!vma->fence_size);

        /*
         * Explicitly disable for rotated VMA since the display does not
         * need the fence and the VMA is not accessible to other users.
         */
        if (vma->ggtt_view.type == I915_GGTT_VIEW_ROTATED)
                return;

        fenceable = (vma->node.size >= vma->fence_size &&
                     IS_ALIGNED(vma->node.start, vma->fence_alignment));

        mappable = (vma->node.start + vma->fence_size <=
                    i915_vm_to_ggtt(vma->vm)->mappable_end);

        if (mappable && fenceable)
                vma->flags |= I915_VMA_CAN_FENCE;
        else
                vma->flags &= ~I915_VMA_CAN_FENCE;
}

static bool color_differs(struct drm_mm_node *node, unsigned long color)
{
        return node->allocated && node->color != color;
}

bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long cache_level)
{
        struct drm_mm_node *node = &vma->node;
        struct drm_mm_node *other;

        /*
         * On some machines we have to be careful when putting differing types
         * of snoopable memory together to avoid the prefetcher crossing memory
         * domains and dying. During vm initialisation, we decide whether or not
         * these constraints apply and set the drm_mm.color_adjust
         * appropriately.
         */
        if (vma->vm->mm.color_adjust == NULL)
                return true;

        /* Only valid to be called on an already inserted vma */
        GEM_BUG_ON(!drm_mm_node_allocated(node));
        GEM_BUG_ON(list_empty(&node->node_list));

        other = list_prev_entry(node, node_list);
        if (color_differs(other, cache_level) && !drm_mm_hole_follows(other))
                return false;

        other = list_next_entry(node, node_list);
        if (color_differs(other, cache_level) && !drm_mm_hole_follows(node))
                return false;

        return true;
}
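
/*
 * Illustrative example of the coloring invariant checked above: a node may
 * not sit flush against a neighbour of a differing color (cache level);
 * there must be a hole between them, e.g.
 *
 *        | snooped node | hole | uncached node |    -> valid
 *        | snooped node | uncached node |           -> invalid
 *
 * drm_mm's color_adjust callback carves out those guard holes at insertion
 * time; this function only re-checks the invariant afterwards.
 */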

/**
 * i915_vma_insert - finds a slot for the vma in its address space
 * @vma: the vma
 * @size: requested size in bytes (can be larger than the VMA)
 * @alignment: required alignment
 * @flags: mask of PIN_* flags to use
 *
 * First we try to allocate some free space that meets the requirements for
 * the VMA. Failing that, if the flags permit, we evict an old VMA,
 * preferably the oldest idle entry, to make room for the new VMA.
 *
 * Returns:
 * 0 on success, negative error code otherwise.
 */
static int
i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
{
        struct drm_i915_private *dev_priv = vma->vm->i915;
        struct drm_i915_gem_object *obj = vma->obj;
        u64 start, end;
        int ret;

        GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
        GEM_BUG_ON(drm_mm_node_allocated(&vma->node));

        size = max(size, vma->size);
        alignment = max(alignment, vma->display_alignment);
        if (flags & PIN_MAPPABLE) {
                size = max_t(typeof(size), size, vma->fence_size);
                alignment = max_t(typeof(alignment),
                                  alignment, vma->fence_alignment);
        }

        GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
        GEM_BUG_ON(!IS_ALIGNED(alignment, I915_GTT_MIN_ALIGNMENT));
        GEM_BUG_ON(!is_power_of_2(alignment));

        start = flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
        GEM_BUG_ON(!IS_ALIGNED(start, I915_GTT_PAGE_SIZE));

        end = vma->vm->total;
        if (flags & PIN_MAPPABLE)
                end = min_t(u64, end, dev_priv->ggtt.mappable_end);
        if (flags & PIN_ZONE_4G)
                end = min_t(u64, end, (1ULL << 32) - I915_GTT_PAGE_SIZE);
        GEM_BUG_ON(!IS_ALIGNED(end, I915_GTT_PAGE_SIZE));

        /* If binding the object/GGTT view requires more space than the entire
         * aperture has, reject it early before evicting everything in a vain
         * attempt to find space.
         */
        if (size > end) {
                DRM_DEBUG("Attempting to bind an object larger than the aperture: request=%llu [object=%zd] > %s aperture=%llu\n",
                          size, obj->base.size,
                          flags & PIN_MAPPABLE ? "mappable" : "total",
                          end);
                return -ENOSPC;
        }

        ret = i915_gem_object_pin_pages(obj);
        if (ret)
                return ret;

        if (flags & PIN_OFFSET_FIXED) {
                u64 offset = flags & PIN_OFFSET_MASK;

                if (!IS_ALIGNED(offset, alignment) ||
                    range_overflows(offset, size, end)) {
                        ret = -EINVAL;
                        goto err_unpin;
                }

                ret = i915_gem_gtt_reserve(vma->vm, &vma->node,
                                           size, offset, obj->cache_level,
                                           flags);
                if (ret)
                        goto err_unpin;
        } else {
                ret = i915_gem_gtt_insert(vma->vm, &vma->node,
                                          size, alignment, obj->cache_level,
                                          start, end, flags);
                if (ret)
                        goto err_unpin;

                GEM_BUG_ON(vma->node.start < start);
                GEM_BUG_ON(vma->node.start + vma->node.size > end);
        }
        GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
        GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, obj->cache_level));

        list_move_tail(&obj->global_link, &dev_priv->mm.bound_list);
        list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
        obj->bind_count++;
        GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count);

        return 0;

err_unpin:
        i915_gem_object_unpin_pages(obj);
        return ret;
}
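
/*
 * Illustrative sketch (not part of this file): callers reach this through
 * the pin interface rather than directly. Placing an object at a fixed
 * GGTT offset, for example, might look roughly like (offset is an example
 * name and must be page aligned):
 *
 *        vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
 *                                       PIN_OFFSET_FIXED |
 *                                       (offset & PIN_OFFSET_MASK));
 *
 * which lands in the PIN_OFFSET_FIXED branch above and reserves exactly
 * that drm_mm range instead of searching for a free slot.
 */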

static void
i915_vma_remove(struct i915_vma *vma)
{
        struct drm_i915_gem_object *obj = vma->obj;

        GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
        GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));

        drm_mm_remove_node(&vma->node);
        list_move_tail(&vma->vm_link, &vma->vm->unbound_list);

        /* Since the unbound list is global, only move to that list if
         * no more VMAs exist.
         */
        if (--obj->bind_count == 0)
                list_move_tail(&obj->global_link,
                               &to_i915(obj->base.dev)->mm.unbound_list);

        /* And finally now the object is completely decoupled from this vma,
         * we can drop its hold on the backing storage and allow it to be
         * reaped by the shrinker.
         */
        i915_gem_object_unpin_pages(obj);
        GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count);
}

int __i915_vma_do_pin(struct i915_vma *vma,
                      u64 size, u64 alignment, u64 flags)
{
        const unsigned int bound = vma->flags;
        int ret;

        lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
        GEM_BUG_ON((flags & (PIN_GLOBAL | PIN_USER)) == 0);
        GEM_BUG_ON((flags & PIN_GLOBAL) && !i915_vma_is_ggtt(vma));

        if (WARN_ON(bound & I915_VMA_PIN_OVERFLOW)) {
                ret = -EBUSY;
                goto err_unpin;
        }

        if ((bound & I915_VMA_BIND_MASK) == 0) {
                ret = i915_vma_insert(vma, size, alignment, flags);
                if (ret)
                        goto err_unpin;
        }

        ret = i915_vma_bind(vma, vma->obj->cache_level, flags);
        if (ret)
                goto err_remove;

        if ((bound ^ vma->flags) & I915_VMA_GLOBAL_BIND)
                __i915_vma_set_map_and_fenceable(vma);

        GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
        GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));
        return 0;

err_remove:
        if ((bound & I915_VMA_BIND_MASK) == 0) {
                GEM_BUG_ON(vma->pages);
                i915_vma_remove(vma);
        }
err_unpin:
        __i915_vma_unpin(vma);
        return ret;
}
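
/*
 * Illustrative sketch (not part of this file): __i915_vma_do_pin() is
 * normally reached via the i915_vma_pin() wrapper, which only drops into
 * this slow path when the VMA is not already bound and pinned as
 * requested:
 *
 *        ret = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
 *        if (ret)
 *                return ret;
 *        ...
 *        i915_vma_unpin(vma);
 *
 * The pin count is embedded in vma->flags alongside the bind bits, hence
 * the I915_VMA_PIN_OVERFLOW check above.
 */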

static void i915_vma_destroy(struct i915_vma *vma)
{
        GEM_BUG_ON(vma->node.allocated);
        GEM_BUG_ON(i915_vma_is_active(vma));
        GEM_BUG_ON(!i915_vma_is_closed(vma));
        GEM_BUG_ON(vma->fence);

        list_del(&vma->vm_link);
        if (!i915_vma_is_ggtt(vma))
                i915_ppgtt_put(i915_vm_to_ppgtt(vma->vm));

        kmem_cache_free(to_i915(vma->obj->base.dev)->vmas, vma);
}

void i915_vma_unlink_ctx(struct i915_vma *vma)
{
        struct i915_gem_context *ctx = vma->ctx;

        if (ctx->vma_lut.ht_size & I915_CTX_RESIZE_IN_PROGRESS) {
                cancel_work_sync(&ctx->vma_lut.resize);
                ctx->vma_lut.ht_size &= ~I915_CTX_RESIZE_IN_PROGRESS;
        }

        __hlist_del(&vma->ctx_node);
        ctx->vma_lut.ht_count--;

        if (i915_vma_is_ggtt(vma))
                vma->obj->vma_hashed = NULL;
        vma->ctx = NULL;

        i915_vma_put(vma);
}

void i915_vma_close(struct i915_vma *vma)
{
        GEM_BUG_ON(i915_vma_is_closed(vma));
        vma->flags |= I915_VMA_CLOSED;

        if (vma->ctx)
                i915_vma_unlink_ctx(vma);

        list_del(&vma->obj_link);
        rb_erase(&vma->obj_node, &vma->obj->vma_tree);

        if (!i915_vma_is_active(vma) && !i915_vma_is_pinned(vma))
                WARN_ON(i915_vma_unbind(vma));
}

static void __i915_vma_iounmap(struct i915_vma *vma)
{
        GEM_BUG_ON(i915_vma_is_pinned(vma));

        if (vma->iomap == NULL)
                return;

        io_mapping_unmap(vma->iomap);
        vma->iomap = NULL;
}

int i915_vma_unbind(struct i915_vma *vma)
{
        struct drm_i915_gem_object *obj = vma->obj;
        unsigned long active;
        int ret;

        lockdep_assert_held(&obj->base.dev->struct_mutex);

        /* First wait upon any activity as retiring the request may
         * have side-effects such as unpinning or even unbinding this vma.
         */
        active = i915_vma_get_active(vma);
        if (active) {
                int idx;

                /* When a closed VMA is retired, it is unbound - eek.
                 * In order to prevent it from being recursively closed,
                 * take a pin on the vma so that the second unbind is
                 * aborted.
                 *
                 * Even more scary is that the retire callback may free
                 * the object (last active vma). To prevent the explosion
                 * we defer the actual object free to a worker that can
                 * only proceed once it acquires the struct_mutex (which
                 * we currently hold, therefore it cannot free this object
                 * before we are finished).
                 */
                __i915_vma_pin(vma);

                for_each_active(active, idx) {
                        ret = i915_gem_active_retire(&vma->last_read[idx],
                                                     &vma->vm->i915->drm.struct_mutex);
                        if (ret)
                                break;
                }

                if (!ret) {
                        ret = i915_gem_active_retire(&vma->last_fence,
                                                     &vma->vm->i915->drm.struct_mutex);
                }

                __i915_vma_unpin(vma);
                if (ret)
                        return ret;

                GEM_BUG_ON(i915_vma_is_active(vma));
        }

        if (i915_vma_is_pinned(vma))
                return -EBUSY;

        if (!drm_mm_node_allocated(&vma->node))
                goto destroy;

        GEM_BUG_ON(obj->bind_count == 0);
        GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));

        if (i915_vma_is_map_and_fenceable(vma)) {
                /* release the fence reg _after_ flushing */
                ret = i915_vma_put_fence(vma);
                if (ret)
                        return ret;

                /* Force a pagefault for domain tracking on next user access */
                i915_gem_release_mmap(obj);

                __i915_vma_iounmap(vma);
                vma->flags &= ~I915_VMA_CAN_FENCE;
        }

        if (likely(!vma->vm->closed)) {
                trace_i915_vma_unbind(vma);
                vma->vm->unbind_vma(vma);
        }
        vma->flags &= ~(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);

        if (vma->pages != obj->mm.pages) {
                GEM_BUG_ON(!vma->pages);
                sg_free_table(vma->pages);
                kfree(vma->pages);
        }
        vma->pages = NULL;

        i915_vma_remove(vma);

destroy:
        if (unlikely(i915_vma_is_closed(vma)))
                i915_vma_destroy(vma);

        return 0;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_vma.c"
#endif