linux/drivers/gpu/drm/i915/i915_gem_evict.c
/*
 * Copyright © 2008-2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Chris Wilson <chris@chris-wilson.co.uk>
 *
 */

#include "gem/i915_gem_context.h"
#include "gt/intel_gt_requests.h"

#include "i915_drv.h"
#include "i915_trace.h"

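/*
 * Selftest knob: when set, i915_gem_evict_something() reports -EBUSY
 * instead of stalling to flush the GGTT (see its use below).
 */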
I915_SELFTEST_DECLARE(static struct igt_evict_ctl {
        bool fail_if_busy:1;
} igt_evict_ctl;)

static int ggtt_flush(struct intel_gt *gt)
{
        /*
         * Not everything in the GGTT is tracked via vma (otherwise we
         * could evict as required with minimal stalling) so we are forced
         * to idle the GPU and explicitly retire outstanding requests in
         * the hopes that we can then remove contexts and the like only
         * bound by their active reference.
         */
        return intel_gt_wait_for_idle(gt, MAX_SCHEDULE_TIMEOUT);
}

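/*
 * Add an unpinned vma to the eviction scan, remembering it on the unwind
 * list so the scan can be backed out if no suitable hole is found.
 */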
static bool
mark_free(struct drm_mm_scan *scan,
          struct i915_vma *vma,
          unsigned int flags,
          struct list_head *unwind)
{
        if (i915_vma_is_pinned(vma))
                return false;

        list_add(&vma->evict_link, unwind);
        return drm_mm_scan_add_block(scan, &vma->node);
}

/**
 * i915_gem_evict_something - Evict vmas to make room for binding a new one
 * @vm: address space to evict from
 * @min_size: size of the desired free space
 * @alignment: alignment constraint of the desired free space
 * @color: color for the desired space
 * @start: start (inclusive) of the range from which to evict objects
 * @end: end (exclusive) of the range from which to evict objects
 * @flags: additional flags to control the eviction algorithm
 *
 * This function will try to evict vmas until a free space satisfying the
 * requirements is found. Callers must check first whether any such hole exists
 * already before calling this function.
 *
 * This function is used by the object/vma binding code.
 *
 * Since this function is only used to free up virtual address space, it only
 * ignores pinned vmas, and not objects where the backing storage itself is
 * pinned. Hence obj->pages_pin_count does not protect against eviction.
 *
 * To clarify: This is for freeing up virtual address space, not for freeing
 * memory in e.g. the shrinker.
 */
int
i915_gem_evict_something(struct i915_address_space *vm,
                         u64 min_size, u64 alignment,
                         unsigned long color,
                         u64 start, u64 end,
                         unsigned flags)
{
        struct drm_mm_scan scan;
        struct list_head eviction_list;
        struct i915_vma *vma, *next;
        struct drm_mm_node *node;
        enum drm_mm_insert_mode mode;
        struct i915_vma *active;
        int ret;

        lockdep_assert_held(&vm->mutex);
        trace_i915_gem_evict(vm, min_size, alignment, flags);

        /*
         * The goal is to evict objects and amalgamate space in rough LRU order.
         * Since both active and inactive objects reside on the same list,
         * in a mix of creation and last scanned order, as we process the list
         * we sort it into inactive/active, which keeps the active portion
         * in a rough MRU order.
         *
         * The retirement sequence is thus:
         *   1. Inactive objects (already retired, random order)
         *   2. Active objects (will stall on unbinding, oldest scanned first)
         */
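        /*
         * Bias the scan to match where the subsequent insertion will be
         * attempted: PIN_MAPPABLE allocations are confined to the low,
         * CPU-mappable portion of the GGTT, while PIN_HIGH prefers holes
         * towards the top of the address space.
         */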
        mode = DRM_MM_INSERT_BEST;
        if (flags & PIN_HIGH)
                mode = DRM_MM_INSERT_HIGH;
        if (flags & PIN_MAPPABLE)
                mode = DRM_MM_INSERT_LOW;
        drm_mm_scan_init_with_range(&scan, &vm->mm,
                                    min_size, alignment, color,
                                    start, end, mode);

        intel_gt_retire_requests(vm->gt);

search_again:
        active = NULL;
        INIT_LIST_HEAD(&eviction_list);
        list_for_each_entry_safe(vma, next, &vm->bound_list, vm_link) {
                if (vma == active) { /* now seen this vma twice */
                        if (flags & PIN_NONBLOCK)
                                break;

                        active = ERR_PTR(-EAGAIN);
                }

                /*
                 * We keep this list in a rough least-recently scanned order
                 * of active elements (inactive elements are cheap to reap).
                 * New entries are added to the end, and we move anything we
                 * scan to the end. The assumption is that the working set
                 * of applications is either steady state (and thanks to the
                 * userspace bo cache it almost always is) or volatile and
                 * frequently replaced after a frame, which are self-evicting!
                 * Given that assumption, the MRU order of the scan list is
                 * fairly static, and keeping it in least-recently scanned order
                 * is suitable.
                 *
                 * To notice when we complete one full cycle, we record the
                 * first active element seen, before moving it to the tail.
                 */
                if (active != ERR_PTR(-EAGAIN) && i915_vma_is_active(vma)) {
                        if (!active)
                                active = vma;

                        list_move_tail(&vma->vm_link, &vm->bound_list);
                        continue;
                }

                if (mark_free(&scan, vma, flags, &eviction_list))
                        goto found;
        }

        /* Nothing found, clean up and bail out! */
        list_for_each_entry_safe(vma, next, &eviction_list, evict_link) {
                ret = drm_mm_scan_remove_block(&scan, &vma->node);
                BUG_ON(ret);
        }

        /*
         * Can we unpin some objects such as idle hw contexts,
         * or pending flips? But since only the GGTT has global entries
         * such as scanouts, ringbuffers and contexts, we can skip the
         * purge when inspecting per-process local address spaces.
         */
        if (!i915_is_ggtt(vm) || flags & PIN_NONBLOCK)
                return -ENOSPC;

        /*
         * Not everything in the GGTT is tracked via VMA using
         * i915_vma_move_to_active(), otherwise we could evict as required
         * with minimal stalling. Instead we are forced to idle the GPU and
         * explicitly retire outstanding requests which will then remove
         * the pinning for active objects such as contexts and rings,
         * enabling us to evict them on the next iteration.
         *
         * To ensure that all user contexts are evictable, we perform
         * a switch to the perma-pinned kernel context. This also gives
         * us a termination condition: when the last retired context is
         * the kernel's, there is nothing more we can evict.
         */
        if (I915_SELFTEST_ONLY(igt_evict_ctl.fail_if_busy))
                return -EBUSY;

        ret = ggtt_flush(vm->gt);
        if (ret)
                return ret;

        cond_resched();

        flags |= PIN_NONBLOCK;
        goto search_again;

found:
        /* drm_mm doesn't allow any other operations while
         * scanning, therefore store to-be-evicted objects on a
         * temporary list and take a reference for all before
         * calling unbind (which may remove the active reference
         * of any of our objects, thus corrupting the list).
         */
        list_for_each_entry_safe(vma, next, &eviction_list, evict_link) {
                if (drm_mm_scan_remove_block(&scan, &vma->node))
                        __i915_vma_pin(vma);
                else
                        list_del(&vma->evict_link);
        }

        /* Unbinding will emit any required flushes */
        ret = 0;
        list_for_each_entry_safe(vma, next, &eviction_list, evict_link) {
                __i915_vma_unpin(vma);
                if (ret == 0)
                        ret = __i915_vma_unbind(vma);
        }

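        /*
         * The scan may have marked extra neighbouring nodes for eviction
         * purely to satisfy cache colouring; unbind those as well, provided
         * they are backed by an evictable vma.
         */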
        while (ret == 0 && (node = drm_mm_scan_color_evict(&scan))) {
                vma = container_of(node, struct i915_vma, node);

                /* If we find any non-objects (!vma), we cannot evict them */
                if (vma->node.color != I915_COLOR_UNEVICTABLE)
                        ret = __i915_vma_unbind(vma);
                else
                        ret = -ENOSPC; /* XXX search failed, try again? */
        }

        return ret;
}

/**
 * i915_gem_evict_for_node - Evict vmas to make room for binding a new one
 * @vm: address space to evict from
 * @target: range (and color) to evict for
 * @flags: additional flags to control the eviction algorithm
 *
 * This function will try to evict vmas that overlap the target node.
 *
 * To clarify: This is for freeing up virtual address space, not for freeing
 * memory in e.g. the shrinker.
 */
int i915_gem_evict_for_node(struct i915_address_space *vm,
                            struct drm_mm_node *target,
                            unsigned int flags)
{
        LIST_HEAD(eviction_list);
        struct drm_mm_node *node;
        u64 start = target->start;
        u64 end = start + target->size;
        struct i915_vma *vma, *next;
        int ret = 0;

        lockdep_assert_held(&vm->mutex);
        GEM_BUG_ON(!IS_ALIGNED(start, I915_GTT_PAGE_SIZE));
        GEM_BUG_ON(!IS_ALIGNED(end, I915_GTT_PAGE_SIZE));

        trace_i915_gem_evict_node(vm, target, flags);

        /*
         * Retire before we search the active list. Although we have
         * reasonable accuracy in our retirement lists, we may have
         * a stray pin (preventing eviction) that can only be resolved by
         * retiring.
         */
        intel_gt_retire_requests(vm->gt);

        if (i915_vm_has_cache_coloring(vm)) {
                /* Expand search to cover neighbouring guard pages (or lack!) */
                if (start)
                        start -= I915_GTT_PAGE_SIZE;

                /* Always look at the page afterwards to avoid the end-of-GTT */
                end += I915_GTT_PAGE_SIZE;
        }
        GEM_BUG_ON(start >= end);

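        /*
         * Check every node overlapping the (possibly expanded) target range;
         * anything pinned, still active (with PIN_NONBLOCK) or not backed by
         * an evictable vma means the hole cannot be reused.
         */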
        drm_mm_for_each_node_in_range(node, &vm->mm, start, end) {
                /* If we find any non-objects (!vma), we cannot evict them */
                if (node->color == I915_COLOR_UNEVICTABLE) {
                        ret = -ENOSPC;
                        break;
                }

                GEM_BUG_ON(!drm_mm_node_allocated(node));
                vma = container_of(node, typeof(*vma), node);

                /*
                 * If we are using coloring to insert guard pages between
                 * different cache domains within the address space, we have
                 * to check whether the objects on either side of our range
                 * abut and conflict. If they are in conflict, then we evict
                 * those as well to make room for our guard pages.
                 */
                if (i915_vm_has_cache_coloring(vm)) {
                        if (node->start + node->size == target->start) {
                                if (node->color == target->color)
                                        continue;
                        }
                        if (node->start == target->start + target->size) {
                                if (node->color == target->color)
                                        continue;
                        }
                }

                if (i915_vma_is_pinned(vma)) {
                        ret = -ENOSPC;
                        break;
                }

                if (flags & PIN_NONBLOCK && i915_vma_is_active(vma)) {
                        ret = -ENOSPC;
                        break;
                }

                /*
                 * Never show fear in the face of dragons!
                 *
                 * We cannot directly remove this node from within this
                 * iterator and as with i915_gem_evict_something() we employ
                 * the vma pin_count in order to prevent the action of
                 * unbinding one vma from freeing (by dropping its active
                 * reference) another in our eviction list.
                 */
                __i915_vma_pin(vma);
                list_add(&vma->evict_link, &eviction_list);
        }

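        /* Unpin everything we collected and unbind until the first error. */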
        list_for_each_entry_safe(vma, next, &eviction_list, evict_link) {
                __i915_vma_unpin(vma);
                if (ret == 0)
                        ret = __i915_vma_unbind(vma);
        }

        return ret;
}

/**
 * i915_gem_evict_vm - Evict all idle vmas from a vm
 * @vm: Address space to cleanse
 *
 * This function evicts all vmas from a vm.
 *
 * This is used by the execbuf code as a last-ditch effort to defragment the
 * address space.
 *
 * To clarify: This is for freeing up virtual address space, not for freeing
 * memory in e.g. the shrinker.
 */
int i915_gem_evict_vm(struct i915_address_space *vm)
{
        int ret = 0;

        lockdep_assert_held(&vm->mutex);
        trace_i915_gem_evict_vm(vm);

        /* Switch back to the default context in order to unpin
         * the existing context objects. However, such objects only
         * pin themselves inside the global GTT and performing the
         * switch otherwise is ineffective.
         */
        if (i915_is_ggtt(vm)) {
                ret = ggtt_flush(vm->gt);
                if (ret)
                        return ret;
        }

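        /*
         * As above, pin each candidate before unbinding so that unbinding
         * one vma (and dropping its active reference) cannot free another
         * vma still on our local eviction list; repeat until the bound list
         * yields no more unpinned vmas.
         */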
        do {
                struct i915_vma *vma, *vn;
                LIST_HEAD(eviction_list);

                list_for_each_entry(vma, &vm->bound_list, vm_link) {
                        if (i915_vma_is_pinned(vma))
                                continue;

                        __i915_vma_pin(vma);
                        list_add(&vma->evict_link, &eviction_list);
                }
                if (list_empty(&eviction_list))
                        break;

                ret = 0;
                list_for_each_entry_safe(vma, vn, &eviction_list, evict_link) {
                        __i915_vma_unpin(vma);
                        if (ret == 0)
                                ret = __i915_vma_unbind(vma);
                        if (ret != -EINTR) /* "Get me out of here!" */
                                ret = 0;
                }
        } while (ret == 0);

        return ret;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_gem_evict.c"
#endif
