linux/drivers/gpu/drm/i915/i915_gem_evict.c
/*
 * Copyright © 2008-2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Chris Wilson <chris@chris-wilson.co.uk>
 *
 */

#include <drm/i915_drm.h>

#include "gem/i915_gem_context.h"

#include "i915_drv.h"
#include "intel_drv.h"
#include "i915_trace.h"

I915_SELFTEST_DECLARE(static struct igt_evict_ctl {
        bool fail_if_busy:1;
} igt_evict_ctl;)

static int ggtt_flush(struct drm_i915_private *i915)
{
        /*
         * Not everything in the GGTT is tracked via vma (otherwise we
         * could evict as required with minimal stalling) so we are forced
         * to idle the GPU and explicitly retire outstanding requests in
         * the hopes that we can then remove contexts and the like only
         * bound by their active reference.
         */
        return i915_gem_wait_for_idle(i915,
                                      I915_WAIT_INTERRUPTIBLE |
                                      I915_WAIT_LOCKED,
                                      MAX_SCHEDULE_TIMEOUT);
}

static bool
mark_free(struct drm_mm_scan *scan,
          struct i915_vma *vma,
          unsigned int flags,
          struct list_head *unwind)
{
        if (i915_vma_is_pinned(vma))
                return false;

        if (flags & PIN_NONFAULT && i915_vma_has_userfault(vma))
                return false;

        list_add(&vma->evict_link, unwind);
        return drm_mm_scan_add_block(scan, &vma->node);
}

/**
 * i915_gem_evict_something - Evict vmas to make room for binding a new one
 * @vm: address space to evict from
 * @min_size: size of the desired free space
 * @alignment: alignment constraint of the desired free space
 * @cache_level: cache_level for the desired space
 * @start: start (inclusive) of the range from which to evict objects
 * @end: end (exclusive) of the range from which to evict objects
 * @flags: additional flags to control the eviction algorithm
 *
 * This function will try to evict vmas until a free space satisfying the
 * requirements is found. Callers must check first whether any such hole exists
 * already before calling this function.
 *
 * This function is used by the object/vma binding code.
 *
 * Since this function is only used to free up virtual address space, it only
 * skips pinned vmas; objects whose backing storage is pinned are still
 * eligible for eviction. Hence obj->pages_pin_count does not protect against
 * eviction.
 *
 * To clarify: This is for freeing up virtual address space, not for freeing
 * memory in e.g. the shrinker.
 */
int
i915_gem_evict_something(struct i915_address_space *vm,
                         u64 min_size, u64 alignment,
                         unsigned cache_level,
                         u64 start, u64 end,
                         unsigned flags)
{
        struct drm_i915_private *dev_priv = vm->i915;
        struct drm_mm_scan scan;
        struct list_head eviction_list;
        struct i915_vma *vma, *next;
        struct drm_mm_node *node;
        enum drm_mm_insert_mode mode;
        struct i915_vma *active;
        int ret;

        lockdep_assert_held(&vm->i915->drm.struct_mutex);
        trace_i915_gem_evict(vm, min_size, alignment, flags);

        /*
         * The goal is to evict objects and amalgamate space in rough LRU order.
         * Since both active and inactive objects reside on the same list,
         * in a mix of creation and last scanned order, as we process the list
         * we sort it into inactive/active, which keeps the active portion
         * in a rough MRU order.
         *
         * The retirement sequence is thus:
         *   1. Inactive objects (already retired, random order)
         *   2. Active objects (will stall on unbinding, oldest scanned first)
         */
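        /*
         * Note on placement: PIN_MAPPABLE bindings must live in the
         * CPU-visible aperture at the bottom of the GGTT, so bias the scan
         * towards low addresses; PIN_HIGH prefers the top of the address
         * space, keeping the mappable region free for others.
         */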
        mode = DRM_MM_INSERT_BEST;
        if (flags & PIN_HIGH)
                mode = DRM_MM_INSERT_HIGH;
        if (flags & PIN_MAPPABLE)
                mode = DRM_MM_INSERT_LOW;
        drm_mm_scan_init_with_range(&scan, &vm->mm,
                                    min_size, alignment, cache_level,
                                    start, end, mode);

        /*
         * Retire before we search the active list. Although we have
         * reasonable accuracy in our retirement lists, we may have
         * a stray pin (preventing eviction) that can only be resolved by
         * retiring.
         */
        if (!(flags & PIN_NONBLOCK))
                i915_retire_requests(dev_priv);

search_again:
        active = NULL;
        INIT_LIST_HEAD(&eviction_list);
        list_for_each_entry_safe(vma, next, &vm->bound_list, vm_link) {
                /*
                 * We keep this list in a rough least-recently scanned order
                 * of active elements (inactive elements are cheap to reap).
                 * New entries are added to the end, and we move anything we
                 * scan to the end. The assumption is that the working set
                 * of applications is either steady state (and thanks to the
                 * userspace bo cache it almost always is) or volatile and
                 * frequently replaced after a frame, which are self-evicting!
                 * Given that assumption, the MRU order of the scan list is
                 * fairly static, and keeping it in least-recently scan order
                 * is suitable.
                 *
                 * To notice when we complete one full cycle, we record the
                 * first active element seen, before moving it to the tail.
                 */
                if (i915_vma_is_active(vma)) {
                        if (vma == active) {
                                if (flags & PIN_NONBLOCK)
                                        break;

                                active = ERR_PTR(-EAGAIN);
                        }

                        if (active != ERR_PTR(-EAGAIN)) {
                                if (!active)
                                        active = vma;

                                list_move_tail(&vma->vm_link, &vm->bound_list);
                                continue;
                        }
                }

                if (mark_free(&scan, vma, flags, &eviction_list))
                        goto found;
        }

        /* Nothing found, clean up and bail out! */
        list_for_each_entry_safe(vma, next, &eviction_list, evict_link) {
                ret = drm_mm_scan_remove_block(&scan, &vma->node);
                BUG_ON(ret);
        }

        /*
         * Can we unpin some objects such as idle hw contexts,
         * or pending flips? But since only the GGTT has global entries
         * such as scanouts, ringbuffers and contexts, we can skip the
         * purge when inspecting per-process local address spaces.
         */
        if (!i915_is_ggtt(vm) || flags & PIN_NONBLOCK)
                return -ENOSPC;

        /*
         * Not everything in the GGTT is tracked via VMA using
         * i915_vma_move_to_active(), otherwise we could evict as required
         * with minimal stalling. Instead we are forced to idle the GPU and
         * explicitly retire outstanding requests, which will then remove
         * the pinning for active objects such as contexts and rings,
         * enabling us to evict them on the next iteration.
         *
         * To ensure that all user contexts are evictable, we perform
         * a switch to the perma-pinned kernel context. This also gives
         * us a termination condition: when the last retired context is
         * the kernel's, there is nothing more we can evict.
         */
        if (I915_SELFTEST_ONLY(igt_evict_ctl.fail_if_busy))
                return -EBUSY;

        ret = ggtt_flush(dev_priv);
        if (ret)
                return ret;

        cond_resched();

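        /*
         * Retry the scan, but only once: with PIN_NONBLOCK set, a second
         * failure returns -ENOSPC above rather than flushing the GGTT
         * again, so the search is guaranteed to terminate.
         */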
        flags |= PIN_NONBLOCK;
        goto search_again;

found:
        /* drm_mm doesn't allow any other operations while
         * scanning, therefore store to-be-evicted objects on a
         * temporary list and take a reference for all before
         * calling unbind (which may remove the active reference
         * of any of our objects, thus corrupting the list).
         */
        list_for_each_entry_safe(vma, next, &eviction_list, evict_link) {
                if (drm_mm_scan_remove_block(&scan, &vma->node))
                        __i915_vma_pin(vma);
                else
                        list_del(&vma->evict_link);
        }

        /* Unbinding will emit any required flushes */
        ret = 0;
        list_for_each_entry_safe(vma, next, &eviction_list, evict_link) {
                __i915_vma_unpin(vma);
                if (ret == 0)
                        ret = i915_vma_unbind(vma);
        }

        while (ret == 0 && (node = drm_mm_scan_color_evict(&scan))) {
                vma = container_of(node, struct i915_vma, node);
                ret = i915_vma_unbind(vma);
        }

        return ret;
}
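
/*
 * Illustrative sketch (commentary, not part of the driver): the binding code
 * typically looks for a hole itself first and falls back to eviction only on
 * -ENOSPC, roughly along these lines:
 *
 *      err = drm_mm_insert_node_in_range(&vm->mm, node, size, alignment,
 *                                        colour, start, end, mode);
 *      if (err == -ENOSPC) {
 *              err = i915_gem_evict_something(vm, size, alignment, colour,
 *                                             start, end, flags);
 *              if (err == 0)
 *                      err = drm_mm_insert_node_in_range(&vm->mm, node, size,
 *                                                        alignment, colour,
 *                                                        start, end, mode);
 *      }
 */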

/**
 * i915_gem_evict_for_node - Evict vmas to make room for binding a new one
 * @vm: address space to evict from
 * @target: range (and color) to evict for
 * @flags: additional flags to control the eviction algorithm
 *
 * This function will try to evict vmas that overlap the target node.
 *
 * To clarify: This is for freeing up virtual address space, not for freeing
 * memory in e.g. the shrinker.
 */
int i915_gem_evict_for_node(struct i915_address_space *vm,
                            struct drm_mm_node *target,
                            unsigned int flags)
{
        LIST_HEAD(eviction_list);
        struct drm_mm_node *node;
        u64 start = target->start;
        u64 end = start + target->size;
        struct i915_vma *vma, *next;
        bool check_color;
        int ret = 0;

        lockdep_assert_held(&vm->i915->drm.struct_mutex);
        GEM_BUG_ON(!IS_ALIGNED(start, I915_GTT_PAGE_SIZE));
        GEM_BUG_ON(!IS_ALIGNED(end, I915_GTT_PAGE_SIZE));

        trace_i915_gem_evict_node(vm, target, flags);

        /* Retire before we search the active list. Although we have
         * reasonable accuracy in our retirement lists, we may have
         * a stray pin (preventing eviction) that can only be resolved by
         * retiring.
         */
        if (!(flags & PIN_NONBLOCK))
                i915_retire_requests(vm->i915);

        check_color = vm->mm.color_adjust;
        if (check_color) {
                /* Expand search to cover neighbouring guard pages (or lack!) */
                if (start)
                        start -= I915_GTT_PAGE_SIZE;

                /* Always look at the page afterwards to avoid the end-of-GTT */
                end += I915_GTT_PAGE_SIZE;
        }
        GEM_BUG_ON(start >= end);

        drm_mm_for_each_node_in_range(node, &vm->mm, start, end) {
                /* If we find any non-objects (!vma), we cannot evict them */
                if (node->color == I915_COLOR_UNEVICTABLE) {
                        ret = -ENOSPC;
                        break;
                }

                GEM_BUG_ON(!node->allocated);
                vma = container_of(node, typeof(*vma), node);

                /* If we are using coloring to insert guard pages between
                 * different cache domains within the address space, we have
                 * to check whether the objects on either side of our range
                 * abut and conflict. If they are in conflict, then we evict
                 * those as well to make room for our guard pages.
                 */
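                /*
                 * A neighbour that already shares our colour needs no guard
                 * page between us, so it can safely be left in place.
                 */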
                if (check_color) {
                        if (node->start + node->size == target->start) {
                                if (node->color == target->color)
                                        continue;
                        }
                        if (node->start == target->start + target->size) {
                                if (node->color == target->color)
                                        continue;
                        }
                }

                if (flags & PIN_NONBLOCK &&
                    (i915_vma_is_pinned(vma) || i915_vma_is_active(vma))) {
                        ret = -ENOSPC;
                        break;
                }

                if (flags & PIN_NONFAULT && i915_vma_has_userfault(vma)) {
                        ret = -ENOSPC;
                        break;
                }

                /* Overlap of objects in the same batch? */
                if (i915_vma_is_pinned(vma)) {
                        ret = -ENOSPC;
                        if (vma->exec_flags &&
                            *vma->exec_flags & EXEC_OBJECT_PINNED)
                                ret = -EINVAL;
                        break;
                }

                /* Never show fear in the face of dragons!
                 *
                 * We cannot directly remove this node from within this
                 * iterator and as with i915_gem_evict_something() we employ
                 * the vma pin_count in order to prevent the action of
                 * unbinding one vma from freeing (by dropping its active
                 * reference) another in our eviction list.
                 */
                __i915_vma_pin(vma);
                list_add(&vma->evict_link, &eviction_list);
        }

        list_for_each_entry_safe(vma, next, &eviction_list, evict_link) {
                __i915_vma_unpin(vma);
                if (ret == 0)
                        ret = i915_vma_unbind(vma);
        }

        return ret;
}

/**
 * i915_gem_evict_vm - Evict all idle vmas from a vm
 * @vm: Address space to cleanse
 *
 * This function evicts all vmas from a vm.
 *
 * This is used by the execbuf code as a last-ditch effort to defragment the
 * address space.
 *
 * To clarify: This is for freeing up virtual address space, not for freeing
 * memory in e.g. the shrinker.
 */
int i915_gem_evict_vm(struct i915_address_space *vm)
{
        struct list_head eviction_list;
        struct i915_vma *vma, *next;
        int ret;

        lockdep_assert_held(&vm->i915->drm.struct_mutex);
        trace_i915_gem_evict_vm(vm);

        /* Switch back to the default context in order to unpin
         * the existing context objects. However, such objects only
         * pin themselves inside the global GTT and performing the
         * switch otherwise is ineffective.
         */
        if (i915_is_ggtt(vm)) {
                ret = ggtt_flush(vm->i915);
                if (ret)
                        return ret;
        }

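        /*
         * As in i915_gem_evict_something(), grab a pin on each vma under
         * the vm->mutex first; unbinding one vma may drop the active
         * reference of another, and the pin keeps the entries on our
         * eviction list from being freed beneath us once the lock is
         * released.
         */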
        INIT_LIST_HEAD(&eviction_list);
        mutex_lock(&vm->mutex);
        list_for_each_entry(vma, &vm->bound_list, vm_link) {
                if (i915_vma_is_pinned(vma))
                        continue;

                __i915_vma_pin(vma);
                list_add(&vma->evict_link, &eviction_list);
        }
        mutex_unlock(&vm->mutex);

        ret = 0;
        list_for_each_entry_safe(vma, next, &eviction_list, evict_link) {
                __i915_vma_unpin(vma);
                if (ret == 0)
                        ret = i915_vma_unbind(vma);
        }
        return ret;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_gem_evict.c"
#endif