/* linux/drivers/gpu/drm/i915/selftests/i915_gem.c */
   1/*
   2 * SPDX-License-Identifier: MIT
   3 *
   4 * Copyright © 2018 Intel Corporation
   5 */
   6
   7#include <linux/random.h>
   8
   9#include "gem/selftests/igt_gem_utils.h"
  10#include "gem/selftests/mock_context.h"
  11#include "gem/i915_gem_pm.h"
  12#include "gt/intel_gt.h"
  13#include "gt/intel_gt_pm.h"
  14
  15#include "i915_selftest.h"
  16
  17#include "igt_flush_test.h"
  18#include "mock_drm.h"
  19
  20static int switch_to_context(struct i915_gem_context *ctx)
  21{
  22        struct i915_gem_engines_iter it;
  23        struct intel_context *ce;
  24        int err = 0;
  25
  26        for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
  27                struct i915_request *rq;
  28
  29                rq = intel_context_create_request(ce);
  30                if (IS_ERR(rq)) {
  31                        err = PTR_ERR(rq);
  32                        break;
  33                }
  34
  35                i915_request_add(rq);
  36        }
  37        i915_gem_context_unlock_engines(ctx);
  38
  39        return err;
  40}
  41
/*
 * Overwrite the whole of stolen memory (i915->dsm) with pseudo-random
 * junk. Stolen is not CPU-mappable directly, so each page is reached by
 * temporarily binding it into the GGTT error-capture slot and writing
 * through the mappable aperture. Used to emulate stolen contents being
 * lost across hibernation.
 */
static void trash_stolen(struct drm_i915_private *i915)
{
	struct i915_ggtt *ggtt = &i915->ggtt;
	const u64 slot = ggtt->error_capture.start;
	const resource_size_t size = resource_size(&i915->dsm);
	unsigned long page;
	u32 prng = 0x12345678; /* fixed seed: deterministic garbage */

	/* XXX: fsck. needs some more thought... */
	if (!i915_ggtt_has_aperture(ggtt))
		return; /* no CPU aperture, cannot poke stolen from here */

	for (page = 0; page < size; page += PAGE_SIZE) {
		const dma_addr_t dma = i915->dsm.start + page;
		u32 __iomem *s;
		int x;

		/* Repoint the scratch slot at the next stolen page... */
		ggtt->vm.insert_page(&ggtt->vm, dma, slot, I915_CACHE_NONE, 0);

		/* ...and scribble over the page through the aperture. */
		s = io_mapping_map_atomic_wc(&ggtt->iomap, slot);
		for (x = 0; x < PAGE_SIZE / sizeof(u32); x++) {
			prng = next_pseudo_random32(prng);
			iowrite32(prng, &s[x]);
		}
		io_mapping_unmap_atomic(s);
	}

	/* Drop the last binding from the scratch slot. */
	ggtt->vm.clear_range(&ggtt->vm, slot, PAGE_SIZE);
}
  71
  72static void simulate_hibernate(struct drm_i915_private *i915)
  73{
  74        intel_wakeref_t wakeref;
  75
  76        wakeref = intel_runtime_pm_get(&i915->runtime_pm);
  77
  78        /*
  79         * As a final sting in the tail, invalidate stolen. Under a real S4,
  80         * stolen is lost and needs to be refilled on resume. However, under
  81         * CI we merely do S4-device testing (as full S4 is too unreliable
  82         * for automated testing across a cluster), so to simulate the effect
  83         * of stolen being trashed across S4, we trash it ourselves.
  84         */
  85        trash_stolen(i915);
  86
  87        intel_runtime_pm_put(&i915->runtime_pm, wakeref);
  88}
  89
/*
 * Quiesce the GPU ahead of a (simulated) suspend.
 * Always returns 0; kept as int to mirror the pm_ops prepare signature.
 */
static int igt_pm_prepare(struct drm_i915_private *i915)
{
	i915_gem_suspend(i915);

	return 0;
}
  96
  97static void igt_pm_suspend(struct drm_i915_private *i915)
  98{
  99        intel_wakeref_t wakeref;
 100
 101        with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
 102                i915_ggtt_suspend(&i915->ggtt);
 103                i915_gem_suspend_late(i915);
 104        }
 105}
 106
/*
 * Run the freeze (S4 entry) sequence while holding a runtime-pm wakeref,
 * mirroring igt_pm_suspend() but via the hibernation callbacks.
 */
static void igt_pm_hibernate(struct drm_i915_private *i915)
{
	intel_wakeref_t wakeref;

	with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
		i915_ggtt_suspend(&i915->ggtt);

		i915_gem_freeze(i915);
		i915_gem_freeze_late(i915);
	}
}
 118
/* Shared wakeup path for both the suspend and hibernate selftests. */
static void igt_pm_resume(struct drm_i915_private *i915)
{
	intel_wakeref_t wakeref;

	/*
	 * Both suspend and hibernate follow the same wakeup path and assume
	 * that runtime-pm just works.
	 */
	with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
		i915_ggtt_resume(&i915->ggtt);
		i915_gem_resume(i915);
	}
}
 132
 133static int igt_gem_suspend(void *arg)
 134{
 135        struct drm_i915_private *i915 = arg;
 136        struct i915_gem_context *ctx;
 137        struct file *file;
 138        int err;
 139
 140        file = mock_file(i915);
 141        if (IS_ERR(file))
 142                return PTR_ERR(file);
 143
 144        err = -ENOMEM;
 145        ctx = live_context(i915, file);
 146        if (!IS_ERR(ctx))
 147                err = switch_to_context(ctx);
 148        if (err)
 149                goto out;
 150
 151        err = igt_pm_prepare(i915);
 152        if (err)
 153                goto out;
 154
 155        igt_pm_suspend(i915);
 156
 157        /* Here be dragons! Note that with S3RST any S3 may become S4! */
 158        simulate_hibernate(i915);
 159
 160        igt_pm_resume(i915);
 161
 162        err = switch_to_context(ctx);
 163out:
 164        fput(file);
 165        return err;
 166}
 167
/*
 * Exercise the S4 freeze/resume path: prove a context works, freeze,
 * trash stolen to emulate its loss across hibernation, resume, and
 * prove the same context still works.
 */
static int igt_gem_hibernate(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct i915_gem_context *ctx;
	struct file *file;
	int err;

	file = mock_file(i915);
	if (IS_ERR(file))
		return PTR_ERR(file);

	/* Preset -ENOMEM so a live_context() failure falls through to out. */
	err = -ENOMEM;
	ctx = live_context(i915, file);
	if (!IS_ERR(ctx))
		err = switch_to_context(ctx);
	if (err)
		goto out;

	err = igt_pm_prepare(i915);
	if (err)
		goto out;

	igt_pm_hibernate(i915);

	/* Here be dragons! */
	simulate_hibernate(i915);

	igt_pm_resume(i915);

	/* The context must survive the freeze/thaw cycle. */
	err = switch_to_context(ctx);
out:
	fput(file);
	return err;
}
 202
/*
 * Smoke test for the ww (wound/wait) object-locking context: lock two
 * objects, each twice, to exercise both the interruptible and plain
 * lock paths and the -EALREADY (already locked in this ctx) handling,
 * with backoff-and-retry on -EDEADLK.
 */
static int igt_gem_ww_ctx(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj, *obj2;
	struct i915_gem_ww_ctx ww;
	int err = 0;

	obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	obj2 = i915_gem_object_create_internal(i915, PAGE_SIZE);
	if (IS_ERR(obj2)) {
		err = PTR_ERR(obj2);
		goto put1;
	}

	/* true: request interruptible waits inside the ww context */
	i915_gem_ww_ctx_init(&ww, true);
retry:
	/* Lock the objects, twice for good measure (-EALREADY handling) */
	err = i915_gem_object_lock(obj, &ww);
	if (!err)
		err = i915_gem_object_lock_interruptible(obj, &ww);
	if (!err)
		err = i915_gem_object_lock_interruptible(obj2, &ww);
	if (!err)
		err = i915_gem_object_lock(obj2, &ww);

	if (err == -EDEADLK) {
		/* Drop all held locks and retry the whole sequence. */
		err = i915_gem_ww_ctx_backoff(&ww);
		if (!err)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww);
	i915_gem_object_put(obj2);
put1:
	i915_gem_object_put(obj);
	return err;
}
 242
/*
 * Entry point for the i915_gem live selftests. Skips (returns 0) when
 * the GT is already wedged, since none of the subtests could run.
 */
int i915_gem_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_gem_suspend),
		SUBTEST(igt_gem_hibernate),
		SUBTEST(igt_gem_ww_ctx),
	};

	if (intel_gt_is_wedged(&i915->gt))
		return 0;

	return i915_live_subtests(tests, i915);
}
 256