linux/drivers/gpu/drm/i915/gt/selftest_context.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright © 2019 Intel Corporation
 */

#include "i915_selftest.h"
#include "intel_engine_heartbeat.h"
#include "intel_engine_pm.h"
#include "intel_gt.h"

#include "gem/selftests/mock_context.h"
#include "selftests/igt_flush_test.h"
#include "selftests/mock_drm.h"

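/*
 * Submit the request at barrier priority and wait for it to complete,
 * retiring it on success. The timeline mutex taken when the request was
 * created is dropped here, as part of the opencoded i915_request_add()
 * below.
 */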
static int request_sync(struct i915_request *rq)
{
        struct intel_timeline *tl = i915_request_timeline(rq);
        long timeout;
        int err = 0;

        intel_timeline_get(tl);
        i915_request_get(rq);

        /* Opencode i915_request_add() so we can keep the timeline locked. */
        __i915_request_commit(rq);
        rq->sched.attr.priority = I915_PRIORITY_BARRIER;
        __i915_request_queue_bh(rq);

        timeout = i915_request_wait(rq, 0, HZ / 10);
        if (timeout < 0)
                err = timeout;
        else
                i915_request_retire_upto(rq);

        lockdep_unpin_lock(&tl->mutex, rq->cookie);
        mutex_unlock(&tl->mutex);

        i915_request_put(rq);
        intel_timeline_put(tl);

        return err;
}

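/*
 * Wait for every request on the context's timeline to be retired, then
 * wait for any remote idle-barriers so that ce->active is stable before
 * the caller inspects it.
 */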
static int context_sync(struct intel_context *ce)
{
        struct intel_timeline *tl = ce->timeline;
        int err = 0;

        mutex_lock(&tl->mutex);
        do {
                struct i915_request *rq;
                long timeout;

                if (list_empty(&tl->requests))
                        break;

                rq = list_last_entry(&tl->requests, typeof(*rq), link);
                i915_request_get(rq);

                timeout = i915_request_wait(rq, 0, HZ / 10);
                if (timeout < 0)
                        err = timeout;
                else
                        i915_request_retire_upto(rq);

                i915_request_put(rq);
        } while (!err);
        mutex_unlock(&tl->mutex);

        /* Wait for all barriers to complete (remote CPU) before we check */
        i915_active_unlock_wait(&ce->active);
        return err;
}

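/*
 * live_context_size() grows engine->context_size by one page; poison that
 * trailing page and verify the HW does not write into it when saving the
 * context.
 */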
static int __live_context_size(struct intel_engine_cs *engine)
{
        struct intel_context *ce;
        struct i915_request *rq;
        void *vaddr;
        int err;

        ce = intel_context_create(engine);
        if (IS_ERR(ce))
                return PTR_ERR(ce);

        err = intel_context_pin(ce);
        if (err)
                goto err;

        vaddr = i915_gem_object_pin_map_unlocked(ce->state->obj,
                                                 i915_coherent_map_type(engine->i915,
                                                                        ce->state->obj, false));
        if (IS_ERR(vaddr)) {
                err = PTR_ERR(vaddr);
                intel_context_unpin(ce);
                goto err;
        }

        /*
         * Note that execlists also applies a redzone which it checks on
         * context unpin when debugging. We are using the same location
         * and same poison value so that our checks overlap. Despite the
         * redundancy, we want to keep this little selftest so that we
         * get coverage of any and all submission backends, and we can
         * always extend this test to ensure we trick the HW into a
         * compromising position wrt the various sections that need
         * to be written into the context state.
         *
         * TLDR; this overlaps with the execlists redzone.
         */
        vaddr += engine->context_size - I915_GTT_PAGE_SIZE;
        memset(vaddr, POISON_INUSE, I915_GTT_PAGE_SIZE);

        rq = intel_context_create_request(ce);
        intel_context_unpin(ce);
        if (IS_ERR(rq)) {
                err = PTR_ERR(rq);
                goto err_unpin;
        }

        err = request_sync(rq);
        if (err)
                goto err_unpin;

        /* Force the context switch */
        rq = intel_engine_create_kernel_request(engine);
        if (IS_ERR(rq)) {
                err = PTR_ERR(rq);
                goto err_unpin;
        }
        err = request_sync(rq);
        if (err)
                goto err_unpin;

        if (memchr_inv(vaddr, POISON_INUSE, I915_GTT_PAGE_SIZE)) {
                pr_err("%s context overwrote trailing red-zone!\n", engine->name);
                err = -EINVAL;
        }

err_unpin:
        i915_gem_object_unpin_map(ce->state->obj);
err:
        intel_context_put(ce);
        return err;
}

static int live_context_size(void *arg)
{
        struct intel_gt *gt = arg;
        struct intel_engine_cs *engine;
        enum intel_engine_id id;
        int err = 0;

        /*
         * Check that our context sizes are correct by seeing if the
         * HW tries to write past the end of one.
         */

        for_each_engine(engine, gt, id) {
                struct file *saved;

                if (!engine->context_size)
                        continue;

                intel_engine_pm_get(engine);

                /*
                 * Hide the old default state -- we lie about the context size
                 * and get confused when the default state is smaller than
                 * expected. For our do-nothing request, inheriting the
                 * active state is sufficient; we are only checking that we
                 * don't use more than we planned.
                 */
                saved = fetch_and_zero(&engine->default_state);

                /* Overlaps with the execlists redzone */
                engine->context_size += I915_GTT_PAGE_SIZE;

                err = __live_context_size(engine);

                engine->context_size -= I915_GTT_PAGE_SIZE;

                engine->default_state = saved;

                intel_engine_pm_put(engine);

                if (err)
                        break;
        }

        return err;
}

static int __live_active_context(struct intel_engine_cs *engine)
{
        unsigned long saved_heartbeat;
        struct intel_context *ce;
        int pass;
        int err;

        /*
         * We keep active contexts alive until after a subsequent context
         * switch as the final write from the context-save will be after
         * we retire the final request. We track when we unpin the context,
         * under the presumption that the final pin is from the last request,
         * and instead of immediately unpinning the context, we add a task
         * to unpin the context from the next idle-barrier.
         *
         * This test makes sure that the context is kept alive until a
         * subsequent idle-barrier (emitted when the engine wakeref hits 0
         * with no more outstanding requests).
         *
         * In GuC submission mode we don't use idle barriers and we instead
         * get a message from the GuC to signal that it is safe to unpin the
         * context from memory.
         */
        if (intel_engine_uses_guc(engine))
                return 0;

        if (intel_engine_pm_is_awake(engine)) {
                pr_err("%s is awake before starting %s!\n",
                       engine->name, __func__);
                return -EINVAL;
        }

        ce = intel_context_create(engine);
        if (IS_ERR(ce))
                return PTR_ERR(ce);

        saved_heartbeat = engine->props.heartbeat_interval_ms;
        engine->props.heartbeat_interval_ms = 0;

        for (pass = 0; pass <= 2; pass++) {
                struct i915_request *rq;

                intel_engine_pm_get(engine);

                rq = intel_context_create_request(ce);
                if (IS_ERR(rq)) {
                        err = PTR_ERR(rq);
                        goto out_engine;
                }

                err = request_sync(rq);
                if (err)
                        goto out_engine;

                /* Context will be kept active until after an idle-barrier. */
                if (i915_active_is_idle(&ce->active)) {
                        pr_err("context is not active; expected idle-barrier (%s pass %d)\n",
                               engine->name, pass);
                        err = -EINVAL;
                        goto out_engine;
                }

                if (!intel_engine_pm_is_awake(engine)) {
                        pr_err("%s is asleep before idle-barrier\n",
                               engine->name);
                        err = -EINVAL;
                        goto out_engine;
                }

out_engine:
                intel_engine_pm_put(engine);
                if (err)
                        goto err;
        }

        /* Now make sure our idle-barriers are flushed */
        err = intel_engine_flush_barriers(engine);
        if (err)
                goto err;

        /* Wait for the barrier and in the process wait for engine to park */
        err = context_sync(engine->kernel_context);
        if (err)
                goto err;

        if (!i915_active_is_idle(&ce->active)) {
                pr_err("context is still active!\n");
                err = -EINVAL;
        }

        intel_engine_pm_flush(engine);

        if (intel_engine_pm_is_awake(engine)) {
                struct drm_printer p = drm_debug_printer(__func__);

                intel_engine_dump(engine, &p,
                                  "%s is still awake:%d after idle-barriers\n",
                                  engine->name,
                                  atomic_read(&engine->wakeref.count));
                GEM_TRACE_DUMP();

                err = -EINVAL;
                goto err;
        }

err:
        engine->props.heartbeat_interval_ms = saved_heartbeat;
        intel_context_put(ce);
        return err;
}

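/*
 * Run the active-context tracking check on each engine, flushing the GT
 * between engines so that a failure on one does not leak into the next.
 */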
static int live_active_context(void *arg)
{
        struct intel_gt *gt = arg;
        struct intel_engine_cs *engine;
        enum intel_engine_id id;
        int err = 0;

        for_each_engine(engine, gt, id) {
                err = __live_active_context(engine);
                if (err)
                        break;

                err = igt_flush_test(gt->i915);
                if (err)
                        break;
        }

        return err;
}

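/*
 * Submit a request on @ce that operates on @remote's context image (via
 * intel_context_prepare_remote_request()) and wait for it to complete.
 */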
static int __remote_sync(struct intel_context *ce, struct intel_context *remote)
{
        struct i915_request *rq;
        int err;

        err = intel_context_pin(remote);
        if (err)
                return err;

        rq = intel_context_create_request(ce);
        if (IS_ERR(rq)) {
                err = PTR_ERR(rq);
                goto unpin;
        }

        err = intel_context_prepare_remote_request(remote, rq);
        if (err) {
                i915_request_add(rq);
                goto unpin;
        }

        err = request_sync(rq);

unpin:
        intel_context_unpin(remote);
        return err;
}

static int __live_remote_context(struct intel_engine_cs *engine)
{
        struct intel_context *local, *remote;
        unsigned long saved_heartbeat;
        int pass;
        int err;

        /*
         * Check that our idle barriers do not interfere with normal
         * activity tracking. In particular, check that operating
         * on the context image remotely (intel_context_prepare_remote_request),
         * which inserts foreign fences into intel_context.active, does not
         * clobber the idle-barrier.
         *
         * In GuC submission mode we don't use idle barriers.
         */
        if (intel_engine_uses_guc(engine))
                return 0;

        if (intel_engine_pm_is_awake(engine)) {
                pr_err("%s is awake before starting %s!\n",
                       engine->name, __func__);
                return -EINVAL;
        }

        remote = intel_context_create(engine);
        if (IS_ERR(remote))
                return PTR_ERR(remote);

        local = intel_context_create(engine);
        if (IS_ERR(local)) {
                err = PTR_ERR(local);
                goto err_remote;
        }

        saved_heartbeat = engine->props.heartbeat_interval_ms;
        engine->props.heartbeat_interval_ms = 0;
        intel_engine_pm_get(engine);

        for (pass = 0; pass <= 2; pass++) {
                err = __remote_sync(local, remote);
                if (err)
                        break;

                err = __remote_sync(engine->kernel_context, remote);
                if (err)
                        break;

                if (i915_active_is_idle(&remote->active)) {
                        pr_err("remote context is not active; expected idle-barrier (%s pass %d)\n",
                               engine->name, pass);
                        err = -EINVAL;
                        break;
                }
        }

        intel_engine_pm_put(engine);
        engine->props.heartbeat_interval_ms = saved_heartbeat;

        intel_context_put(local);
err_remote:
        intel_context_put(remote);
        return err;
}

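/*
 * Exercise remote updates of a context image on each engine, flushing the
 * GT between engines so that a failure on one does not leak into the next.
 */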
static int live_remote_context(void *arg)
{
        struct intel_gt *gt = arg;
        struct intel_engine_cs *engine;
        enum intel_engine_id id;
        int err = 0;

        for_each_engine(engine, gt, id) {
                err = __live_remote_context(engine);
                if (err)
                        break;

                err = igt_flush_test(gt->i915);
                if (err)
                        break;
        }

        return err;
}

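/*
 * Entry point for the intel_context live selftests. These require working
 * submission, so they are skipped if the GT is already wedged.
 */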
int intel_context_live_selftests(struct drm_i915_private *i915)
{
        static const struct i915_subtest tests[] = {
                SUBTEST(live_context_size),
                SUBTEST(live_active_context),
                SUBTEST(live_remote_context),
        };
        struct intel_gt *gt = &i915->gt;

        if (intel_gt_is_wedged(gt))
                return 0;

        return intel_gt_live_subtests(tests, gt);
}