linux/drivers/gpu/drm/i915/gt/selftest_execlists.c
   1// SPDX-License-Identifier: MIT
   2/*
   3 * Copyright © 2018 Intel Corporation
   4 */
   5
   6#include <linux/prime_numbers.h>
   7
   8#include "gem/i915_gem_pm.h"
   9#include "gt/intel_engine_heartbeat.h"
  10#include "gt/intel_reset.h"
  11#include "gt/selftest_engine_heartbeat.h"
  12
  13#include "i915_selftest.h"
  14#include "selftests/i915_random.h"
  15#include "selftests/igt_flush_test.h"
  16#include "selftests/igt_live_test.h"
  17#include "selftests/igt_spinner.h"
  18#include "selftests/lib_sw_fence.h"
  19
  20#include "gem/selftests/igt_gem_utils.h"
  21#include "gem/selftests/mock_context.h"
  22
  23#define CS_GPR(engine, n) ((engine)->mmio_base + 0x600 + (n) * 4)
  24#define NUM_GPR 16
  25#define NUM_GPR_DW (NUM_GPR * 2) /* each GPR is 2 dwords */
  26
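     /*
      * Treat a request as in flight if it has been submitted to the HW
      * (ELSP), parked on the hold list awaiting a reset, or has already
      * passed its initial breadcrumb and so started executing.
      */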
  27static bool is_active(struct i915_request *rq)
  28{
  29        if (i915_request_is_active(rq))
  30                return true;
  31
  32        if (i915_request_on_hold(rq))
  33                return true;
  34
  35        if (i915_request_has_initial_breadcrumb(rq) && i915_request_started(rq))
  36                return true;
  37
  38        return false;
  39}
  40
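     /*
      * Kick the submission tasklet and poll until the HW has acknowledged
      * the submission of @rq (or @rq has already completed). Returns -ETIME
      * if the request does not become active within @timeout jiffies.
      */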
  41static int wait_for_submit(struct intel_engine_cs *engine,
  42                           struct i915_request *rq,
  43                           unsigned long timeout)
  44{
  45        /* Ignore our own attempts to suppress excess tasklets */
  46        tasklet_hi_schedule(&engine->sched_engine->tasklet);
  47
  48        timeout += jiffies;
  49        do {
  50                bool done = time_after(jiffies, timeout);
  51
  52                if (i915_request_completed(rq)) /* that was quick! */
  53                        return 0;
  54
   55                /* Wait until the HW has acknowledged the submission (or err) */
  56                intel_engine_flush_submission(engine);
  57                if (!READ_ONCE(engine->execlists.pending[0]) && is_active(rq))
  58                        return 0;
  59
  60                if (done)
  61                        return -ETIME;
  62
  63                cond_resched();
  64        } while (1);
  65}
  66
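     /*
      * Wait for the engine reset to mark @rq as guilty (-EIO) and then for
      * the request to complete, flushing any outstanding reset workers
      * along the way.
      */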
  67static int wait_for_reset(struct intel_engine_cs *engine,
  68                          struct i915_request *rq,
  69                          unsigned long timeout)
  70{
  71        timeout += jiffies;
  72
  73        do {
  74                cond_resched();
  75                intel_engine_flush_submission(engine);
  76
  77                if (READ_ONCE(engine->execlists.pending[0]))
  78                        continue;
  79
  80                if (i915_request_completed(rq))
  81                        break;
  82
  83                if (READ_ONCE(rq->fence.error))
  84                        break;
  85        } while (time_before(jiffies, timeout));
  86
  87        flush_scheduled_work();
  88
  89        if (rq->fence.error != -EIO) {
  90                pr_err("%s: hanging request %llx:%lld not reset\n",
  91                       engine->name,
  92                       rq->fence.context,
  93                       rq->fence.seqno);
  94                return -EINVAL;
  95        }
  96
  97        /* Give the request a jiffie to complete after flushing the worker */
  98        if (i915_request_wait(rq, 0,
  99                              max(0l, (long)(timeout - jiffies)) + 1) < 0) {
 100                pr_err("%s: hanging request %llx:%lld did not complete\n",
 101                       engine->name,
 102                       rq->fence.context,
 103                       rq->fence.seqno);
 104                return -ETIME;
 105        }
 106
 107        return 0;
 108}
 109
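     /*
      * Smoke test: submit a spinner to each engine and check that it can be
      * started and stopped cleanly without wedging the GT.
      */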
 110static int live_sanitycheck(void *arg)
 111{
 112        struct intel_gt *gt = arg;
 113        struct intel_engine_cs *engine;
 114        enum intel_engine_id id;
 115        struct igt_spinner spin;
 116        int err = 0;
 117
 118        if (!HAS_LOGICAL_RING_CONTEXTS(gt->i915))
 119                return 0;
 120
 121        if (igt_spinner_init(&spin, gt))
 122                return -ENOMEM;
 123
 124        for_each_engine(engine, gt, id) {
 125                struct intel_context *ce;
 126                struct i915_request *rq;
 127
 128                ce = intel_context_create(engine);
 129                if (IS_ERR(ce)) {
 130                        err = PTR_ERR(ce);
 131                        break;
 132                }
 133
 134                rq = igt_spinner_create_request(&spin, ce, MI_NOOP);
 135                if (IS_ERR(rq)) {
 136                        err = PTR_ERR(rq);
 137                        goto out_ctx;
 138                }
 139
 140                i915_request_add(rq);
 141                if (!igt_wait_for_spinner(&spin, rq)) {
 142                        GEM_TRACE("spinner failed to start\n");
 143                        GEM_TRACE_DUMP();
 144                        intel_gt_set_wedged(gt);
 145                        err = -EIO;
 146                        goto out_ctx;
 147                }
 148
 149                igt_spinner_end(&spin);
 150                if (igt_flush_test(gt->i915)) {
 151                        err = -EIO;
 152                        goto out_ctx;
 153                }
 154
 155out_ctx:
 156                intel_context_put(ce);
 157                if (err)
 158                        break;
 159        }
 160
 161        igt_spinner_fini(&spin);
 162        return err;
 163}
 164
 165static int live_unlite_restore(struct intel_gt *gt, int prio)
 166{
 167        struct intel_engine_cs *engine;
 168        enum intel_engine_id id;
 169        struct igt_spinner spin;
 170        int err = -ENOMEM;
 171
 172        /*
 173         * Check that we can correctly context switch between 2 instances
 174         * on the same engine from the same parent context.
 175         */
 176
 177        if (igt_spinner_init(&spin, gt))
 178                return err;
 179
 180        err = 0;
 181        for_each_engine(engine, gt, id) {
 182                struct intel_context *ce[2] = {};
 183                struct i915_request *rq[2];
 184                struct igt_live_test t;
 185                int n;
 186
 187                if (prio && !intel_engine_has_preemption(engine))
 188                        continue;
 189
 190                if (!intel_engine_can_store_dword(engine))
 191                        continue;
 192
 193                if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) {
 194                        err = -EIO;
 195                        break;
 196                }
 197                st_engine_heartbeat_disable(engine);
 198
 199                for (n = 0; n < ARRAY_SIZE(ce); n++) {
 200                        struct intel_context *tmp;
 201
 202                        tmp = intel_context_create(engine);
 203                        if (IS_ERR(tmp)) {
 204                                err = PTR_ERR(tmp);
 205                                goto err_ce;
 206                        }
 207
 208                        err = intel_context_pin(tmp);
 209                        if (err) {
 210                                intel_context_put(tmp);
 211                                goto err_ce;
 212                        }
 213
 214                        /*
  215                         * Set up the pair of contexts such that if we
 216                         * lite-restore using the RING_TAIL from ce[1] it
 217                         * will execute garbage from ce[0]->ring.
 218                         */
 219                        memset(tmp->ring->vaddr,
 220                               POISON_INUSE, /* IPEHR: 0x5a5a5a5a [hung!] */
 221                               tmp->ring->vma->size);
 222
 223                        ce[n] = tmp;
 224                }
 225                GEM_BUG_ON(!ce[1]->ring->size);
 226                intel_ring_reset(ce[1]->ring, ce[1]->ring->size / 2);
 227                lrc_update_regs(ce[1], engine, ce[1]->ring->head);
 228
 229                rq[0] = igt_spinner_create_request(&spin, ce[0], MI_ARB_CHECK);
 230                if (IS_ERR(rq[0])) {
 231                        err = PTR_ERR(rq[0]);
 232                        goto err_ce;
 233                }
 234
 235                i915_request_get(rq[0]);
 236                i915_request_add(rq[0]);
 237                GEM_BUG_ON(rq[0]->postfix > ce[1]->ring->emit);
 238
 239                if (!igt_wait_for_spinner(&spin, rq[0])) {
 240                        i915_request_put(rq[0]);
 241                        goto err_ce;
 242                }
 243
 244                rq[1] = i915_request_create(ce[1]);
 245                if (IS_ERR(rq[1])) {
 246                        err = PTR_ERR(rq[1]);
 247                        i915_request_put(rq[0]);
 248                        goto err_ce;
 249                }
 250
 251                if (!prio) {
 252                        /*
 253                         * Ensure we do the switch to ce[1] on completion.
 254                         *
 255                         * rq[0] is already submitted, so this should reduce
 256                         * to a no-op (a wait on a request on the same engine
 257                         * uses the submit fence, not the completion fence),
 258                         * but it will install a dependency on rq[1] for rq[0]
 259                         * that will prevent the pair being reordered by
 260                         * timeslicing.
 261                         */
 262                        i915_request_await_dma_fence(rq[1], &rq[0]->fence);
 263                }
 264
 265                i915_request_get(rq[1]);
 266                i915_request_add(rq[1]);
 267                GEM_BUG_ON(rq[1]->postfix <= rq[0]->postfix);
 268                i915_request_put(rq[0]);
 269
 270                if (prio) {
 271                        struct i915_sched_attr attr = {
 272                                .priority = prio,
 273                        };
 274
 275                        /* Alternatively preempt the spinner with ce[1] */
 276                        engine->sched_engine->schedule(rq[1], &attr);
 277                }
 278
 279                /* And switch back to ce[0] for good measure */
 280                rq[0] = i915_request_create(ce[0]);
 281                if (IS_ERR(rq[0])) {
 282                        err = PTR_ERR(rq[0]);
 283                        i915_request_put(rq[1]);
 284                        goto err_ce;
 285                }
 286
 287                i915_request_await_dma_fence(rq[0], &rq[1]->fence);
 288                i915_request_get(rq[0]);
 289                i915_request_add(rq[0]);
 290                GEM_BUG_ON(rq[0]->postfix > rq[1]->postfix);
 291                i915_request_put(rq[1]);
 292                i915_request_put(rq[0]);
 293
 294err_ce:
 295                intel_engine_flush_submission(engine);
 296                igt_spinner_end(&spin);
 297                for (n = 0; n < ARRAY_SIZE(ce); n++) {
 298                        if (IS_ERR_OR_NULL(ce[n]))
 299                                break;
 300
 301                        intel_context_unpin(ce[n]);
 302                        intel_context_put(ce[n]);
 303                }
 304
 305                st_engine_heartbeat_enable(engine);
 306                if (igt_live_test_end(&t))
 307                        err = -EIO;
 308                if (err)
 309                        break;
 310        }
 311
 312        igt_spinner_fini(&spin);
 313        return err;
 314}
 315
 316static int live_unlite_switch(void *arg)
 317{
 318        return live_unlite_restore(arg, 0);
 319}
 320
 321static int live_unlite_preempt(void *arg)
 322{
 323        return live_unlite_restore(arg, I915_PRIORITY_MAX);
 324}
 325
 326static int live_unlite_ring(void *arg)
 327{
 328        struct intel_gt *gt = arg;
 329        struct intel_engine_cs *engine;
 330        struct igt_spinner spin;
 331        enum intel_engine_id id;
 332        int err = 0;
 333
 334        /*
  335         * Set up a preemption event that will cause almost the entire ring
 336         * to be unwound, potentially fooling our intel_ring_direction()
 337         * into emitting a forward lite-restore instead of the rollback.
 338         */
 339
 340        if (igt_spinner_init(&spin, gt))
 341                return -ENOMEM;
 342
 343        for_each_engine(engine, gt, id) {
 344                struct intel_context *ce[2] = {};
 345                struct i915_request *rq;
 346                struct igt_live_test t;
 347                int n;
 348
 349                if (!intel_engine_has_preemption(engine))
 350                        continue;
 351
 352                if (!intel_engine_can_store_dword(engine))
 353                        continue;
 354
 355                if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) {
 356                        err = -EIO;
 357                        break;
 358                }
 359                st_engine_heartbeat_disable(engine);
 360
 361                for (n = 0; n < ARRAY_SIZE(ce); n++) {
 362                        struct intel_context *tmp;
 363
 364                        tmp = intel_context_create(engine);
 365                        if (IS_ERR(tmp)) {
 366                                err = PTR_ERR(tmp);
 367                                goto err_ce;
 368                        }
 369
 370                        err = intel_context_pin(tmp);
 371                        if (err) {
 372                                intel_context_put(tmp);
 373                                goto err_ce;
 374                        }
 375
 376                        memset32(tmp->ring->vaddr,
 377                                 0xdeadbeef, /* trigger a hang if executed */
 378                                 tmp->ring->vma->size / sizeof(u32));
 379
 380                        ce[n] = tmp;
 381                }
 382
 383                /* Create max prio spinner, followed by N low prio nops */
 384                rq = igt_spinner_create_request(&spin, ce[0], MI_ARB_CHECK);
 385                if (IS_ERR(rq)) {
 386                        err = PTR_ERR(rq);
 387                        goto err_ce;
 388                }
 389
 390                i915_request_get(rq);
 391                rq->sched.attr.priority = I915_PRIORITY_BARRIER;
 392                i915_request_add(rq);
 393
 394                if (!igt_wait_for_spinner(&spin, rq)) {
 395                        intel_gt_set_wedged(gt);
 396                        i915_request_put(rq);
 397                        err = -ETIME;
 398                        goto err_ce;
 399                }
 400
  401                /* Fill the ring until we cause a wrap */
 402                n = 0;
 403                while (intel_ring_direction(ce[0]->ring,
 404                                            rq->wa_tail,
 405                                            ce[0]->ring->tail) <= 0) {
 406                        struct i915_request *tmp;
 407
 408                        tmp = intel_context_create_request(ce[0]);
 409                        if (IS_ERR(tmp)) {
 410                                err = PTR_ERR(tmp);
 411                                i915_request_put(rq);
 412                                goto err_ce;
 413                        }
 414
 415                        i915_request_add(tmp);
 416                        intel_engine_flush_submission(engine);
 417                        n++;
 418                }
 419                intel_engine_flush_submission(engine);
 420                pr_debug("%s: Filled ring with %d nop tails {size:%x, tail:%x, emit:%x, rq.tail:%x}\n",
 421                         engine->name, n,
 422                         ce[0]->ring->size,
 423                         ce[0]->ring->tail,
 424                         ce[0]->ring->emit,
 425                         rq->tail);
 426                GEM_BUG_ON(intel_ring_direction(ce[0]->ring,
 427                                                rq->tail,
 428                                                ce[0]->ring->tail) <= 0);
 429                i915_request_put(rq);
 430
 431                /* Create a second ring to preempt the first ring after rq[0] */
 432                rq = intel_context_create_request(ce[1]);
 433                if (IS_ERR(rq)) {
 434                        err = PTR_ERR(rq);
 435                        goto err_ce;
 436                }
 437
 438                rq->sched.attr.priority = I915_PRIORITY_BARRIER;
 439                i915_request_get(rq);
 440                i915_request_add(rq);
 441
 442                err = wait_for_submit(engine, rq, HZ / 2);
 443                i915_request_put(rq);
 444                if (err) {
 445                        pr_err("%s: preemption request was not submitted\n",
 446                               engine->name);
 447                        err = -ETIME;
 448                }
 449
 450                pr_debug("%s: ring[0]:{ tail:%x, emit:%x }, ring[1]:{ tail:%x, emit:%x }\n",
 451                         engine->name,
 452                         ce[0]->ring->tail, ce[0]->ring->emit,
 453                         ce[1]->ring->tail, ce[1]->ring->emit);
 454
 455err_ce:
 456                intel_engine_flush_submission(engine);
 457                igt_spinner_end(&spin);
 458                for (n = 0; n < ARRAY_SIZE(ce); n++) {
 459                        if (IS_ERR_OR_NULL(ce[n]))
 460                                break;
 461
 462                        intel_context_unpin(ce[n]);
 463                        intel_context_put(ce[n]);
 464                }
 465                st_engine_heartbeat_enable(engine);
 466                if (igt_live_test_end(&t))
 467                        err = -EIO;
 468                if (err)
 469                        break;
 470        }
 471
 472        igt_spinner_fini(&spin);
 473        return err;
 474}
 475
 476static int live_pin_rewind(void *arg)
 477{
 478        struct intel_gt *gt = arg;
 479        struct intel_engine_cs *engine;
 480        enum intel_engine_id id;
 481        int err = 0;
 482
 483        /*
  484         * We have to be careful not to trust intel_ring too much; for example,
  485         * ring->head is updated upon retire, which is out of sync with pinning
 486         * the context. Thus we cannot use ring->head to set CTX_RING_HEAD,
 487         * or else we risk writing an older, stale value.
 488         *
  489         * To simulate this, let's apply a bit of deliberate sabotage.
 490         */
 491
 492        for_each_engine(engine, gt, id) {
 493                struct intel_context *ce;
 494                struct i915_request *rq;
 495                struct intel_ring *ring;
 496                struct igt_live_test t;
 497
 498                if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) {
 499                        err = -EIO;
 500                        break;
 501                }
 502
 503                ce = intel_context_create(engine);
 504                if (IS_ERR(ce)) {
 505                        err = PTR_ERR(ce);
 506                        break;
 507                }
 508
 509                err = intel_context_pin(ce);
 510                if (err) {
 511                        intel_context_put(ce);
 512                        break;
 513                }
 514
 515                /* Keep the context awake while we play games */
 516                err = i915_active_acquire(&ce->active);
 517                if (err) {
 518                        intel_context_unpin(ce);
 519                        intel_context_put(ce);
 520                        break;
 521                }
 522                ring = ce->ring;
 523
 524                /* Poison the ring, and offset the next request from HEAD */
 525                memset32(ring->vaddr, STACK_MAGIC, ring->size / sizeof(u32));
 526                ring->emit = ring->size / 2;
 527                ring->tail = ring->emit;
 528                GEM_BUG_ON(ring->head);
 529
 530                intel_context_unpin(ce);
 531
 532                /* Submit a simple nop request */
 533                GEM_BUG_ON(intel_context_is_pinned(ce));
 534                rq = intel_context_create_request(ce);
 535                i915_active_release(&ce->active); /* e.g. async retire */
 536                intel_context_put(ce);
 537                if (IS_ERR(rq)) {
 538                        err = PTR_ERR(rq);
 539                        break;
 540                }
 541                GEM_BUG_ON(!rq->head);
 542                i915_request_add(rq);
 543
 544                /* Expect not to hang! */
 545                if (igt_live_test_end(&t)) {
 546                        err = -EIO;
 547                        break;
 548                }
 549        }
 550
 551        return err;
 552}
 553
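     /*
      * Disable the submission tasklet and claim the per-engine reset bit so
      * the test can drive the execlists and reset the engine by hand without
      * the scheduler interfering.
      */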
 554static int engine_lock_reset_tasklet(struct intel_engine_cs *engine)
 555{
 556        tasklet_disable(&engine->sched_engine->tasklet);
 557        local_bh_disable();
 558
 559        if (test_and_set_bit(I915_RESET_ENGINE + engine->id,
 560                             &engine->gt->reset.flags)) {
 561                local_bh_enable();
 562                tasklet_enable(&engine->sched_engine->tasklet);
 563
 564                intel_gt_set_wedged(engine->gt);
 565                return -EBUSY;
 566        }
 567
 568        return 0;
 569}
 570
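     /* Drop the reset bit and re-enable the submission tasklet. */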
 571static void engine_unlock_reset_tasklet(struct intel_engine_cs *engine)
 572{
 573        clear_and_wake_up_bit(I915_RESET_ENGINE + engine->id,
 574                              &engine->gt->reset.flags);
 575
 576        local_bh_enable();
 577        tasklet_enable(&engine->sched_engine->tasklet);
 578}
 579
 580static int live_hold_reset(void *arg)
 581{
 582        struct intel_gt *gt = arg;
 583        struct intel_engine_cs *engine;
 584        enum intel_engine_id id;
 585        struct igt_spinner spin;
 586        int err = 0;
 587
 588        /*
 589         * In order to support offline error capture for fast preempt reset,
 590         * we need to decouple the guilty request and ensure that it and its
  591         * descendants are not executed while the capture is in progress.
 592         */
 593
 594        if (!intel_has_reset_engine(gt))
 595                return 0;
 596
 597        if (igt_spinner_init(&spin, gt))
 598                return -ENOMEM;
 599
 600        for_each_engine(engine, gt, id) {
 601                struct intel_context *ce;
 602                struct i915_request *rq;
 603
 604                ce = intel_context_create(engine);
 605                if (IS_ERR(ce)) {
 606                        err = PTR_ERR(ce);
 607                        break;
 608                }
 609
 610                st_engine_heartbeat_disable(engine);
 611
 612                rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);
 613                if (IS_ERR(rq)) {
 614                        err = PTR_ERR(rq);
 615                        goto out;
 616                }
 617                i915_request_add(rq);
 618
 619                if (!igt_wait_for_spinner(&spin, rq)) {
 620                        intel_gt_set_wedged(gt);
 621                        err = -ETIME;
 622                        goto out;
 623                }
 624
 625                /* We have our request executing, now remove it and reset */
 626
 627                err = engine_lock_reset_tasklet(engine);
 628                if (err)
 629                        goto out;
 630
 631                engine->sched_engine->tasklet.callback(&engine->sched_engine->tasklet);
 632                GEM_BUG_ON(execlists_active(&engine->execlists) != rq);
 633
 634                i915_request_get(rq);
 635                execlists_hold(engine, rq);
 636                GEM_BUG_ON(!i915_request_on_hold(rq));
 637
 638                __intel_engine_reset_bh(engine, NULL);
 639                GEM_BUG_ON(rq->fence.error != -EIO);
 640
 641                engine_unlock_reset_tasklet(engine);
 642
 643                /* Check that we do not resubmit the held request */
 644                if (!i915_request_wait(rq, 0, HZ / 5)) {
 645                        pr_err("%s: on hold request completed!\n",
 646                               engine->name);
 647                        i915_request_put(rq);
 648                        err = -EIO;
 649                        goto out;
 650                }
 651                GEM_BUG_ON(!i915_request_on_hold(rq));
 652
 653                /* But is resubmitted on release */
 654                execlists_unhold(engine, rq);
 655                if (i915_request_wait(rq, 0, HZ / 5) < 0) {
 656                        pr_err("%s: held request did not complete!\n",
 657                               engine->name);
 658                        intel_gt_set_wedged(gt);
 659                        err = -ETIME;
 660                }
 661                i915_request_put(rq);
 662
 663out:
 664                st_engine_heartbeat_enable(engine);
 665                intel_context_put(ce);
 666                if (err)
 667                        break;
 668        }
 669
 670        igt_spinner_fini(&spin);
 671        return err;
 672}
 673
 674static const char *error_repr(int err)
 675{
 676        return err ? "bad" : "good";
 677}
 678
 679static int live_error_interrupt(void *arg)
 680{
 681        static const struct error_phase {
 682                enum { GOOD = 0, BAD = -EIO } error[2];
 683        } phases[] = {
 684                { { BAD,  GOOD } },
 685                { { BAD,  BAD  } },
 686                { { BAD,  GOOD } },
 687                { { GOOD, GOOD } }, /* sentinel */
 688        };
 689        struct intel_gt *gt = arg;
 690        struct intel_engine_cs *engine;
 691        enum intel_engine_id id;
 692
 693        /*
 694         * We hook up the CS_MASTER_ERROR_INTERRUPT to have forewarning
 695         * of invalid commands in user batches that will cause a GPU hang.
 696         * This is a faster mechanism than using hangcheck/heartbeats, but
 697         * only detects problems the HW knows about -- it will not warn when
 698         * we kill the HW!
 699         *
 700         * To verify our detection and reset, we throw some invalid commands
 701         * at the HW and wait for the interrupt.
 702         */
 703
 704        if (!intel_has_reset_engine(gt))
 705                return 0;
 706
 707        for_each_engine(engine, gt, id) {
 708                const struct error_phase *p;
 709                int err = 0;
 710
 711                st_engine_heartbeat_disable(engine);
 712
 713                for (p = phases; p->error[0] != GOOD; p++) {
 714                        struct i915_request *client[ARRAY_SIZE(phases->error)];
 715                        u32 *cs;
 716                        int i;
 717
  718                        memset(client, 0, sizeof(client));
 719                        for (i = 0; i < ARRAY_SIZE(client); i++) {
 720                                struct intel_context *ce;
 721                                struct i915_request *rq;
 722
 723                                ce = intel_context_create(engine);
 724                                if (IS_ERR(ce)) {
 725                                        err = PTR_ERR(ce);
 726                                        goto out;
 727                                }
 728
 729                                rq = intel_context_create_request(ce);
 730                                intel_context_put(ce);
 731                                if (IS_ERR(rq)) {
 732                                        err = PTR_ERR(rq);
 733                                        goto out;
 734                                }
 735
 736                                if (rq->engine->emit_init_breadcrumb) {
 737                                        err = rq->engine->emit_init_breadcrumb(rq);
 738                                        if (err) {
 739                                                i915_request_add(rq);
 740                                                goto out;
 741                                        }
 742                                }
 743
 744                                cs = intel_ring_begin(rq, 2);
 745                                if (IS_ERR(cs)) {
 746                                        i915_request_add(rq);
 747                                        err = PTR_ERR(cs);
 748                                        goto out;
 749                                }
 750
 751                                if (p->error[i]) {
 752                                        *cs++ = 0xdeadbeef;
 753                                        *cs++ = 0xdeadbeef;
 754                                } else {
 755                                        *cs++ = MI_NOOP;
 756                                        *cs++ = MI_NOOP;
 757                                }
 758
 759                                client[i] = i915_request_get(rq);
 760                                i915_request_add(rq);
 761                        }
 762
 763                        err = wait_for_submit(engine, client[0], HZ / 2);
 764                        if (err) {
 765                                pr_err("%s: first request did not start within time!\n",
 766                                       engine->name);
 767                                err = -ETIME;
 768                                goto out;
 769                        }
 770
 771                        for (i = 0; i < ARRAY_SIZE(client); i++) {
 772                                if (i915_request_wait(client[i], 0, HZ / 5) < 0)
 773                                        pr_debug("%s: %s request incomplete!\n",
 774                                                 engine->name,
 775                                                 error_repr(p->error[i]));
 776
 777                                if (!i915_request_started(client[i])) {
 778                                        pr_err("%s: %s request not started!\n",
 779                                               engine->name,
 780                                               error_repr(p->error[i]));
 781                                        err = -ETIME;
 782                                        goto out;
 783                                }
 784
 785                                /* Kick the tasklet to process the error */
 786                                intel_engine_flush_submission(engine);
 787                                if (client[i]->fence.error != p->error[i]) {
 788                                        pr_err("%s: %s request (%s) with wrong error code: %d\n",
 789                                               engine->name,
 790                                               error_repr(p->error[i]),
 791                                               i915_request_completed(client[i]) ? "completed" : "running",
 792                                               client[i]->fence.error);
 793                                        err = -EINVAL;
 794                                        goto out;
 795                                }
 796                        }
 797
 798out:
 799                        for (i = 0; i < ARRAY_SIZE(client); i++)
 800                                if (client[i])
 801                                        i915_request_put(client[i]);
 802                        if (err) {
 803                                pr_err("%s: failed at phase[%zd] { %d, %d }\n",
 804                                       engine->name, p - phases,
 805                                       p->error[0], p->error[1]);
 806                                break;
 807                        }
 808                }
 809
 810                st_engine_heartbeat_enable(engine);
 811                if (err) {
 812                        intel_gt_set_wedged(gt);
 813                        return err;
 814                }
 815        }
 816
 817        return 0;
 818}
 819
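     /*
      * Emit a busywait on semaphore slot @idx of @vma; once that slot is
      * written, release the previous slot (@idx - 1) so that a chain of
      * such requests unwinds one link at a time.
      */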
 820static int
 821emit_semaphore_chain(struct i915_request *rq, struct i915_vma *vma, int idx)
 822{
 823        u32 *cs;
 824
 825        cs = intel_ring_begin(rq, 10);
 826        if (IS_ERR(cs))
 827                return PTR_ERR(cs);
 828
 829        *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
 830
 831        *cs++ = MI_SEMAPHORE_WAIT |
 832                MI_SEMAPHORE_GLOBAL_GTT |
 833                MI_SEMAPHORE_POLL |
 834                MI_SEMAPHORE_SAD_NEQ_SDD;
 835        *cs++ = 0;
 836        *cs++ = i915_ggtt_offset(vma) + 4 * idx;
 837        *cs++ = 0;
 838
 839        if (idx > 0) {
 840                *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
 841                *cs++ = i915_ggtt_offset(vma) + 4 * (idx - 1);
 842                *cs++ = 0;
 843                *cs++ = 1;
 844        } else {
 845                *cs++ = MI_NOOP;
 846                *cs++ = MI_NOOP;
 847                *cs++ = MI_NOOP;
 848                *cs++ = MI_NOOP;
 849        }
 850
 851        *cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
 852
 853        intel_ring_advance(rq, cs);
 854        return 0;
 855}
 856
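     /*
      * Submit a request on @engine that adds one link to the semaphore
      * chain in @vma at slot @idx (see emit_semaphore_chain).
      */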
 857static struct i915_request *
 858semaphore_queue(struct intel_engine_cs *engine, struct i915_vma *vma, int idx)
 859{
 860        struct intel_context *ce;
 861        struct i915_request *rq;
 862        int err;
 863
 864        ce = intel_context_create(engine);
 865        if (IS_ERR(ce))
 866                return ERR_CAST(ce);
 867
 868        rq = intel_context_create_request(ce);
 869        if (IS_ERR(rq))
 870                goto out_ce;
 871
 872        err = 0;
 873        if (rq->engine->emit_init_breadcrumb)
 874                err = rq->engine->emit_init_breadcrumb(rq);
 875        if (err == 0)
 876                err = emit_semaphore_chain(rq, vma, idx);
 877        if (err == 0)
 878                i915_request_get(rq);
 879        i915_request_add(rq);
 880        if (err)
 881                rq = ERR_PTR(err);
 882
 883out_ce:
 884        intel_context_put(ce);
 885        return rq;
 886}
 887
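     /*
      * Submit a kernel request at @prio that writes to slot @idx - 1 of
      * @vma, releasing the tail of the semaphore chain, and kick the
      * scheduler so it is processed promptly.
      */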
 888static int
 889release_queue(struct intel_engine_cs *engine,
 890              struct i915_vma *vma,
 891              int idx, int prio)
 892{
 893        struct i915_sched_attr attr = {
 894                .priority = prio,
 895        };
 896        struct i915_request *rq;
 897        u32 *cs;
 898
 899        rq = intel_engine_create_kernel_request(engine);
 900        if (IS_ERR(rq))
 901                return PTR_ERR(rq);
 902
 903        cs = intel_ring_begin(rq, 4);
 904        if (IS_ERR(cs)) {
 905                i915_request_add(rq);
 906                return PTR_ERR(cs);
 907        }
 908
 909        *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
 910        *cs++ = i915_ggtt_offset(vma) + 4 * (idx - 1);
 911        *cs++ = 0;
 912        *cs++ = 1;
 913
 914        intel_ring_advance(rq, cs);
 915
 916        i915_request_get(rq);
 917        i915_request_add(rq);
 918
 919        local_bh_disable();
 920        engine->sched_engine->schedule(rq, &attr);
 921        local_bh_enable(); /* kick tasklet */
 922
 923        i915_request_put(rq);
 924
 925        return 0;
 926}
 927
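     /*
      * Build a semaphore chain across every engine with its head on @outer,
      * then release the chain at maximum priority: the head can only
      * complete if timeslicing rotates each engine through its blocked
      * requests.
      */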
 928static int
 929slice_semaphore_queue(struct intel_engine_cs *outer,
 930                      struct i915_vma *vma,
 931                      int count)
 932{
 933        struct intel_engine_cs *engine;
 934        struct i915_request *head;
 935        enum intel_engine_id id;
 936        int err, i, n = 0;
 937
 938        head = semaphore_queue(outer, vma, n++);
 939        if (IS_ERR(head))
 940                return PTR_ERR(head);
 941
 942        for_each_engine(engine, outer->gt, id) {
 943                if (!intel_engine_has_preemption(engine))
 944                        continue;
 945
 946                for (i = 0; i < count; i++) {
 947                        struct i915_request *rq;
 948
 949                        rq = semaphore_queue(engine, vma, n++);
 950                        if (IS_ERR(rq)) {
 951                                err = PTR_ERR(rq);
 952                                goto out;
 953                        }
 954
 955                        i915_request_put(rq);
 956                }
 957        }
 958
 959        err = release_queue(outer, vma, n, I915_PRIORITY_BARRIER);
 960        if (err)
 961                goto out;
 962
 963        if (i915_request_wait(head, 0,
 964                              2 * outer->gt->info.num_engines * (count + 2) * (count + 3)) < 0) {
 965                pr_err("%s: Failed to slice along semaphore chain of length (%d, %d)!\n",
 966                       outer->name, count, n);
 967                GEM_TRACE_DUMP();
 968                intel_gt_set_wedged(outer->gt);
 969                err = -EIO;
 970        }
 971
 972out:
 973        i915_request_put(head);
 974        return err;
 975}
 976
 977static int live_timeslice_preempt(void *arg)
 978{
 979        struct intel_gt *gt = arg;
 980        struct drm_i915_gem_object *obj;
 981        struct intel_engine_cs *engine;
 982        enum intel_engine_id id;
 983        struct i915_vma *vma;
 984        void *vaddr;
 985        int err = 0;
 986
 987        /*
 988         * If a request takes too long, we would like to give other users
 989         * a fair go on the GPU. In particular, users may create batches
 990         * that wait upon external input, where that input may even be
 991         * supplied by another GPU job. To avoid blocking forever, we
 992         * need to preempt the current task and replace it with another
 993         * ready task.
 994         */
 995        if (!CONFIG_DRM_I915_TIMESLICE_DURATION)
 996                return 0;
 997
 998        obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
 999        if (IS_ERR(obj))
1000                return PTR_ERR(obj);
1001
1002        vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
1003        if (IS_ERR(vma)) {
1004                err = PTR_ERR(vma);
1005                goto err_obj;
1006        }
1007
1008        vaddr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
1009        if (IS_ERR(vaddr)) {
1010                err = PTR_ERR(vaddr);
1011                goto err_obj;
1012        }
1013
1014        err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
1015        if (err)
1016                goto err_map;
1017
1018        err = i915_vma_sync(vma);
1019        if (err)
1020                goto err_pin;
1021
1022        for_each_engine(engine, gt, id) {
1023                if (!intel_engine_has_preemption(engine))
1024                        continue;
1025
1026                memset(vaddr, 0, PAGE_SIZE);
1027
1028                st_engine_heartbeat_disable(engine);
1029                err = slice_semaphore_queue(engine, vma, 5);
1030                st_engine_heartbeat_enable(engine);
1031                if (err)
1032                        goto err_pin;
1033
1034                if (igt_flush_test(gt->i915)) {
1035                        err = -EIO;
1036                        goto err_pin;
1037                }
1038        }
1039
1040err_pin:
1041        i915_vma_unpin(vma);
1042err_map:
1043        i915_gem_object_unpin_map(obj);
1044err_obj:
1045        i915_gem_object_put(obj);
1046        return err;
1047}
1048
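     /*
      * Build a request that waits for the semaphore in @slot to reach @idx,
      * records the engine timestamp into slot[idx] and then bumps the
      * semaphore to @idx + 1, so the order in which requests ran can be
      * compared afterwards.
      */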
1049static struct i915_request *
1050create_rewinder(struct intel_context *ce,
1051                struct i915_request *wait,
1052                void *slot, int idx)
1053{
1054        const u32 offset =
1055                i915_ggtt_offset(ce->engine->status_page.vma) +
1056                offset_in_page(slot);
1057        struct i915_request *rq;
1058        u32 *cs;
1059        int err;
1060
1061        rq = intel_context_create_request(ce);
1062        if (IS_ERR(rq))
1063                return rq;
1064
1065        if (wait) {
1066                err = i915_request_await_dma_fence(rq, &wait->fence);
1067                if (err)
1068                        goto err;
1069        }
1070
1071        cs = intel_ring_begin(rq, 14);
1072        if (IS_ERR(cs)) {
1073                err = PTR_ERR(cs);
1074                goto err;
1075        }
1076
1077        *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
1078        *cs++ = MI_NOOP;
1079
1080        *cs++ = MI_SEMAPHORE_WAIT |
1081                MI_SEMAPHORE_GLOBAL_GTT |
1082                MI_SEMAPHORE_POLL |
1083                MI_SEMAPHORE_SAD_GTE_SDD;
1084        *cs++ = idx;
1085        *cs++ = offset;
1086        *cs++ = 0;
1087
1088        *cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_USE_GGTT;
1089        *cs++ = i915_mmio_reg_offset(RING_TIMESTAMP(rq->engine->mmio_base));
1090        *cs++ = offset + idx * sizeof(u32);
1091        *cs++ = 0;
1092
1093        *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
1094        *cs++ = offset;
1095        *cs++ = 0;
1096        *cs++ = idx + 1;
1097
1098        intel_ring_advance(rq, cs);
1099
1100        err = 0;
1101err:
1102        i915_request_get(rq);
1103        i915_request_add(rq);
1104        if (err) {
1105                i915_request_put(rq);
1106                return ERR_PTR(err);
1107        }
1108
1109        return rq;
1110}
1111
1112static int live_timeslice_rewind(void *arg)
1113{
1114        struct intel_gt *gt = arg;
1115        struct intel_engine_cs *engine;
1116        enum intel_engine_id id;
1117
1118        /*
1119         * The usual presumption on timeslice expiration is that we replace
1120         * the active context with another. However, given a chain of
 1121         * dependencies we may end up replacing the context with itself,
1122         * but only a few of those requests, forcing us to rewind the
1123         * RING_TAIL of the original request.
1124         */
1125        if (!CONFIG_DRM_I915_TIMESLICE_DURATION)
1126                return 0;
1127
1128        for_each_engine(engine, gt, id) {
1129                enum { A1, A2, B1 };
1130                enum { X = 1, Z, Y };
1131                struct i915_request *rq[3] = {};
1132                struct intel_context *ce;
1133                unsigned long timeslice;
1134                int i, err = 0;
1135                u32 *slot;
1136
1137                if (!intel_engine_has_timeslices(engine))
1138                        continue;
1139
1140                /*
1141                 * A:rq1 -- semaphore wait, timestamp X
1142                 * A:rq2 -- write timestamp Y
1143                 *
1144                 * B:rq1 [await A:rq1] -- write timestamp Z
1145                 *
1146                 * Force timeslice, release semaphore.
1147                 *
1148                 * Expect execution/evaluation order XZY
1149                 */
1150
1151                st_engine_heartbeat_disable(engine);
1152                timeslice = xchg(&engine->props.timeslice_duration_ms, 1);
1153
1154                slot = memset32(engine->status_page.addr + 1000, 0, 4);
1155
1156                ce = intel_context_create(engine);
1157                if (IS_ERR(ce)) {
1158                        err = PTR_ERR(ce);
1159                        goto err;
1160                }
1161
1162                rq[A1] = create_rewinder(ce, NULL, slot, X);
1163                if (IS_ERR(rq[A1])) {
1164                        intel_context_put(ce);
1165                        goto err;
1166                }
1167
1168                rq[A2] = create_rewinder(ce, NULL, slot, Y);
1169                intel_context_put(ce);
1170                if (IS_ERR(rq[A2]))
1171                        goto err;
1172
1173                err = wait_for_submit(engine, rq[A2], HZ / 2);
1174                if (err) {
1175                        pr_err("%s: failed to submit first context\n",
1176                               engine->name);
1177                        goto err;
1178                }
1179
1180                ce = intel_context_create(engine);
1181                if (IS_ERR(ce)) {
1182                        err = PTR_ERR(ce);
1183                        goto err;
1184                }
1185
1186                rq[B1] = create_rewinder(ce, rq[A1], slot, Z);
1187                intel_context_put(ce);
 1188                if (IS_ERR(rq[B1]))
1189                        goto err;
1190
1191                err = wait_for_submit(engine, rq[B1], HZ / 2);
1192                if (err) {
1193                        pr_err("%s: failed to submit second context\n",
1194                               engine->name);
1195                        goto err;
1196                }
1197
1198                /* ELSP[] = { { A:rq1, A:rq2 }, { B:rq1 } } */
1199                ENGINE_TRACE(engine, "forcing tasklet for rewind\n");
1200                while (i915_request_is_active(rq[A2])) { /* semaphore yield! */
1201                        /* Wait for the timeslice to kick in */
1202                        del_timer(&engine->execlists.timer);
1203                        tasklet_hi_schedule(&engine->sched_engine->tasklet);
1204                        intel_engine_flush_submission(engine);
1205                }
1206                /* -> ELSP[] = { { A:rq1 }, { B:rq1 } } */
1207                GEM_BUG_ON(!i915_request_is_active(rq[A1]));
1208                GEM_BUG_ON(!i915_request_is_active(rq[B1]));
1209                GEM_BUG_ON(i915_request_is_active(rq[A2]));
1210
1211                /* Release the hounds! */
1212                slot[0] = 1;
1213                wmb(); /* "pairs" with GPU; paranoid kick of internal CPU$ */
1214
1215                for (i = 1; i <= 3; i++) {
1216                        unsigned long timeout = jiffies + HZ / 2;
1217
1218                        while (!READ_ONCE(slot[i]) &&
1219                               time_before(jiffies, timeout))
1220                                ;
1221
1222                        if (!time_before(jiffies, timeout)) {
1223                                pr_err("%s: rq[%d] timed out\n",
1224                                       engine->name, i - 1);
1225                                err = -ETIME;
1226                                goto err;
1227                        }
1228
1229                        pr_debug("%s: slot[%d]:%x\n", engine->name, i, slot[i]);
1230                }
1231
1232                /* XZY: XZ < XY */
1233                if (slot[Z] - slot[X] >= slot[Y] - slot[X]) {
1234                        pr_err("%s: timeslicing did not run context B [%u] before A [%u]!\n",
1235                               engine->name,
1236                               slot[Z] - slot[X],
1237                               slot[Y] - slot[X]);
1238                        err = -EINVAL;
1239                }
1240
1241err:
1242                memset32(&slot[0], -1, 4);
1243                wmb();
1244
1245                engine->props.timeslice_duration_ms = timeslice;
1246                st_engine_heartbeat_enable(engine);
1247                for (i = 0; i < 3; i++)
1248                        i915_request_put(rq[i]);
1249                if (igt_flush_test(gt->i915))
1250                        err = -EIO;
1251                if (err)
1252                        return err;
1253        }
1254
1255        return 0;
1256}
1257
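     /* Submit an empty kernel request on @engine and return a reference. */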
1258static struct i915_request *nop_request(struct intel_engine_cs *engine)
1259{
1260        struct i915_request *rq;
1261
1262        rq = intel_engine_create_kernel_request(engine);
1263        if (IS_ERR(rq))
1264                return rq;
1265
1266        i915_request_get(rq);
1267        i915_request_add(rq);
1268
1269        return rq;
1270}
1271
1272static long slice_timeout(struct intel_engine_cs *engine)
1273{
1274        long timeout;
1275
1276        /* Enough time for a timeslice to kick in, and kick out */
1277        timeout = 2 * msecs_to_jiffies_timeout(timeslice(engine));
1278
1279        /* Enough time for the nop request to complete */
1280        timeout += HZ / 5;
1281
1282        return timeout + 1;
1283}
1284
1285static int live_timeslice_queue(void *arg)
1286{
1287        struct intel_gt *gt = arg;
1288        struct drm_i915_gem_object *obj;
1289        struct intel_engine_cs *engine;
1290        enum intel_engine_id id;
1291        struct i915_vma *vma;
1292        void *vaddr;
1293        int err = 0;
1294
1295        /*
 1296         * Make sure that even if ELSP[0] and ELSP[1] are both filled, with
 1297         * timeslicing between them disabled, we *do* enable timeslicing
1298         * if the queue demands it. (Normally, we do not submit if
1299         * ELSP[1] is already occupied, so must rely on timeslicing to
1300         * eject ELSP[0] in favour of the queue.)
1301         */
1302        if (!CONFIG_DRM_I915_TIMESLICE_DURATION)
1303                return 0;
1304
1305        obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
1306        if (IS_ERR(obj))
1307                return PTR_ERR(obj);
1308
1309        vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
1310        if (IS_ERR(vma)) {
1311                err = PTR_ERR(vma);
1312                goto err_obj;
1313        }
1314
1315        vaddr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
1316        if (IS_ERR(vaddr)) {
1317                err = PTR_ERR(vaddr);
1318                goto err_obj;
1319        }
1320
1321        err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
1322        if (err)
1323                goto err_map;
1324
1325        err = i915_vma_sync(vma);
1326        if (err)
1327                goto err_pin;
1328
1329        for_each_engine(engine, gt, id) {
1330                struct i915_sched_attr attr = { .priority = I915_PRIORITY_MAX };
1331                struct i915_request *rq, *nop;
1332
1333                if (!intel_engine_has_preemption(engine))
1334                        continue;
1335
1336                st_engine_heartbeat_disable(engine);
1337                memset(vaddr, 0, PAGE_SIZE);
1338
1339                /* ELSP[0]: semaphore wait */
1340                rq = semaphore_queue(engine, vma, 0);
1341                if (IS_ERR(rq)) {
1342                        err = PTR_ERR(rq);
1343                        goto err_heartbeat;
1344                }
1345                engine->sched_engine->schedule(rq, &attr);
1346                err = wait_for_submit(engine, rq, HZ / 2);
1347                if (err) {
1348                        pr_err("%s: Timed out trying to submit semaphores\n",
1349                               engine->name);
1350                        goto err_rq;
1351                }
1352
1353                /* ELSP[1]: nop request */
1354                nop = nop_request(engine);
1355                if (IS_ERR(nop)) {
1356                        err = PTR_ERR(nop);
1357                        goto err_rq;
1358                }
1359                err = wait_for_submit(engine, nop, HZ / 2);
1360                i915_request_put(nop);
1361                if (err) {
1362                        pr_err("%s: Timed out trying to submit nop\n",
1363                               engine->name);
1364                        goto err_rq;
1365                }
1366
1367                GEM_BUG_ON(i915_request_completed(rq));
1368                GEM_BUG_ON(execlists_active(&engine->execlists) != rq);
1369
 1370                /* Queue: semaphore signal, at the same priority as the semaphore */
1371                err = release_queue(engine, vma, 1, effective_prio(rq));
1372                if (err)
1373                        goto err_rq;
1374
1375                /* Wait until we ack the release_queue and start timeslicing */
1376                do {
1377                        cond_resched();
1378                        intel_engine_flush_submission(engine);
1379                } while (READ_ONCE(engine->execlists.pending[0]));
1380
1381                /* Timeslice every jiffy, so within 2 we should signal */
1382                if (i915_request_wait(rq, 0, slice_timeout(engine)) < 0) {
1383                        struct drm_printer p =
1384                                drm_info_printer(gt->i915->drm.dev);
1385
1386                        pr_err("%s: Failed to timeslice into queue\n",
1387                               engine->name);
1388                        intel_engine_dump(engine, &p,
1389                                          "%s\n", engine->name);
1390
1391                        memset(vaddr, 0xff, PAGE_SIZE);
1392                        err = -EIO;
1393                }
1394err_rq:
1395                i915_request_put(rq);
1396err_heartbeat:
1397                st_engine_heartbeat_enable(engine);
1398                if (err)
1399                        break;
1400        }
1401
1402err_pin:
1403        i915_vma_unpin(vma);
1404err_map:
1405        i915_gem_object_unpin_map(obj);
1406err_obj:
1407        i915_gem_object_put(obj);
1408        return err;
1409}
1410
1411static int live_timeslice_nopreempt(void *arg)
1412{
1413        struct intel_gt *gt = arg;
1414        struct intel_engine_cs *engine;
1415        enum intel_engine_id id;
1416        struct igt_spinner spin;
1417        int err = 0;
1418
1419        /*
1420         * We should not timeslice into a request that is marked with
1421         * I915_REQUEST_NOPREEMPT.
1422         */
1423        if (!CONFIG_DRM_I915_TIMESLICE_DURATION)
1424                return 0;
1425
1426        if (igt_spinner_init(&spin, gt))
1427                return -ENOMEM;
1428
1429        for_each_engine(engine, gt, id) {
1430                struct intel_context *ce;
1431                struct i915_request *rq;
1432                unsigned long timeslice;
1433
1434                if (!intel_engine_has_preemption(engine))
1435                        continue;
1436
1437                ce = intel_context_create(engine);
1438                if (IS_ERR(ce)) {
1439                        err = PTR_ERR(ce);
1440                        break;
1441                }
1442
1443                st_engine_heartbeat_disable(engine);
1444                timeslice = xchg(&engine->props.timeslice_duration_ms, 1);
1445
1446                /* Create an unpreemptible spinner */
1447
1448                rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);
1449                intel_context_put(ce);
1450                if (IS_ERR(rq)) {
1451                        err = PTR_ERR(rq);
1452                        goto out_heartbeat;
1453                }
1454
1455                i915_request_get(rq);
1456                i915_request_add(rq);
1457
1458                if (!igt_wait_for_spinner(&spin, rq)) {
1459                        i915_request_put(rq);
1460                        err = -ETIME;
1461                        goto out_spin;
1462                }
1463
1464                set_bit(I915_FENCE_FLAG_NOPREEMPT, &rq->fence.flags);
1465                i915_request_put(rq);
1466
1467                /* Followed by a maximum priority barrier (heartbeat) */
1468
1469                ce = intel_context_create(engine);
1470                if (IS_ERR(ce)) {
1471                        err = PTR_ERR(ce);
1472                        goto out_spin;
1473                }
1474
1475                rq = intel_context_create_request(ce);
1476                intel_context_put(ce);
1477                if (IS_ERR(rq)) {
1478                        err = PTR_ERR(rq);
1479                        goto out_spin;
1480                }
1481
1482                rq->sched.attr.priority = I915_PRIORITY_BARRIER;
1483                i915_request_get(rq);
1484                i915_request_add(rq);
1485
1486                /*
1487                 * Wait until the barrier is in ELSP, and we know timeslicing
1488                 * will have been activated.
1489                 */
1490                if (wait_for_submit(engine, rq, HZ / 2)) {
1491                        i915_request_put(rq);
1492                        err = -ETIME;
1493                        goto out_spin;
1494                }
1495
1496                /*
1497                 * Since the ELSP[0] request is unpreemptible, it should not
1498                 * allow the maximum priority barrier through. Wait long
1499                 * enough to see if it is timesliced in by mistake.
1500                 */
1501                if (i915_request_wait(rq, 0, slice_timeout(engine)) >= 0) {
1502                        pr_err("%s: I915_PRIORITY_BARRIER request completed, bypassing no-preempt request\n",
1503                               engine->name);
1504                        err = -EINVAL;
1505                }
1506                i915_request_put(rq);
1507
1508out_spin:
1509                igt_spinner_end(&spin);
1510out_heartbeat:
1511                xchg(&engine->props.timeslice_duration_ms, timeslice);
1512                st_engine_heartbeat_enable(engine);
1513                if (err)
1514                        break;
1515
1516                if (igt_flush_test(gt->i915)) {
1517                        err = -EIO;
1518                        break;
1519                }
1520        }
1521
1522        igt_spinner_fini(&spin);
1523        return err;
1524}
1525
1526static int live_busywait_preempt(void *arg)
1527{
1528        struct intel_gt *gt = arg;
1529        struct i915_gem_context *ctx_hi, *ctx_lo;
1530        struct intel_engine_cs *engine;
1531        struct drm_i915_gem_object *obj;
1532        struct i915_vma *vma;
1533        enum intel_engine_id id;
1534        int err = -ENOMEM;
1535        u32 *map;
1536
1537        /*
1538         * Verify that even without HAS_LOGICAL_RING_PREEMPTION, we can
1539         * preempt the busywaits used to synchronise between rings.
1540         */
1541
1542        ctx_hi = kernel_context(gt->i915, NULL);
1543        if (!ctx_hi)
1544                return -ENOMEM;
1545        ctx_hi->sched.priority = I915_CONTEXT_MAX_USER_PRIORITY;
1546
1547        ctx_lo = kernel_context(gt->i915, NULL);
1548        if (!ctx_lo)
1549                goto err_ctx_hi;
1550        ctx_lo->sched.priority = I915_CONTEXT_MIN_USER_PRIORITY;
1551
1552        obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
1553        if (IS_ERR(obj)) {
1554                err = PTR_ERR(obj);
1555                goto err_ctx_lo;
1556        }
1557
1558        map = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
1559        if (IS_ERR(map)) {
1560                err = PTR_ERR(map);
1561                goto err_obj;
1562        }
1563
1564        vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
1565        if (IS_ERR(vma)) {
1566                err = PTR_ERR(vma);
1567                goto err_map;
1568        }
1569
1570        err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
1571        if (err)
1572                goto err_map;
1573
1574        err = i915_vma_sync(vma);
1575        if (err)
1576                goto err_vma;
1577
1578        for_each_engine(engine, gt, id) {
1579                struct i915_request *lo, *hi;
1580                struct igt_live_test t;
1581                u32 *cs;
1582
1583                if (!intel_engine_has_preemption(engine))
1584                        continue;
1585
1586                if (!intel_engine_can_store_dword(engine))
1587                        continue;
1588
1589                if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) {
1590                        err = -EIO;
1591                        goto err_vma;
1592                }
1593
1594                /*
1595                 * We create two requests. The low priority request
1596                 * busywaits on a semaphore (inside the ringbuffer where
1597                 * it should be preemptible) and the high priority request
1598                 * uses a MI_STORE_DWORD_IMM to update the semaphore value
1599                 * allowing the first request to complete. If preemption
1600                 * fails, we hang instead.
1601                 */
1602
1603                lo = igt_request_alloc(ctx_lo, engine);
1604                if (IS_ERR(lo)) {
1605                        err = PTR_ERR(lo);
1606                        goto err_vma;
1607                }
1608
1609                cs = intel_ring_begin(lo, 8);
1610                if (IS_ERR(cs)) {
1611                        err = PTR_ERR(cs);
1612                        i915_request_add(lo);
1613                        goto err_vma;
1614                }
1615
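                    /*
                     * The low priority batch first writes 1 into the scratch
                     * dword (so the CPU can see it has started) and then spins
                     * on MI_SEMAPHORE_WAIT until that dword reads back as 0.
                     */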
1616                *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
1617                *cs++ = i915_ggtt_offset(vma);
1618                *cs++ = 0;
1619                *cs++ = 1;
1620
1621                /* XXX Do we need a flush + invalidate here? */
1622
1623                *cs++ = MI_SEMAPHORE_WAIT |
1624                        MI_SEMAPHORE_GLOBAL_GTT |
1625                        MI_SEMAPHORE_POLL |
1626                        MI_SEMAPHORE_SAD_EQ_SDD;
1627                *cs++ = 0;
1628                *cs++ = i915_ggtt_offset(vma);
1629                *cs++ = 0;
1630
1631                intel_ring_advance(lo, cs);
1632
1633                i915_request_get(lo);
1634                i915_request_add(lo);
1635
1636                if (wait_for(READ_ONCE(*map), 10)) {
1637                        i915_request_put(lo);
1638                        err = -ETIMEDOUT;
1639                        goto err_vma;
1640                }
1641
1642                /* Low priority request should be busywaiting now */
1643                if (i915_request_wait(lo, 0, 1) != -ETIME) {
1644                        i915_request_put(lo);
1645                        pr_err("%s: Busywaiting request did not busywait!\n",
1646                               engine->name);
1647                        err = -EIO;
1648                        goto err_vma;
1649                }
1650
1651                hi = igt_request_alloc(ctx_hi, engine);
1652                if (IS_ERR(hi)) {
1653                        err = PTR_ERR(hi);
1654                        i915_request_put(lo);
1655                        goto err_vma;
1656                }
1657
1658                cs = intel_ring_begin(hi, 4);
1659                if (IS_ERR(cs)) {
1660                        err = PTR_ERR(cs);
1661                        i915_request_add(hi);
1662                        i915_request_put(lo);
1663                        goto err_vma;
1664                }
1665
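                    /*
                     * The high priority batch clears that dword, releasing the
                     * busywait; it can only reach the hardware by preempting
                     * the spinning low priority context.
                     */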
1666                *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
1667                *cs++ = i915_ggtt_offset(vma);
1668                *cs++ = 0;
1669                *cs++ = 0;
1670
1671                intel_ring_advance(hi, cs);
1672                i915_request_add(hi);
1673
1674                if (i915_request_wait(lo, 0, HZ / 5) < 0) {
1675                        struct drm_printer p = drm_info_printer(gt->i915->drm.dev);
1676
1677                        pr_err("%s: Failed to preempt semaphore busywait!\n",
1678                               engine->name);
1679
1680                        intel_engine_dump(engine, &p, "%s\n", engine->name);
1681                        GEM_TRACE_DUMP();
1682
1683                        i915_request_put(lo);
1684                        intel_gt_set_wedged(gt);
1685                        err = -EIO;
1686                        goto err_vma;
1687                }
1688                GEM_BUG_ON(READ_ONCE(*map));
1689                i915_request_put(lo);
1690
1691                if (igt_live_test_end(&t)) {
1692                        err = -EIO;
1693                        goto err_vma;
1694                }
1695        }
1696
1697        err = 0;
1698err_vma:
1699        i915_vma_unpin(vma);
1700err_map:
1701        i915_gem_object_unpin_map(obj);
1702err_obj:
1703        i915_gem_object_put(obj);
1704err_ctx_lo:
1705        kernel_context_close(ctx_lo);
1706err_ctx_hi:
1707        kernel_context_close(ctx_hi);
1708        return err;
1709}
1710
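    /*
     * Helper for the tests below: resolve the legacy engine mapping for @ctx
     * and emit a spinning request on it, with @arb (MI_ARB_CHECK vs MI_NOOP)
     * controlling whether the spin loop contains an arbitration point.
     */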
1711static struct i915_request *
1712spinner_create_request(struct igt_spinner *spin,
1713                       struct i915_gem_context *ctx,
1714                       struct intel_engine_cs *engine,
1715                       u32 arb)
1716{
1717        struct intel_context *ce;
1718        struct i915_request *rq;
1719
1720        ce = i915_gem_context_get_engine(ctx, engine->legacy_idx);
1721        if (IS_ERR(ce))
1722                return ERR_CAST(ce);
1723
1724        rq = igt_spinner_create_request(spin, ce, arb);
1725        intel_context_put(ce);
1726        return rq;
1727}
1728
1729static int live_preempt(void *arg)
1730{
1731        struct intel_gt *gt = arg;
1732        struct i915_gem_context *ctx_hi, *ctx_lo;
1733        struct igt_spinner spin_hi, spin_lo;
1734        struct intel_engine_cs *engine;
1735        enum intel_engine_id id;
1736        int err = -ENOMEM;
1737
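            /*
             * Smoke test: a spinner submitted from a maximum user priority
             * context must be able to start (i.e. preempt) while a minimum
             * priority spinner is still busy on the same engine.
             */
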
1738        if (igt_spinner_init(&spin_hi, gt))
1739                return -ENOMEM;
1740
1741        if (igt_spinner_init(&spin_lo, gt))
1742                goto err_spin_hi;
1743
1744        ctx_hi = kernel_context(gt->i915, NULL);
1745        if (!ctx_hi)
1746                goto err_spin_lo;
1747        ctx_hi->sched.priority = I915_CONTEXT_MAX_USER_PRIORITY;
1748
1749        ctx_lo = kernel_context(gt->i915, NULL);
1750        if (!ctx_lo)
1751                goto err_ctx_hi;
1752        ctx_lo->sched.priority = I915_CONTEXT_MIN_USER_PRIORITY;
1753
1754        for_each_engine(engine, gt, id) {
1755                struct igt_live_test t;
1756                struct i915_request *rq;
1757
1758                if (!intel_engine_has_preemption(engine))
1759                        continue;
1760
1761                if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) {
1762                        err = -EIO;
1763                        goto err_ctx_lo;
1764                }
1765
1766                rq = spinner_create_request(&spin_lo, ctx_lo, engine,
1767                                            MI_ARB_CHECK);
1768                if (IS_ERR(rq)) {
1769                        err = PTR_ERR(rq);
1770                        goto err_ctx_lo;
1771                }
1772
1773                i915_request_add(rq);
1774                if (!igt_wait_for_spinner(&spin_lo, rq)) {
1775                        GEM_TRACE("lo spinner failed to start\n");
1776                        GEM_TRACE_DUMP();
1777                        intel_gt_set_wedged(gt);
1778                        err = -EIO;
1779                        goto err_ctx_lo;
1780                }
1781
1782                rq = spinner_create_request(&spin_hi, ctx_hi, engine,
1783                                            MI_ARB_CHECK);
1784                if (IS_ERR(rq)) {
1785                        igt_spinner_end(&spin_lo);
1786                        err = PTR_ERR(rq);
1787                        goto err_ctx_lo;
1788                }
1789
1790                i915_request_add(rq);
1791                if (!igt_wait_for_spinner(&spin_hi, rq)) {
1792                        GEM_TRACE("hi spinner failed to start\n");
1793                        GEM_TRACE_DUMP();
1794                        intel_gt_set_wedged(gt);
1795                        err = -EIO;
1796                        goto err_ctx_lo;
1797                }
1798
1799                igt_spinner_end(&spin_hi);
1800                igt_spinner_end(&spin_lo);
1801
1802                if (igt_live_test_end(&t)) {
1803                        err = -EIO;
1804                        goto err_ctx_lo;
1805                }
1806        }
1807
1808        err = 0;
1809err_ctx_lo:
1810        kernel_context_close(ctx_lo);
1811err_ctx_hi:
1812        kernel_context_close(ctx_hi);
1813err_spin_lo:
1814        igt_spinner_fini(&spin_lo);
1815err_spin_hi:
1816        igt_spinner_fini(&spin_hi);
1817        return err;
1818}
1819
1820static int live_late_preempt(void *arg)
1821{
1822        struct intel_gt *gt = arg;
1823        struct i915_gem_context *ctx_hi, *ctx_lo;
1824        struct igt_spinner spin_hi, spin_lo;
1825        struct intel_engine_cs *engine;
1826        struct i915_sched_attr attr = {};
1827        enum intel_engine_id id;
1828        int err = -ENOMEM;
1829
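            /*
             * As live_preempt, but the high priority spinner is submitted at
             * default priority and only bumped to I915_PRIORITY_MAX via
             * engine->sched_engine->schedule() once both requests are in
             * flight, exercising late reprioritisation.
             */
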
1830        if (igt_spinner_init(&spin_hi, gt))
1831                return -ENOMEM;
1832
1833        if (igt_spinner_init(&spin_lo, gt))
1834                goto err_spin_hi;
1835
1836        ctx_hi = kernel_context(gt->i915, NULL);
1837        if (!ctx_hi)
1838                goto err_spin_lo;
1839
1840        ctx_lo = kernel_context(gt->i915, NULL);
1841        if (!ctx_lo)
1842                goto err_ctx_hi;
1843
1844        /* Make sure ctx_lo stays before ctx_hi until we trigger preemption. */
1845        ctx_lo->sched.priority = 1;
1846
1847        for_each_engine(engine, gt, id) {
1848                struct igt_live_test t;
1849                struct i915_request *rq;
1850
1851                if (!intel_engine_has_preemption(engine))
1852                        continue;
1853
1854                if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) {
1855                        err = -EIO;
1856                        goto err_ctx_lo;
1857                }
1858
1859                rq = spinner_create_request(&spin_lo, ctx_lo, engine,
1860                                            MI_ARB_CHECK);
1861                if (IS_ERR(rq)) {
1862                        err = PTR_ERR(rq);
1863                        goto err_ctx_lo;
1864                }
1865
1866                i915_request_add(rq);
1867                if (!igt_wait_for_spinner(&spin_lo, rq)) {
1868                        pr_err("First context failed to start\n");
1869                        goto err_wedged;
1870                }
1871
1872                rq = spinner_create_request(&spin_hi, ctx_hi, engine,
1873                                            MI_NOOP);
1874                if (IS_ERR(rq)) {
1875                        igt_spinner_end(&spin_lo);
1876                        err = PTR_ERR(rq);
1877                        goto err_ctx_lo;
1878                }
1879
1880                i915_request_add(rq);
1881                if (igt_wait_for_spinner(&spin_hi, rq)) {
1882                        pr_err("Second context overtook first?\n");
1883                        goto err_wedged;
1884                }
1885
1886                attr.priority = I915_PRIORITY_MAX;
1887                engine->sched_engine->schedule(rq, &attr);
1888
1889                if (!igt_wait_for_spinner(&spin_hi, rq)) {
1890                        pr_err("High priority context failed to preempt the low priority context\n");
1891                        GEM_TRACE_DUMP();
1892                        goto err_wedged;
1893                }
1894
1895                igt_spinner_end(&spin_hi);
1896                igt_spinner_end(&spin_lo);
1897
1898                if (igt_live_test_end(&t)) {
1899                        err = -EIO;
1900                        goto err_ctx_lo;
1901                }
1902        }
1903
1904        err = 0;
1905err_ctx_lo:
1906        kernel_context_close(ctx_lo);
1907err_ctx_hi:
1908        kernel_context_close(ctx_hi);
1909err_spin_lo:
1910        igt_spinner_fini(&spin_lo);
1911err_spin_hi:
1912        igt_spinner_fini(&spin_hi);
1913        return err;
1914
1915err_wedged:
1916        igt_spinner_end(&spin_hi);
1917        igt_spinner_end(&spin_lo);
1918        intel_gt_set_wedged(gt);
1919        err = -EIO;
1920        goto err_ctx_lo;
1921}
1922
1923struct preempt_client {
1924        struct igt_spinner spin;
1925        struct i915_gem_context *ctx;
1926};
1927
1928static int preempt_client_init(struct intel_gt *gt, struct preempt_client *c)
1929{
1930        c->ctx = kernel_context(gt->i915, NULL);
1931        if (!c->ctx)
1932                return -ENOMEM;
1933
1934        if (igt_spinner_init(&c->spin, gt))
1935                goto err_ctx;
1936
1937        return 0;
1938
1939err_ctx:
1940        kernel_context_close(c->ctx);
1941        return -ENOMEM;
1942}
1943
1944static void preempt_client_fini(struct preempt_client *c)
1945{
1946        igt_spinner_fini(&c->spin);
1947        kernel_context_close(c->ctx);
1948}
1949
1950static int live_nopreempt(void *arg)
1951{
1952        struct intel_gt *gt = arg;
1953        struct intel_engine_cs *engine;
1954        struct preempt_client a, b;
1955        enum intel_engine_id id;
1956        int err = -ENOMEM;
1957
1958        /*
1959         * Verify that we can disable preemption for an individual request
1960         * that may be being observed and so must not be interrupted.
1961         */
1962
1963        if (preempt_client_init(gt, &a))
1964                return -ENOMEM;
1965        if (preempt_client_init(gt, &b))
1966                goto err_client_a;
1967        b.ctx->sched.priority = I915_PRIORITY_MAX;
1968
1969        for_each_engine(engine, gt, id) {
1970                struct i915_request *rq_a, *rq_b;
1971
1972                if (!intel_engine_has_preemption(engine))
1973                        continue;
1974
1975                engine->execlists.preempt_hang.count = 0;
1976
1977                rq_a = spinner_create_request(&a.spin,
1978                                              a.ctx, engine,
1979                                              MI_ARB_CHECK);
1980                if (IS_ERR(rq_a)) {
1981                        err = PTR_ERR(rq_a);
1982                        goto err_client_b;
1983                }
1984
1985                /* Low priority client, but unpreemptable! */
1986                __set_bit(I915_FENCE_FLAG_NOPREEMPT, &rq_a->fence.flags);
1987
1988                i915_request_add(rq_a);
1989                if (!igt_wait_for_spinner(&a.spin, rq_a)) {
1990                        pr_err("First client failed to start\n");
1991                        goto err_wedged;
1992                }
1993
1994                rq_b = spinner_create_request(&b.spin,
1995                                              b.ctx, engine,
1996                                              MI_ARB_CHECK);
1997                if (IS_ERR(rq_b)) {
1998                        err = PTR_ERR(rq_b);
1999                        goto err_client_b;
2000                }
2001
2002                i915_request_add(rq_b);
2003
2004                /* B is much more important than A! (But A is unpreemptable.) */
2005                GEM_BUG_ON(rq_prio(rq_b) <= rq_prio(rq_a));
2006
2007                /* Wait long enough for preemption and timeslicing */
2008                if (igt_wait_for_spinner(&b.spin, rq_b)) {
2009                        pr_err("Second client started too early!\n");
2010                        goto err_wedged;
2011                }
2012
2013                igt_spinner_end(&a.spin);
2014
2015                if (!igt_wait_for_spinner(&b.spin, rq_b)) {
2016                        pr_err("Second client failed to start\n");
2017                        goto err_wedged;
2018                }
2019
2020                igt_spinner_end(&b.spin);
2021
2022                if (engine->execlists.preempt_hang.count) {
2023                        pr_err("Preemption recorded x%d; should have been suppressed!\n",
2024                               engine->execlists.preempt_hang.count);
2025                        err = -EINVAL;
2026                        goto err_wedged;
2027                }
2028
2029                if (igt_flush_test(gt->i915))
2030                        goto err_wedged;
2031        }
2032
2033        err = 0;
2034err_client_b:
2035        preempt_client_fini(&b);
2036err_client_a:
2037        preempt_client_fini(&a);
2038        return err;
2039
2040err_wedged:
2041        igt_spinner_end(&b.spin);
2042        igt_spinner_end(&a.spin);
2043        intel_gt_set_wedged(gt);
2044        err = -EIO;
2045        goto err_client_b;
2046}
2047
2048struct live_preempt_cancel {
2049        struct intel_engine_cs *engine;
2050        struct preempt_client a, b;
2051};
2052
2053static int __cancel_active0(struct live_preempt_cancel *arg)
2054{
2055        struct i915_request *rq;
2056        struct igt_live_test t;
2057        int err;
2058
2059        /* Preempt cancel of ELSP0 */
2060        GEM_TRACE("%s(%s)\n", __func__, arg->engine->name);
2061        if (igt_live_test_begin(&t, arg->engine->i915,
2062                                __func__, arg->engine->name))
2063                return -EIO;
2064
2065        rq = spinner_create_request(&arg->a.spin,
2066                                    arg->a.ctx, arg->engine,
2067                                    MI_ARB_CHECK);
2068        if (IS_ERR(rq))
2069                return PTR_ERR(rq);
2070
2071        clear_bit(CONTEXT_BANNED, &rq->context->flags);
2072        i915_request_get(rq);
2073        i915_request_add(rq);
2074        if (!igt_wait_for_spinner(&arg->a.spin, rq)) {
2075                err = -EIO;
2076                goto out;
2077        }
2078
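            /*
             * Ban the context, then prod the engine with a heartbeat pulse:
             * the pulse forces a preemption attempt, during which the banned
             * spinner is expected to be cancelled with -EIO (verified by
             * wait_for_reset() below).
             */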
2079        intel_context_set_banned(rq->context);
2080        err = intel_engine_pulse(arg->engine);
2081        if (err)
2082                goto out;
2083
2084        err = wait_for_reset(arg->engine, rq, HZ / 2);
2085        if (err) {
2086                pr_err("Cancelled inflight0 request did not reset\n");
2087                goto out;
2088        }
2089
2090out:
2091        i915_request_put(rq);
2092        if (igt_live_test_end(&t))
2093                err = -EIO;
2094        return err;
2095}
2096
2097static int __cancel_active1(struct live_preempt_cancel *arg)
2098{
2099        struct i915_request *rq[2] = {};
2100        struct igt_live_test t;
2101        int err;
2102
2103        /* Preempt cancel of ELSP1 */
2104        GEM_TRACE("%s(%s)\n", __func__, arg->engine->name);
2105        if (igt_live_test_begin(&t, arg->engine->i915,
2106                                __func__, arg->engine->name))
2107                return -EIO;
2108
2109        rq[0] = spinner_create_request(&arg->a.spin,
2110                                       arg->a.ctx, arg->engine,
2111                                       MI_NOOP); /* no preemption */
2112        if (IS_ERR(rq[0]))
2113                return PTR_ERR(rq[0]);
2114
2115        clear_bit(CONTEXT_BANNED, &rq[0]->context->flags);
2116        i915_request_get(rq[0]);
2117        i915_request_add(rq[0]);
2118        if (!igt_wait_for_spinner(&arg->a.spin, rq[0])) {
2119                err = -EIO;
2120                goto out;
2121        }
2122
2123        rq[1] = spinner_create_request(&arg->b.spin,
2124                                       arg->b.ctx, arg->engine,
2125                                       MI_ARB_CHECK);
2126        if (IS_ERR(rq[1])) {
2127                err = PTR_ERR(rq[1]);
2128                goto out;
2129        }
2130
2131        clear_bit(CONTEXT_BANNED, &rq[1]->context->flags);
2132        i915_request_get(rq[1]);
2133        err = i915_request_await_dma_fence(rq[1], &rq[0]->fence);
2134        i915_request_add(rq[1]);
2135        if (err)
2136                goto out;
2137
2138        intel_context_set_banned(rq[1]->context);
2139        err = intel_engine_pulse(arg->engine);
2140        if (err)
2141                goto out;
2142
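            /*
             * rq[0] was built without an arbitration point (MI_NOOP) and so
             * cannot be preempted; end its spin so it retires normally,
             * leaving only the banned rq[1] to be cancelled.
             */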
2143        igt_spinner_end(&arg->a.spin);
2144        err = wait_for_reset(arg->engine, rq[1], HZ / 2);
2145        if (err)
2146                goto out;
2147
2148        if (rq[0]->fence.error != 0) {
2149                pr_err("Normal inflight0 request did not complete\n");
2150                err = -EINVAL;
2151                goto out;
2152        }
2153
2154        if (rq[1]->fence.error != -EIO) {
2155                pr_err("Cancelled inflight1 request did not report -EIO\n");
2156                err = -EINVAL;
2157                goto out;
2158        }
2159
2160out:
2161        i915_request_put(rq[1]);
2162        i915_request_put(rq[0]);
2163        if (igt_live_test_end(&t))
2164                err = -EIO;
2165        return err;
2166}
2167
2168static int __cancel_queued(struct live_preempt_cancel *arg)
2169{
2170        struct i915_request *rq[3] = {};
2171        struct igt_live_test t;
2172        int err;
2173
2174        /* Full ELSP and one in the wings */
2175        GEM_TRACE("%s(%s)\n", __func__, arg->engine->name);
2176        if (igt_live_test_begin(&t, arg->engine->i915,
2177                                __func__, arg->engine->name))
2178                return -EIO;
2179
2180        rq[0] = spinner_create_request(&arg->a.spin,
2181                                       arg->a.ctx, arg->engine,
2182                                       MI_ARB_CHECK);
2183        if (IS_ERR(rq[0]))
2184                return PTR_ERR(rq[0]);
2185
2186        clear_bit(CONTEXT_BANNED, &rq[0]->context->flags);
2187        i915_request_get(rq[0]);
2188        i915_request_add(rq[0]);
2189        if (!igt_wait_for_spinner(&arg->a.spin, rq[0])) {
2190                err = -EIO;
2191                goto out;
2192        }
2193
2194        rq[1] = igt_request_alloc(arg->b.ctx, arg->engine);
2195        if (IS_ERR(rq[1])) {
2196                err = PTR_ERR(rq[1]);
2197                goto out;
2198        }
2199
2200        clear_bit(CONTEXT_BANNED, &rq[1]->context->flags);
2201        i915_request_get(rq[1]);
2202        err = i915_request_await_dma_fence(rq[1], &rq[0]->fence);
2203        i915_request_add(rq[1]);
2204        if (err)
2205                goto out;
2206
2207        rq[2] = spinner_create_request(&arg->b.spin,
2208                                       arg->a.ctx, arg->engine,
2209                                       MI_ARB_CHECK);
2210        if (IS_ERR(rq[2])) {
2211                err = PTR_ERR(rq[2]);
2212                goto out;
2213        }
2214
2215        i915_request_get(rq[2]);
2216        err = i915_request_await_dma_fence(rq[2], &rq[1]->fence);
2217        i915_request_add(rq[2]);
2218        if (err)
2219                goto out;
2220
2221        intel_context_set_banned(rq[2]->context);
2222        err = intel_engine_pulse(arg->engine);
2223        if (err)
2224                goto out;
2225
2226        err = wait_for_reset(arg->engine, rq[2], HZ / 2);
2227        if (err)
2228                goto out;
2229
2230        if (rq[0]->fence.error != -EIO) {
2231                pr_err("Cancelled inflight0 request did not report -EIO\n");
2232                err = -EINVAL;
2233                goto out;
2234        }
2235
2236        if (rq[1]->fence.error != 0) {
2237                pr_err("Normal inflight1 request did not complete\n");
2238                err = -EINVAL;
2239                goto out;
2240        }
2241
2242        if (rq[2]->fence.error != -EIO) {
2243                pr_err("Cancelled queued request did not report -EIO\n");
2244                err = -EINVAL;
2245                goto out;
2246        }
2247
2248out:
2249        i915_request_put(rq[2]);
2250        i915_request_put(rq[1]);
2251        i915_request_put(rq[0]);
2252        if (igt_live_test_end(&t))
2253                err = -EIO;
2254        return err;
2255}
2256
2257static int __cancel_hostile(struct live_preempt_cancel *arg)
2258{
2259        struct i915_request *rq;
2260        int err;
2261
2262        /* Preempt cancel non-preemptible spinner in ELSP0 */
2263        if (!CONFIG_DRM_I915_PREEMPT_TIMEOUT)
2264                return 0;
2265
2266        if (!intel_has_reset_engine(arg->engine->gt))
2267                return 0;
2268
2269        GEM_TRACE("%s(%s)\n", __func__, arg->engine->name);
2270        rq = spinner_create_request(&arg->a.spin,
2271                                    arg->a.ctx, arg->engine,
2272                                    MI_NOOP); /* preemption disabled */
2273        if (IS_ERR(rq))
2274                return PTR_ERR(rq);
2275
2276        clear_bit(CONTEXT_BANNED, &rq->context->flags);
2277        i915_request_get(rq);
2278        i915_request_add(rq);
2279        if (!igt_wait_for_spinner(&arg->a.spin, rq)) {
2280                err = -EIO;
2281                goto out;
2282        }
2283
2284        intel_context_set_banned(rq->context);
2285        err = intel_engine_pulse(arg->engine); /* force reset */
2286        if (err)
2287                goto out;
2288
2289        err = wait_for_reset(arg->engine, rq, HZ / 2);
2290        if (err) {
2291                pr_err("Cancelled inflight0 request did not reset\n");
2292                goto out;
2293        }
2294
2295out:
2296        i915_request_put(rq);
2297        if (igt_flush_test(arg->engine->i915))
2298                err = -EIO;
2299        return err;
2300}
2301
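    /*
     * force_reset_timeout()/cancel_reset_timeout() arm what appears to be a
     * selftest-only fault injection point (engine->reset_timeout) so that the
     * forced preemption reset in __cancel_fail() is treated as having failed.
     */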
2302static void force_reset_timeout(struct intel_engine_cs *engine)
2303{
2304        engine->reset_timeout.probability = 999;
2305        atomic_set(&engine->reset_timeout.times, -1);
2306}
2307
2308static void cancel_reset_timeout(struct intel_engine_cs *engine)
2309{
2310        memset(&engine->reset_timeout, 0, sizeof(engine->reset_timeout));
2311}
2312
2313static int __cancel_fail(struct live_preempt_cancel *arg)
2314{
2315        struct intel_engine_cs *engine = arg->engine;
2316        struct i915_request *rq;
2317        int err;
2318
2319        if (!CONFIG_DRM_I915_PREEMPT_TIMEOUT)
2320                return 0;
2321
2322        if (!intel_has_reset_engine(engine->gt))
2323                return 0;
2324
2325        GEM_TRACE("%s(%s)\n", __func__, engine->name);
2326        rq = spinner_create_request(&arg->a.spin,
2327                                    arg->a.ctx, engine,
2328                                    MI_NOOP); /* preemption disabled */
2329        if (IS_ERR(rq))
2330                return PTR_ERR(rq);
2331
2332        clear_bit(CONTEXT_BANNED, &rq->context->flags);
2333        i915_request_get(rq);
2334        i915_request_add(rq);
2335        if (!igt_wait_for_spinner(&arg->a.spin, rq)) {
2336                err = -EIO;
2337                goto out;
2338        }
2339
2340        intel_context_set_banned(rq->context);
2341
2342        err = intel_engine_pulse(engine);
2343        if (err)
2344                goto out;
2345
2346        force_reset_timeout(engine);
2347
2348        /* force preempt reset [failure] */
2349        while (!engine->execlists.pending[0])
2350                intel_engine_flush_submission(engine);
2351        del_timer_sync(&engine->execlists.preempt);
2352        intel_engine_flush_submission(engine);
2353
2354        cancel_reset_timeout(engine);
2355
2356        /* after failure, require heartbeats to reset device */
2357        intel_engine_set_heartbeat(engine, 1);
2358        err = wait_for_reset(engine, rq, HZ / 2);
2359        intel_engine_set_heartbeat(engine,
2360                                   engine->defaults.heartbeat_interval_ms);
2361        if (err) {
2362                pr_err("Cancelled inflight0 request did not reset\n");
2363                goto out;
2364        }
2365
2366out:
2367        i915_request_put(rq);
2368        if (igt_flush_test(engine->i915))
2369                err = -EIO;
2370        return err;
2371}
2372
2373static int live_preempt_cancel(void *arg)
2374{
2375        struct intel_gt *gt = arg;
2376        struct live_preempt_cancel data;
2377        enum intel_engine_id id;
2378        int err = -ENOMEM;
2379
2380        /*
2381         * To cancel an inflight context, we need to first remove it from the
2382         * GPU. That sounds like preemption! Plus a little bit of bookkeeping.
2383         */
2384
2385        if (preempt_client_init(gt, &data.a))
2386                return -ENOMEM;
2387        if (preempt_client_init(gt, &data.b))
2388                goto err_client_a;
2389
2390        for_each_engine(data.engine, gt, id) {
2391                if (!intel_engine_has_preemption(data.engine))
2392                        continue;
2393
2394                err = __cancel_active0(&data);
2395                if (err)
2396                        goto err_wedged;
2397
2398                err = __cancel_active1(&data);
2399                if (err)
2400                        goto err_wedged;
2401
2402                err = __cancel_queued(&data);
2403                if (err)
2404                        goto err_wedged;
2405
2406                err = __cancel_hostile(&data);
2407                if (err)
2408                        goto err_wedged;
2409
2410                err = __cancel_fail(&data);
2411                if (err)
2412                        goto err_wedged;
2413        }
2414
2415        err = 0;
2416err_client_b:
2417        preempt_client_fini(&data.b);
2418err_client_a:
2419        preempt_client_fini(&data.a);
2420        return err;
2421
2422err_wedged:
2423        GEM_TRACE_DUMP();
2424        igt_spinner_end(&data.b.spin);
2425        igt_spinner_end(&data.a.spin);
2426        intel_gt_set_wedged(gt);
2427        goto err_client_b;
2428}
2429
2430static int live_suppress_self_preempt(void *arg)
2431{
2432        struct i915_sched_attr attr = { .priority = I915_PRIORITY_MAX };
2433        struct intel_gt *gt = arg;
2434        struct intel_engine_cs *engine;
2435        struct preempt_client a, b;
2436        enum intel_engine_id id;
2437        int err = -ENOMEM;
2438
2439        /*
2440         * Verify that if a preemption request does not cause a change in
2441         * the current execution order, the preempt-to-idle injection is
2442         * skipped and that we do not accidentally apply it after the CS
2443         * completion event.
2444         */
2445
2446        if (intel_uc_uses_guc_submission(&gt->uc))
2447                return 0; /* presume black box */
2448
2449        if (intel_vgpu_active(gt->i915))
2450                return 0; /* GVT forces single port & request submission */
2451
2452        if (preempt_client_init(gt, &a))
2453                return -ENOMEM;
2454        if (preempt_client_init(gt, &b))
2455                goto err_client_a;
2456
2457        for_each_engine(engine, gt, id) {
2458                struct i915_request *rq_a, *rq_b;
2459                int depth;
2460
2461                if (!intel_engine_has_preemption(engine))
2462                        continue;
2463
2464                if (igt_flush_test(gt->i915))
2465                        goto err_wedged;
2466
2467                st_engine_heartbeat_disable(engine);
2468                engine->execlists.preempt_hang.count = 0;
2469
2470                rq_a = spinner_create_request(&a.spin,
2471                                              a.ctx, engine,
2472                                              MI_NOOP);
2473                if (IS_ERR(rq_a)) {
2474                        err = PTR_ERR(rq_a);
2475                        st_engine_heartbeat_enable(engine);
2476                        goto err_client_b;
2477                }
2478
2479                i915_request_add(rq_a);
2480                if (!igt_wait_for_spinner(&a.spin, rq_a)) {
2481                        pr_err("First client failed to start\n");
2482                        st_engine_heartbeat_enable(engine);
2483                        goto err_wedged;
2484                }
2485
2486                /* Keep postponing the timer to avoid premature slicing */
2487                mod_timer(&engine->execlists.timer, jiffies + HZ);
2488                for (depth = 0; depth < 8; depth++) {
2489                        rq_b = spinner_create_request(&b.spin,
2490                                                      b.ctx, engine,
2491                                                      MI_NOOP);
2492                        if (IS_ERR(rq_b)) {
2493                                err = PTR_ERR(rq_b);
2494                                st_engine_heartbeat_enable(engine);
2495                                goto err_client_b;
2496                        }
2497                        i915_request_add(rq_b);
2498
2499                        GEM_BUG_ON(i915_request_completed(rq_a));
2500                        engine->sched_engine->schedule(rq_a, &attr);
2501                        igt_spinner_end(&a.spin);
2502
2503                        if (!igt_wait_for_spinner(&b.spin, rq_b)) {
2504                                pr_err("Second client failed to start\n");
2505                                st_engine_heartbeat_enable(engine);
2506                                goto err_wedged;
2507                        }
2508
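                            /*
                             * Swap roles so that on the next pass the freshly
                             * started spinner is the one receiving the
                             * redundant priority bump.
                             */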
2509                        swap(a, b);
2510                        rq_a = rq_b;
2511                }
2512                igt_spinner_end(&a.spin);
2513
2514                if (engine->execlists.preempt_hang.count) {
2515                        pr_err("Preemption on %s recorded x%d, depth %d; should have been suppressed!\n",
2516                               engine->name,
2517                               engine->execlists.preempt_hang.count,
2518                               depth);
2519                        st_engine_heartbeat_enable(engine);
2520                        err = -EINVAL;
2521                        goto err_client_b;
2522                }
2523
2524                st_engine_heartbeat_enable(engine);
2525                if (igt_flush_test(gt->i915))
2526                        goto err_wedged;
2527        }
2528
2529        err = 0;
2530err_client_b:
2531        preempt_client_fini(&b);
2532err_client_a:
2533        preempt_client_fini(&a);
2534        return err;
2535
2536err_wedged:
2537        igt_spinner_end(&b.spin);
2538        igt_spinner_end(&a.spin);
2539        intel_gt_set_wedged(gt);
2540        err = -EIO;
2541        goto err_client_b;
2542}
2543
2544static int live_chain_preempt(void *arg)
2545{
2546        struct intel_gt *gt = arg;
2547        struct intel_engine_cs *engine;
2548        struct preempt_client hi, lo;
2549        enum intel_engine_id id;
2550        int err = -ENOMEM;
2551
2552        /*
2553         * Build a chain AB...BA between two contexts (A, B) and request
2554         * preemption of the last request. It should then complete before
2555         * the previously submitted spinner in B.
2556         */
2557
2558        if (preempt_client_init(gt, &hi))
2559                return -ENOMEM;
2560
2561        if (preempt_client_init(gt, &lo))
2562                goto err_client_hi;
2563
2564        for_each_engine(engine, gt, id) {
2565                struct i915_sched_attr attr = { .priority = I915_PRIORITY_MAX };
2566                struct igt_live_test t;
2567                struct i915_request *rq;
2568                int ring_size, count, i;
2569
2570                if (!intel_engine_has_preemption(engine))
2571                        continue;
2572
2573                rq = spinner_create_request(&lo.spin,
2574                                            lo.ctx, engine,
2575                                            MI_ARB_CHECK);
2576                if (IS_ERR(rq))
2577                        goto err_wedged;
2578
2579                i915_request_get(rq);
2580                i915_request_add(rq);
2581
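                    /*
                     * Estimate how many requests of this size fit in the
                     * ring; the chains built below are sized up to that
                     * limit.
                     */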
2582                ring_size = rq->wa_tail - rq->head;
2583                if (ring_size < 0)
2584                        ring_size += rq->ring->size;
2585                ring_size = rq->ring->size / ring_size;
2586                pr_debug("%s(%s): Using maximum of %d requests\n",
2587                         __func__, engine->name, ring_size);
2588
2589                igt_spinner_end(&lo.spin);
2590                if (i915_request_wait(rq, 0, HZ / 2) < 0) {
2591                        pr_err("Timed out waiting to flush %s\n", engine->name);
2592                        i915_request_put(rq);
2593                        goto err_wedged;
2594                }
2595                i915_request_put(rq);
2596
2597                if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) {
2598                        err = -EIO;
2599                        goto err_wedged;
2600                }
2601
2602                for_each_prime_number_from(count, 1, ring_size) {
2603                        rq = spinner_create_request(&hi.spin,
2604                                                    hi.ctx, engine,
2605                                                    MI_ARB_CHECK);
2606                        if (IS_ERR(rq))
2607                                goto err_wedged;
2608                        i915_request_add(rq);
2609                        if (!igt_wait_for_spinner(&hi.spin, rq))
2610                                goto err_wedged;
2611
2612                        rq = spinner_create_request(&lo.spin,
2613                                                    lo.ctx, engine,
2614                                                    MI_ARB_CHECK);
2615                        if (IS_ERR(rq))
2616                                goto err_wedged;
2617                        i915_request_add(rq);
2618
2619                        for (i = 0; i < count; i++) {
2620                                rq = igt_request_alloc(lo.ctx, engine);
2621                                if (IS_ERR(rq))
2622                                        goto err_wedged;
2623                                i915_request_add(rq);
2624                        }
2625
2626                        rq = igt_request_alloc(hi.ctx, engine);
2627                        if (IS_ERR(rq))
2628                                goto err_wedged;
2629
2630                        i915_request_get(rq);
2631                        i915_request_add(rq);
2632                        engine->sched_engine->schedule(rq, &attr);
2633
2634                        igt_spinner_end(&hi.spin);
2635                        if (i915_request_wait(rq, 0, HZ / 5) < 0) {
2636                                struct drm_printer p =
2637                                        drm_info_printer(gt->i915->drm.dev);
2638
2639                                pr_err("Failed to preempt over chain of %d\n",
2640                                       count);
2641                                intel_engine_dump(engine, &p,
2642                                                  "%s\n", engine->name);
2643                                i915_request_put(rq);
2644                                goto err_wedged;
2645                        }
2646                        igt_spinner_end(&lo.spin);
2647                        i915_request_put(rq);
2648
2649                        rq = igt_request_alloc(lo.ctx, engine);
2650                        if (IS_ERR(rq))
2651                                goto err_wedged;
2652
2653                        i915_request_get(rq);
2654                        i915_request_add(rq);
2655
2656                        if (i915_request_wait(rq, 0, HZ / 5) < 0) {
2657                                struct drm_printer p =
2658                                        drm_info_printer(gt->i915->drm.dev);
2659
2660                                pr_err("Failed to flush low priority chain of %d requests\n",
2661                                       count);
2662                                intel_engine_dump(engine, &p,
2663                                                  "%s\n", engine->name);
2664
2665                                i915_request_put(rq);
2666                                goto err_wedged;
2667                        }
2668                        i915_request_put(rq);
2669                }
2670
2671                if (igt_live_test_end(&t)) {
2672                        err = -EIO;
2673                        goto err_wedged;
2674                }
2675        }
2676
2677        err = 0;
2678err_client_lo:
2679        preempt_client_fini(&lo);
2680err_client_hi:
2681        preempt_client_fini(&hi);
2682        return err;
2683
2684err_wedged:
2685        igt_spinner_end(&hi.spin);
2686        igt_spinner_end(&lo.spin);
2687        intel_gt_set_wedged(gt);
2688        err = -EIO;
2689        goto err_client_lo;
2690}
2691
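    /*
     * Build one member of the "gang": a fresh context whose batch busywaits
     * until the first dword of its own buffer is overwritten with zero, and
     * which then zeroes the first dword of the previously created (lower
     * priority) member's batch, releasing that one in turn.
     */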
2692static int create_gang(struct intel_engine_cs *engine,
2693                       struct i915_request **prev)
2694{
2695        struct drm_i915_gem_object *obj;
2696        struct intel_context *ce;
2697        struct i915_request *rq;
2698        struct i915_vma *vma;
2699        u32 *cs;
2700        int err;
2701
2702        ce = intel_context_create(engine);
2703        if (IS_ERR(ce))
2704                return PTR_ERR(ce);
2705
2706        obj = i915_gem_object_create_internal(engine->i915, 4096);
2707        if (IS_ERR(obj)) {
2708                err = PTR_ERR(obj);
2709                goto err_ce;
2710        }
2711
2712        vma = i915_vma_instance(obj, ce->vm, NULL);
2713        if (IS_ERR(vma)) {
2714                err = PTR_ERR(vma);
2715                goto err_obj;
2716        }
2717
2718        err = i915_vma_pin(vma, 0, 0, PIN_USER);
2719        if (err)
2720                goto err_obj;
2721
2722        cs = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
2723        if (IS_ERR(cs)) {
2724                err = PTR_ERR(cs);
2725                goto err_obj;
2726        }
2727
2728        /* Semaphore target: spin until zero */
2729        *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
2730
2731        *cs++ = MI_SEMAPHORE_WAIT |
2732                MI_SEMAPHORE_POLL |
2733                MI_SEMAPHORE_SAD_EQ_SDD;
2734        *cs++ = 0;
2735        *cs++ = lower_32_bits(vma->node.start);
2736        *cs++ = upper_32_bits(vma->node.start);
2737
2738        if (*prev) {
2739                u64 offset = (*prev)->batch->node.start;
2740
2741                /* Terminate the spinner in the next lower priority batch. */
2742                *cs++ = MI_STORE_DWORD_IMM_GEN4;
2743                *cs++ = lower_32_bits(offset);
2744                *cs++ = upper_32_bits(offset);
2745                *cs++ = 0;
2746        }
2747
2748        *cs++ = MI_BATCH_BUFFER_END;
2749        i915_gem_object_flush_map(obj);
2750        i915_gem_object_unpin_map(obj);
2751
2752        rq = intel_context_create_request(ce);
2753        if (IS_ERR(rq)) {
2754                err = PTR_ERR(rq);
2755                goto err_obj;
2756        }
2757
2758        rq->batch = i915_vma_get(vma);
2759        i915_request_get(rq);
2760
2761        i915_vma_lock(vma);
2762        err = i915_request_await_object(rq, vma->obj, false);
2763        if (!err)
2764                err = i915_vma_move_to_active(vma, rq, 0);
2765        if (!err)
2766                err = rq->engine->emit_bb_start(rq,
2767                                                vma->node.start,
2768                                                PAGE_SIZE, 0);
2769        i915_vma_unlock(vma);
2770        i915_request_add(rq);
2771        if (err)
2772                goto err_rq;
2773
2774        i915_gem_object_put(obj);
2775        intel_context_put(ce);
2776
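            /*
             * Chain the requests together through the mock list link. When
             * *prev is NULL, the stored pointer decodes back to NULL in the
             * list_next_entry() walk in live_preempt_gang(), terminating the
             * loop at the oldest request.
             */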
2777        rq->mock.link.next = &(*prev)->mock.link;
2778        *prev = rq;
2779        return 0;
2780
2781err_rq:
2782        i915_vma_put(rq->batch);
2783        i915_request_put(rq);
2784err_obj:
2785        i915_gem_object_put(obj);
2786err_ce:
2787        intel_context_put(ce);
2788        return err;
2789}
2790
2791static int __live_preempt_ring(struct intel_engine_cs *engine,
2792                               struct igt_spinner *spin,
2793                               int queue_sz, int ring_sz)
2794{
2795        struct intel_context *ce[2] = {};
2796        struct i915_request *rq;
2797        struct igt_live_test t;
2798        int err = 0;
2799        int n;
2800
2801        if (igt_live_test_begin(&t, engine->i915, __func__, engine->name))
2802                return -EIO;
2803
2804        for (n = 0; n < ARRAY_SIZE(ce); n++) {
2805                struct intel_context *tmp;
2806
2807                tmp = intel_context_create(engine);
2808                if (IS_ERR(tmp)) {
2809                        err = PTR_ERR(tmp);
2810                        goto err_ce;
2811                }
2812
2813                tmp->ring_size = ring_sz;
2814
2815                err = intel_context_pin(tmp);
2816                if (err) {
2817                        intel_context_put(tmp);
2818                        goto err_ce;
2819                }
2820
2821                memset32(tmp->ring->vaddr,
2822                         0xdeadbeef, /* trigger a hang if executed */
2823                         tmp->ring->vma->size / sizeof(u32));
2824
2825                ce[n] = tmp;
2826        }
2827
2828        rq = igt_spinner_create_request(spin, ce[0], MI_ARB_CHECK);
2829        if (IS_ERR(rq)) {
2830                err = PTR_ERR(rq);
2831                goto err_ce;
2832        }
2833
2834        i915_request_get(rq);
2835        rq->sched.attr.priority = I915_PRIORITY_BARRIER;
2836        i915_request_add(rq);
2837
2838        if (!igt_wait_for_spinner(spin, rq)) {
2839                intel_gt_set_wedged(engine->gt);
2840                i915_request_put(rq);
2841                err = -ETIME;
2842                goto err_ce;
2843        }
2844
2845        /* Fill the ring until we cause a wrap */
2846        n = 0;
2847        while (ce[0]->ring->tail - rq->wa_tail <= queue_sz) {
2848                struct i915_request *tmp;
2849
2850                tmp = intel_context_create_request(ce[0]);
2851                if (IS_ERR(tmp)) {
2852                        err = PTR_ERR(tmp);
2853                        i915_request_put(rq);
2854                        goto err_ce;
2855                }
2856
2857                i915_request_add(tmp);
2858                intel_engine_flush_submission(engine);
2859                n++;
2860        }
2861        intel_engine_flush_submission(engine);
2862        pr_debug("%s: Filled %d with %d nop tails {size:%x, tail:%x, emit:%x, rq.tail:%x}\n",
2863                 engine->name, queue_sz, n,
2864                 ce[0]->ring->size,
2865                 ce[0]->ring->tail,
2866                 ce[0]->ring->emit,
2867                 rq->tail);
2868        i915_request_put(rq);
2869
2870        /* Create a second request to preempt the first ring */
2871        rq = intel_context_create_request(ce[1]);
2872        if (IS_ERR(rq)) {
2873                err = PTR_ERR(rq);
2874                goto err_ce;
2875        }
2876
2877        rq->sched.attr.priority = I915_PRIORITY_BARRIER;
2878        i915_request_get(rq);
2879        i915_request_add(rq);
2880
2881        err = wait_for_submit(engine, rq, HZ / 2);
2882        i915_request_put(rq);
2883        if (err) {
2884                pr_err("%s: preemption request was not submitted\n",
2885                       engine->name);
2886                err = -ETIME;
2887        }
2888
2889        pr_debug("%s: ring[0]:{ tail:%x, emit:%x }, ring[1]:{ tail:%x, emit:%x }\n",
2890                 engine->name,
2891                 ce[0]->ring->tail, ce[0]->ring->emit,
2892                 ce[1]->ring->tail, ce[1]->ring->emit);
2893
2894err_ce:
2895        intel_engine_flush_submission(engine);
2896        igt_spinner_end(spin);
2897        for (n = 0; n < ARRAY_SIZE(ce); n++) {
2898                if (IS_ERR_OR_NULL(ce[n]))
2899                        break;
2900
2901                intel_context_unpin(ce[n]);
2902                intel_context_put(ce[n]);
2903        }
2904        if (igt_live_test_end(&t))
2905                err = -EIO;
2906        return err;
2907}
2908
2909static int live_preempt_ring(void *arg)
2910{
2911        struct intel_gt *gt = arg;
2912        struct intel_engine_cs *engine;
2913        struct igt_spinner spin;
2914        enum intel_engine_id id;
2915        int err = 0;
2916
2917        /*
2918         * Check that we roll back large chunks of a ring in order to do a
2919         * preemption event. Similar to live_unlite_ring, but looking at
2920         * ring size rather than the impact of intel_ring_direction().
2921         */
2922
2923        if (igt_spinner_init(&spin, gt))
2924                return -ENOMEM;
2925
2926        for_each_engine(engine, gt, id) {
2927                int n;
2928
2929                if (!intel_engine_has_preemption(engine))
2930                        continue;
2931
2932                if (!intel_engine_can_store_dword(engine))
2933                        continue;
2934
2935                st_engine_heartbeat_disable(engine);
2936
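                    /*
                     * Fill 0, 1/4, 1/2 and then 3/4 of the 4K ring before
                     * forcing the preemption, so the rollback is exercised
                     * over different amounts of queued emission.
                     */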
2937                for (n = 0; n <= 3; n++) {
2938                        err = __live_preempt_ring(engine, &spin,
2939                                                  n * SZ_4K / 4, SZ_4K);
2940                        if (err)
2941                                break;
2942                }
2943
2944                st_engine_heartbeat_enable(engine);
2945                if (err)
2946                        break;
2947        }
2948
2949        igt_spinner_fini(&spin);
2950        return err;
2951}
2952
2953static int live_preempt_gang(void *arg)
2954{
2955        struct intel_gt *gt = arg;
2956        struct intel_engine_cs *engine;
2957        enum intel_engine_id id;
2958
2959        /*
2960         * Build as long a chain of preempters as we can, with each
2961         * request higher priority than the last. Once we are ready, we release
2962         * the last batch which then percolates down the chain, each releasing
2963         * the next oldest in turn. The intent is to simply push as hard as we
2964         * can with the number of preemptions, trying to exceed narrow HW
2965         * limits. At a minimum, we insist that we can sort all the user
2966         * high priority levels into execution order.
2967         */
2968
2969        for_each_engine(engine, gt, id) {
2970                struct i915_request *rq = NULL;
2971                struct igt_live_test t;
2972                IGT_TIMEOUT(end_time);
2973                int prio = 0;
2974                int err = 0;
2975                u32 *cs;
2976
2977                if (!intel_engine_has_preemption(engine))
2978                        continue;
2979
2980                if (igt_live_test_begin(&t, gt->i915, __func__, engine->name))
2981                        return -EIO;
2982
2983                do {
2984                        struct i915_sched_attr attr = { .priority = prio++ };
2985
2986                        err = create_gang(engine, &rq);
2987                        if (err)
2988                                break;
2989
2990                        /* Submit each spinner at increasing priority */
2991                        engine->sched_engine->schedule(rq, &attr);
2992                } while (prio <= I915_PRIORITY_MAX &&
2993                         !__igt_timeout(end_time, NULL));
2994                pr_debug("%s: Preempt chain of %d requests\n",
2995                         engine->name, prio);
2996
2997                /*
2998                 * The last spinner is the highest priority and
2999                 * should execute first. When that spinner completes,
3000                 * it will terminate the next lowest spinner until there
3001                 * are no more spinners and the gang is complete.
3002                 */
3003                cs = i915_gem_object_pin_map_unlocked(rq->batch->obj, I915_MAP_WC);
3004                if (!IS_ERR(cs)) {
3005                        *cs = 0;
3006                        i915_gem_object_unpin_map(rq->batch->obj);
3007                } else {
3008                        err = PTR_ERR(cs);
3009                        intel_gt_set_wedged(gt);
3010                }
3011
3012                while (rq) { /* wait for each rq from highest to lowest prio */
3013                        struct i915_request *n = list_next_entry(rq, mock.link);
3014
3015                        if (err == 0 && i915_request_wait(rq, 0, HZ / 5) < 0) {
3016                                struct drm_printer p =
3017                                        drm_info_printer(engine->i915->drm.dev);
3018
3019                                pr_err("Failed to flush chain of %d requests, at %d\n",
3020                                       prio, rq_prio(rq));
3021                                intel_engine_dump(engine, &p,
3022                                                  "%s\n", engine->name);
3023
3024                                err = -ETIME;
3025                        }
3026
3027                        i915_vma_put(rq->batch);
3028                        i915_request_put(rq);
3029                        rq = n;
3030                }
3031
3032                if (igt_live_test_end(&t))
3033                        err = -EIO;
3034                if (err)
3035                        return err;
3036        }
3037
3038        return 0;
3039}
3040
3041static struct i915_vma *
3042create_gpr_user(struct intel_engine_cs *engine,
3043                struct i915_vma *result,
3044                unsigned int offset)
3045{
3046        struct drm_i915_gem_object *obj;
3047        struct i915_vma *vma;
3048        u32 *cs;
3049        int err;
3050        int i;
3051
3052        obj = i915_gem_object_create_internal(engine->i915, 4096);
3053        if (IS_ERR(obj))
3054                return ERR_CAST(obj);
3055
3056        vma = i915_vma_instance(obj, result->vm, NULL);
3057        if (IS_ERR(vma)) {
3058                i915_gem_object_put(obj);
3059                return vma;
3060        }
3061
3062        err = i915_vma_pin(vma, 0, 0, PIN_USER);
3063        if (err) {
3064                i915_vma_put(vma);
3065                return ERR_PTR(err);
3066        }
3067
3068        cs = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
3069        if (IS_ERR(cs)) {
3070                i915_vma_put(vma);
3071                return ERR_CAST(cs);
3072        }
3073
3074        /* All GPRs are clear for new contexts. We use GPR(0) as a constant */
3075        *cs++ = MI_LOAD_REGISTER_IMM(1);
3076        *cs++ = CS_GPR(engine, 0);
3077        *cs++ = 1;
3078
3079        for (i = 1; i < NUM_GPR; i++) {
3080                u64 addr;
3081
3082                /*
3083                 * Perform: GPR[i]++
3084                 *
3085                 * As we read and write into the context saved GPR[i], if
3086                 * we restart this batch buffer from an earlier point, we
3087                 * will repeat the increment and store a value > 1.
3088                 */
3089                *cs++ = MI_MATH(4);
3090                *cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCA, MI_MATH_REG(i));
3091                *cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCB, MI_MATH_REG(0));
3092                *cs++ = MI_MATH_ADD;
3093                *cs++ = MI_MATH_STORE(MI_MATH_REG(i), MI_MATH_REG_ACCU);
3094
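                    /*
                     * Publish the incremented GPR into this client's slot of
                     * the result buffer; the CPU later checks that every slot
                     * reads exactly 1, i.e. that no increment was replayed
                     * after a preemption.
                     */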
3095                addr = result->node.start + offset + i * sizeof(*cs);
3096                *cs++ = MI_STORE_REGISTER_MEM_GEN8;
3097                *cs++ = CS_GPR(engine, 2 * i);
3098                *cs++ = lower_32_bits(addr);
3099                *cs++ = upper_32_bits(addr);
3100
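                    /*
                     * Park at an arbitration point until the preempting
                     * kernel requests bump the semaphore in the first dword
                     * of the result buffer to at least i, so that each
                     * preemption should land while we wait mid-batch.
                     */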
3101                *cs++ = MI_SEMAPHORE_WAIT |
3102                        MI_SEMAPHORE_POLL |
3103                        MI_SEMAPHORE_SAD_GTE_SDD;
3104                *cs++ = i;
3105                *cs++ = lower_32_bits(result->node.start);
3106                *cs++ = upper_32_bits(result->node.start);
3107        }
3108
3109        *cs++ = MI_BATCH_BUFFER_END;
3110        i915_gem_object_flush_map(obj);
3111        i915_gem_object_unpin_map(obj);
3112
3113        return vma;
3114}
3115
3116static struct i915_vma *create_global(struct intel_gt *gt, size_t sz)
3117{
3118        struct drm_i915_gem_object *obj;
3119        struct i915_vma *vma;
3120        int err;
3121
3122        obj = i915_gem_object_create_internal(gt->i915, sz);
3123        if (IS_ERR(obj))
3124                return ERR_CAST(obj);
3125
3126        vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
3127        if (IS_ERR(vma)) {
3128                i915_gem_object_put(obj);
3129                return vma;
3130        }
3131
3132        err = i915_ggtt_pin(vma, NULL, 0, 0);
3133        if (err) {
3134                i915_vma_put(vma);
3135                return ERR_PTR(err);
3136        }
3137
3138        return vma;
3139}
3140
3141static struct i915_request *
3142create_gpr_client(struct intel_engine_cs *engine,
3143                  struct i915_vma *global,
3144                  unsigned int offset)
3145{
3146        struct i915_vma *batch, *vma;
3147        struct intel_context *ce;
3148        struct i915_request *rq;
3149        int err;
3150
3151        ce = intel_context_create(engine);
3152        if (IS_ERR(ce))
3153                return ERR_CAST(ce);
3154
3155        vma = i915_vma_instance(global->obj, ce->vm, NULL);
3156        if (IS_ERR(vma)) {
3157                err = PTR_ERR(vma);
3158                goto out_ce;
3159        }
3160
3161        err = i915_vma_pin(vma, 0, 0, PIN_USER);
3162        if (err)
3163                goto out_ce;
3164
3165        batch = create_gpr_user(engine, vma, offset);
3166        if (IS_ERR(batch)) {
3167                err = PTR_ERR(batch);
3168                goto out_vma;
3169        }
3170
3171        rq = intel_context_create_request(ce);
3172        if (IS_ERR(rq)) {
3173                err = PTR_ERR(rq);
3174                goto out_batch;
3175        }
3176
3177        i915_vma_lock(vma);
3178        err = i915_request_await_object(rq, vma->obj, false);
3179        if (!err)
3180                err = i915_vma_move_to_active(vma, rq, 0);
3181        i915_vma_unlock(vma);
3182
3183        i915_vma_lock(batch);
3184        if (!err)
3185                err = i915_request_await_object(rq, batch->obj, false);
3186        if (!err)
3187                err = i915_vma_move_to_active(batch, rq, 0);
3188        if (!err)
3189                err = rq->engine->emit_bb_start(rq,
3190                                                batch->node.start,
3191                                                PAGE_SIZE, 0);
3192        i915_vma_unlock(batch);
3193        i915_vma_unpin(batch);
3194
3195        if (!err)
3196                i915_request_get(rq);
3197        i915_request_add(rq);
3198
3199out_batch:
3200        i915_vma_put(batch);
3201out_vma:
3202        i915_vma_unpin(vma);
3203out_ce:
3204        intel_context_put(ce);
3205        return err ? ERR_PTR(err) : rq;
3206}
3207
3208static int preempt_user(struct intel_engine_cs *engine,
3209                        struct i915_vma *global,
3210                        int id)
3211{
3212        struct i915_sched_attr attr = {
3213                .priority = I915_PRIORITY_MAX
3214        };
3215        struct i915_request *rq;
3216        int err = 0;
3217        u32 *cs;
3218
3219        rq = intel_engine_create_kernel_request(engine);
3220        if (IS_ERR(rq))
3221                return PTR_ERR(rq);
3222
3223        cs = intel_ring_begin(rq, 4);
3224        if (IS_ERR(cs)) {
3225                i915_request_add(rq);
3226                return PTR_ERR(cs);
3227        }
3228
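            /*
             * Bump the semaphore: write this preemption's id into the first
             * dword of the global buffer, releasing any GPR client batch
             * that is polling for the semaphore to reach this value.
             */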
3229        *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
3230        *cs++ = i915_ggtt_offset(global);
3231        *cs++ = 0;
3232        *cs++ = id;
3233
3234        intel_ring_advance(rq, cs);
3235
3236        i915_request_get(rq);
3237        i915_request_add(rq);
3238
3239        engine->sched_engine->schedule(rq, &attr);
3240
3241        if (i915_request_wait(rq, 0, HZ / 2) < 0)
3242                err = -ETIME;
3243        i915_request_put(rq);
3244
3245        return err;
3246}
3247
3248static int live_preempt_user(void *arg)
3249{
3250        struct intel_gt *gt = arg;
3251        struct intel_engine_cs *engine;
3252        struct i915_vma *global;
3253        enum intel_engine_id id;
3254        u32 *result;
3255        int err = 0;
3256
3257        /*
3258         * In our other tests, we look at preemption in carefully
3259         * controlled conditions in the ringbuffer. Since most of the
3260         * time is spent in user batches, most of our preemptions naturally
3261         * occur there. We want to verify that when we preempt inside a batch
3262         * we continue on from the current instruction and do not roll back
3263         * to the start, or another earlier arbitration point.
3264         *
3265         * To verify this, we create a batch which is a mixture of
3266         * MI_MATH (gpr++) MI_SRM (gpr) and preemption points. Then with
3267         * a few preempting contexts thrown into the mix, we look for any
3268         * repeated instructions (which show up as incorrect values).
3269         */
3270
3271        global = create_global(gt, 4096);
3272        if (IS_ERR(global))
3273                return PTR_ERR(global);
3274
3275        result = i915_gem_object_pin_map_unlocked(global->obj, I915_MAP_WC);
3276        if (IS_ERR(result)) {
3277                i915_vma_unpin_and_release(&global, 0);
3278                return PTR_ERR(result);
3279        }
3280
3281        for_each_engine(engine, gt, id) {
3282                struct i915_request *client[3] = {};
3283                struct igt_live_test t;
3284                int i;
3285
3286                if (!intel_engine_has_preemption(engine))
3287                        continue;
3288
3289                if (GRAPHICS_VER(gt->i915) == 8 && engine->class != RENDER_CLASS)
3290                        continue; /* we need per-context GPR */
3291
3292                if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) {
3293                        err = -EIO;
3294                        break;
3295                }
3296
3297                memset(result, 0, 4096);
3298
3299                for (i = 0; i < ARRAY_SIZE(client); i++) {
3300                        struct i915_request *rq;
3301
3302                        rq = create_gpr_client(engine, global,
3303                                               NUM_GPR * i * sizeof(u32));
3304                        if (IS_ERR(rq)) {
3305                                err = PTR_ERR(rq);
3306                                goto end_test;
3307                        }
3308
3309                        client[i] = rq;
3310                }
3311
3312                /* Continuously preempt the set of 3 running contexts */
3313                for (i = 1; i <= NUM_GPR; i++) {
3314                        err = preempt_user(engine, global, i);
3315                        if (err)
3316                                goto end_test;
3317                }
3318
3319                if (READ_ONCE(result[0]) != NUM_GPR) {
3320                        pr_err("%s: Failed to release semaphore\n",
3321                               engine->name);
3322                        err = -EIO;
3323                        goto end_test;
3324                }
3325
3326                for (i = 0; i < ARRAY_SIZE(client); i++) {
3327                        int gpr;
3328
3329                        if (i915_request_wait(client[i], 0, HZ / 2) < 0) {
3330                                err = -ETIME;
3331                                goto end_test;
3332                        }
3333
3334                        for (gpr = 1; gpr < NUM_GPR; gpr++) {
3335                                if (result[NUM_GPR * i + gpr] != 1) {
3336                                        pr_err("%s: Invalid result, client %d, gpr %d, result: %d\n",
3337                                               engine->name,
3338                                               i, gpr, result[NUM_GPR * i + gpr]);
3339                                        err = -EINVAL;
3340                                        goto end_test;
3341                                }
3342                        }
3343                }
3344
3345end_test:
3346                for (i = 0; i < ARRAY_SIZE(client); i++) {
3347                        if (!client[i])
3348                                break;
3349
3350                        i915_request_put(client[i]);
3351                }
3352
3353                /* Flush the semaphores on error */
3354                smp_store_mb(result[0], -1);
3355                if (igt_live_test_end(&t))
3356                        err = -EIO;
3357                if (err)
3358                        break;
3359        }
3360
3361        i915_vma_unpin_and_release(&global, I915_VMA_RELEASE_MAP);
3362        return err;
3363}
3364
3365static int live_preempt_timeout(void *arg)
3366{
3367        struct intel_gt *gt = arg;
3368        struct i915_gem_context *ctx_hi, *ctx_lo;
3369        struct igt_spinner spin_lo;
3370        struct intel_engine_cs *engine;
3371        enum intel_engine_id id;
3372        int err = -ENOMEM;
3373
3374        /*
3375         * Check that we force preemption to occur by cancelling the previous
3376         * context if it refuses to yield the GPU.
3377         */
3378        if (!CONFIG_DRM_I915_PREEMPT_TIMEOUT)
3379                return 0;
3380
3381        if (!intel_has_reset_engine(gt))
3382                return 0;
3383
3384        if (igt_spinner_init(&spin_lo, gt))
3385                return -ENOMEM;
3386
3387        ctx_hi = kernel_context(gt->i915, NULL);
3388        if (!ctx_hi)
3389                goto err_spin_lo;
3390        ctx_hi->sched.priority = I915_CONTEXT_MAX_USER_PRIORITY;
3391
3392        ctx_lo = kernel_context(gt->i915, NULL);
3393        if (!ctx_lo)
3394                goto err_ctx_hi;
3395        ctx_lo->sched.priority = I915_CONTEXT_MIN_USER_PRIORITY;
3396
3397        for_each_engine(engine, gt, id) {
3398                unsigned long saved_timeout;
3399                struct i915_request *rq;
3400
3401                if (!intel_engine_has_preemption(engine))
3402                        continue;
3403
3404                rq = spinner_create_request(&spin_lo, ctx_lo, engine,
3405                                            MI_NOOP); /* preemption disabled */
3406                if (IS_ERR(rq)) {
3407                        err = PTR_ERR(rq);
3408                        goto err_ctx_lo;
3409                }
3410
3411                i915_request_add(rq);
3412                if (!igt_wait_for_spinner(&spin_lo, rq)) {
3413                        intel_gt_set_wedged(gt);
3414                        err = -EIO;
3415                        goto err_ctx_lo;
3416                }
3417
3418                rq = igt_request_alloc(ctx_hi, engine);
3419                if (IS_ERR(rq)) {
3420                        igt_spinner_end(&spin_lo);
3421                        err = PTR_ERR(rq);
3422                        goto err_ctx_lo;
3423                }
3424
3425                /* Flush the previous CS ack before changing timeouts */
3426                while (READ_ONCE(engine->execlists.pending[0]))
3427                        cpu_relax();
3428
3429                saved_timeout = engine->props.preempt_timeout_ms;
3430                engine->props.preempt_timeout_ms = 1; /* in ms, -> 1 jiffie */
3431
3432                i915_request_get(rq);
3433                i915_request_add(rq);
3434
3435                intel_engine_flush_submission(engine);
3436                engine->props.preempt_timeout_ms = saved_timeout;
3437
3438                if (i915_request_wait(rq, 0, HZ / 10) < 0) {
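                    /*
                     * The non-preemptible spinner cannot honour the
                     * preemption request, so the 1ms preempt timeout should
                     * force an engine reset, after which the high priority
                     * request is expected to complete within the short wait
                     * below.
                     */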
3439                        intel_gt_set_wedged(gt);
3440                        i915_request_put(rq);
3441                        err = -ETIME;
3442                        goto err_ctx_lo;
3443                }
3444
3445                igt_spinner_end(&spin_lo);
3446                i915_request_put(rq);
3447        }
3448
3449        err = 0;
3450err_ctx_lo:
3451        kernel_context_close(ctx_lo);
3452err_ctx_hi:
3453        kernel_context_close(ctx_hi);
3454err_spin_lo:
3455        igt_spinner_fini(&spin_lo);
3456        return err;
3457}
3458
3459static int random_range(struct rnd_state *rnd, int min, int max)
3460{
3461        return i915_prandom_u32_max_state(max - min, rnd) + min;
3462}
3463
3464static int random_priority(struct rnd_state *rnd)
3465{
3466        return random_range(rnd, I915_PRIORITY_MIN, I915_PRIORITY_MAX);
3467}
3468
3469struct preempt_smoke {
3470        struct intel_gt *gt;
3471        struct i915_gem_context **contexts;
3472        struct intel_engine_cs *engine;
3473        struct drm_i915_gem_object *batch;
3474        unsigned int ncontext;
3475        struct rnd_state prng;
3476        unsigned long count;
3477};
3478
3479static struct i915_gem_context *smoke_context(struct preempt_smoke *smoke)
3480{
3481        return smoke->contexts[i915_prandom_u32_max_state(smoke->ncontext,
3482                                                          &smoke->prng)];
3483}
3484
3485static int smoke_submit(struct preempt_smoke *smoke,
3486                        struct i915_gem_context *ctx, int prio,
3487                        struct drm_i915_gem_object *batch)
3488{
3489        struct i915_request *rq;
3490        struct i915_vma *vma = NULL;
3491        int err = 0;
3492
3493        if (batch) {
3494                struct i915_address_space *vm;
3495
3496                vm = i915_gem_context_get_eb_vm(ctx);
3497                vma = i915_vma_instance(batch, vm, NULL);
3498                i915_vm_put(vm);
3499                if (IS_ERR(vma))
3500                        return PTR_ERR(vma);
3501
3502                err = i915_vma_pin(vma, 0, 0, PIN_USER);
3503                if (err)
3504                        return err;
3505        }
3506
3507        ctx->sched.priority = prio;
3508
3509        rq = igt_request_alloc(ctx, smoke->engine);
3510        if (IS_ERR(rq)) {
3511                err = PTR_ERR(rq);
3512                goto unpin;
3513        }
3514
3515        if (vma) {
3516                i915_vma_lock(vma);
3517                err = i915_request_await_object(rq, vma->obj, false);
3518                if (!err)
3519                        err = i915_vma_move_to_active(vma, rq, 0);
3520                if (!err)
3521                        err = rq->engine->emit_bb_start(rq,
3522                                                        vma->node.start,
3523                                                        PAGE_SIZE, 0);
3524                i915_vma_unlock(vma);
3525        }
3526
3527        i915_request_add(rq);
3528
3529unpin:
3530        if (vma)
3531                i915_vma_unpin(vma);
3532
3533        return err;
3534}
3535
3536static int smoke_crescendo_thread(void *arg)
3537{
3538        struct preempt_smoke *smoke = arg;
3539        IGT_TIMEOUT(end_time);
3540        unsigned long count;
3541
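            /*
             * Each submission cycles the context priority through
             * count % I915_PRIORITY_MAX, a crescendo of ascending priorities
             * in which each new request presumably tends to preempt the work
             * queued before it.
             */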
3542        count = 0;
3543        do {
3544                struct i915_gem_context *ctx = smoke_context(smoke);
3545                int err;
3546
3547                err = smoke_submit(smoke,
3548                                   ctx, count % I915_PRIORITY_MAX,
3549                                   smoke->batch);
3550                if (err)
3551                        return err;
3552
3553                count++;
3554        } while (count < smoke->ncontext && !__igt_timeout(end_time, NULL));
3555
3556        smoke->count = count;
3557        return 0;
3558}
3559
3560static int smoke_crescendo(struct preempt_smoke *smoke, unsigned int flags)
3561#define BATCH BIT(0)
3562{
3563        struct task_struct *tsk[I915_NUM_ENGINES] = {};
3564        struct preempt_smoke *arg;
3565        struct intel_engine_cs *engine;
3566        enum intel_engine_id id;
3567        unsigned long count;
3568        int err = 0;
3569
3570        arg = kmalloc_array(I915_NUM_ENGINES, sizeof(*arg), GFP_KERNEL);
3571        if (!arg)
3572                return -ENOMEM;
3573
3574        for_each_engine(engine, smoke->gt, id) {
3575                arg[id] = *smoke;
3576                arg[id].engine = engine;
3577                if (!(flags & BATCH))
3578                        arg[id].batch = NULL;
3579                arg[id].count = 0;
3580
3581                tsk[id] = kthread_run(smoke_crescendo_thread, &arg[id],
3582                                      "igt/smoke:%d", id);
3583                if (IS_ERR(tsk[id])) {
3584                        err = PTR_ERR(tsk[id]);
3585                        break;
3586                }
3587                get_task_struct(tsk[id]);
3588        }
3589
3590        yield(); /* start all threads before we kthread_stop() */
3591
3592        count = 0;
3593        for_each_engine(engine, smoke->gt, id) {
3594                int status;
3595
3596                if (IS_ERR_OR_NULL(tsk[id]))
3597                        continue;
3598
3599                status = kthread_stop(tsk[id]);
3600                if (status && !err)
3601                        err = status;
3602
3603                count += arg[id].count;
3604
3605                put_task_struct(tsk[id]);
3606        }
3607
3608        pr_info("Submitted %lu crescendo:%x requests across %d engines and %d contexts\n",
3609                count, flags, smoke->gt->info.num_engines, smoke->ncontext);
3610
3611        kfree(arg);
3612        return err;
3613}
3614
3615static int smoke_random(struct preempt_smoke *smoke, unsigned int flags)
3616{
3617        enum intel_engine_id id;
3618        IGT_TIMEOUT(end_time);
3619        unsigned long count;
3620
3621        count = 0;
3622        do {
3623                for_each_engine(smoke->engine, smoke->gt, id) {
3624                        struct i915_gem_context *ctx = smoke_context(smoke);
3625                        int err;
3626
3627                        err = smoke_submit(smoke,
3628                                           ctx, random_priority(&smoke->prng),
3629                                           flags & BATCH ? smoke->batch : NULL);
3630                        if (err)
3631                                return err;
3632
3633                        count++;
3634                }
3635        } while (count < smoke->ncontext && !__igt_timeout(end_time, NULL));
3636
3637        pr_info("Submitted %lu random:%x requests across %d engines and %d contexts\n",
3638                count, flags, smoke->gt->info.num_engines, smoke->ncontext);
3639        return 0;
3640}
3641
3642static int live_preempt_smoke(void *arg)
3643{
3644        struct preempt_smoke smoke = {
3645                .gt = arg,
3646                .prng = I915_RND_STATE_INITIALIZER(i915_selftest.random_seed),
3647                .ncontext = 256,
3648        };
3649        const unsigned int phase[] = { 0, BATCH };
3650        struct igt_live_test t;
3651        int err = -ENOMEM;
3652        u32 *cs;
3653        int n;
3654
3655        smoke.contexts = kmalloc_array(smoke.ncontext,
3656                                       sizeof(*smoke.contexts),
3657                                       GFP_KERNEL);
3658        if (!smoke.contexts)
3659                return -ENOMEM;
3660
3661        smoke.batch =
3662                i915_gem_object_create_internal(smoke.gt->i915, PAGE_SIZE);
3663        if (IS_ERR(smoke.batch)) {
3664                err = PTR_ERR(smoke.batch);
3665                goto err_free;
3666        }
3667
3668        cs = i915_gem_object_pin_map_unlocked(smoke.batch, I915_MAP_WB);
3669        if (IS_ERR(cs)) {
3670                err = PTR_ERR(cs);
3671                goto err_batch;
3672        }
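
            /*
             * Fill the batch with MI_ARB_CHECK so that (almost) every dword
             * is an arbitration point, giving the scheduler plenty of
             * opportunities to preempt mid-batch.
             */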
3673        for (n = 0; n < PAGE_SIZE / sizeof(*cs) - 1; n++)
3674                cs[n] = MI_ARB_CHECK;
3675        cs[n] = MI_BATCH_BUFFER_END;
3676        i915_gem_object_flush_map(smoke.batch);
3677        i915_gem_object_unpin_map(smoke.batch);
3678
3679        if (igt_live_test_begin(&t, smoke.gt->i915, __func__, "all")) {
3680                err = -EIO;
3681                goto err_batch;
3682        }
3683
3684        for (n = 0; n < smoke.ncontext; n++) {
3685                smoke.contexts[n] = kernel_context(smoke.gt->i915, NULL);
3686                if (!smoke.contexts[n])
3687                        goto err_ctx;
3688        }
3689
3690        for (n = 0; n < ARRAY_SIZE(phase); n++) {
3691                err = smoke_crescendo(&smoke, phase[n]);
3692                if (err)
3693                        goto err_ctx;
3694
3695                err = smoke_random(&smoke, phase[n]);
3696                if (err)
3697                        goto err_ctx;
3698        }
3699
3700err_ctx:
3701        if (igt_live_test_end(&t))
3702                err = -EIO;
3703
3704        for (n = 0; n < smoke.ncontext; n++) {
3705                if (!smoke.contexts[n])
3706                        break;
3707                kernel_context_close(smoke.contexts[n]);
3708        }
3709
3710err_batch:
3711        i915_gem_object_put(smoke.batch);
3712err_free:
3713        kfree(smoke.contexts);
3714
3715        return err;
3716}
3717
3718static int nop_virtual_engine(struct intel_gt *gt,
3719                              struct intel_engine_cs **siblings,
3720                              unsigned int nsibling,
3721                              unsigned int nctx,
3722                              unsigned int flags)
3723#define CHAIN BIT(0)
3724{
3725        IGT_TIMEOUT(end_time);
3726        struct i915_request *request[16] = {};
3727        struct intel_context *ve[16];
3728        unsigned long n, prime, nc;
3729        struct igt_live_test t;
3730        ktime_t times[2] = {};
3731        int err;
3732
3733        GEM_BUG_ON(!nctx || nctx > ARRAY_SIZE(ve));
3734
3735        for (n = 0; n < nctx; n++) {
3736                ve[n] = intel_engine_create_virtual(siblings, nsibling, 0);
3737                if (IS_ERR(ve[n])) {
3738                        err = PTR_ERR(ve[n]);
3739                        nctx = n;
3740                        goto out;
3741                }
3742
3743                err = intel_context_pin(ve[n]);
3744                if (err) {
3745                        intel_context_put(ve[n]);
3746                        nctx = n;
3747                        goto out;
3748                }
3749        }
3750
3751        err = igt_live_test_begin(&t, gt->i915, __func__, ve[0]->engine->name);
3752        if (err)
3753                goto out;
3754
3755        for_each_prime_number_from(prime, 1, 8192) {
3756                times[1] = ktime_get_raw();
3757
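                    /*
                     * With CHAIN, saturate one virtual context with the full
                     * chain of requests before moving on to the next;
                     * otherwise interleave a single request per context on
                     * every pass.
                     */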
3758                if (flags & CHAIN) {
3759                        for (nc = 0; nc < nctx; nc++) {
3760                                for (n = 0; n < prime; n++) {
3761                                        struct i915_request *rq;
3762
3763                                        rq = i915_request_create(ve[nc]);
3764                                        if (IS_ERR(rq)) {
3765                                                err = PTR_ERR(rq);
3766                                                goto out;
3767                                        }
3768
3769                                        if (request[nc])
3770                                                i915_request_put(request[nc]);
3771                                        request[nc] = i915_request_get(rq);
3772                                        i915_request_add(rq);
3773                                }
3774                        }
3775                } else {
3776                        for (n = 0; n < prime; n++) {
3777                                for (nc = 0; nc < nctx; nc++) {
3778                                        struct i915_request *rq;
3779
3780                                        rq = i915_request_create(ve[nc]);
3781                                        if (IS_ERR(rq)) {
3782                                                err = PTR_ERR(rq);
3783                                                goto out;
3784                                        }
3785
3786                                        if (request[nc])
3787                                                i915_request_put(request[nc]);
3788                                        request[nc] = i915_request_get(rq);
3789                                        i915_request_add(rq);
3790                                }
3791                        }
3792                }
3793
3794                for (nc = 0; nc < nctx; nc++) {
3795                        if (i915_request_wait(request[nc], 0, HZ / 10) < 0) {
3796                                pr_err("%s(%s): wait for %llx:%lld timed out\n",
3797                                       __func__, ve[0]->engine->name,
3798                                       request[nc]->fence.context,
3799                                       request[nc]->fence.seqno);
3800
3801                                GEM_TRACE("%s(%s) failed at request %llx:%lld\n",
3802                                          __func__, ve[0]->engine->name,
3803                                          request[nc]->fence.context,
3804                                          request[nc]->fence.seqno);
3805                                GEM_TRACE_DUMP();
3806                                intel_gt_set_wedged(gt);
3807                                break;
3808                        }
3809                }
3810
3811                times[1] = ktime_sub(ktime_get_raw(), times[1]);
3812                if (prime == 1)
3813                        times[0] = times[1];
3814
3815                for (nc = 0; nc < nctx; nc++) {
3816                        i915_request_put(request[nc]);
3817                        request[nc] = NULL;
3818                }
3819
3820                if (__igt_timeout(end_time, NULL))
3821                        break;
3822        }
3823
3824        err = igt_live_test_end(&t);
3825        if (err)
3826                goto out;
3827
3828        pr_info("Requestx%d latencies on %s: 1 = %lluns, %lu = %lluns\n",
3829                nctx, ve[0]->engine->name, ktime_to_ns(times[0]),
3830                prime, div64_u64(ktime_to_ns(times[1]), prime));
3831
3832out:
3833        if (igt_flush_test(gt->i915))
3834                err = -EIO;
3835
3836        for (nc = 0; nc < nctx; nc++) {
3837                i915_request_put(request[nc]);
3838                intel_context_unpin(ve[nc]);
3839                intel_context_put(ve[nc]);
3840        }
3841        return err;
3842}
3843
3844static unsigned int
3845__select_siblings(struct intel_gt *gt,
3846                  unsigned int class,
3847                  struct intel_engine_cs **siblings,
3848                  bool (*filter)(const struct intel_engine_cs *))
3849{
3850        unsigned int n = 0;
3851        unsigned int inst;
3852
3853        for (inst = 0; inst <= MAX_ENGINE_INSTANCE; inst++) {
3854                if (!gt->engine_class[class][inst])
3855                        continue;
3856
3857                if (filter && !filter(gt->engine_class[class][inst]))
3858                        continue;
3859
3860                siblings[n++] = gt->engine_class[class][inst];
3861        }
3862
3863        return n;
3864}
3865
3866static unsigned int
3867select_siblings(struct intel_gt *gt,
3868                unsigned int class,
3869                struct intel_engine_cs **siblings)
3870{
3871        return __select_siblings(gt, class, siblings, NULL);
3872}
3873
3874static int live_virtual_engine(void *arg)
3875{
3876        struct intel_gt *gt = arg;
3877        struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];
3878        struct intel_engine_cs *engine;
3879        enum intel_engine_id id;
3880        unsigned int class;
3881        int err;
3882
3883        if (intel_uc_uses_guc_submission(&gt->uc))
3884                return 0;
3885
3886        for_each_engine(engine, gt, id) {
3887                err = nop_virtual_engine(gt, &engine, 1, 1, 0);
3888                if (err) {
3889                        pr_err("Failed to wrap engine %s: err=%d\n",
3890                               engine->name, err);
3891                        return err;
3892                }
3893        }
3894
3895        for (class = 0; class <= MAX_ENGINE_CLASS; class++) {
3896                int nsibling, n;
3897
3898                nsibling = select_siblings(gt, class, siblings);
3899                if (nsibling < 2)
3900                        continue;
3901
3902                for (n = 1; n <= nsibling + 1; n++) {
3903                        err = nop_virtual_engine(gt, siblings, nsibling,
3904                                                 n, 0);
3905                        if (err)
3906                                return err;
3907                }
3908
3909                err = nop_virtual_engine(gt, siblings, nsibling, n, CHAIN);
3910                if (err)
3911                        return err;
3912        }
3913
3914        return 0;
3915}
3916
3917static int mask_virtual_engine(struct intel_gt *gt,
3918                               struct intel_engine_cs **siblings,
3919                               unsigned int nsibling)
3920{
3921        struct i915_request *request[MAX_ENGINE_INSTANCE + 1];
3922        struct intel_context *ve;
3923        struct igt_live_test t;
3924        unsigned int n;
3925        int err;
3926
3927        /*
3928         * Check that by setting the execution mask on a request, we can
3929         * restrict it to our desired engine within the virtual engine.
3930         */
3931
3932        ve = intel_engine_create_virtual(siblings, nsibling, 0);
3933        if (IS_ERR(ve)) {
3934                err = PTR_ERR(ve);
3935                goto out_close;
3936        }
3937
3938        err = intel_context_pin(ve);
3939        if (err)
3940                goto out_put;
3941
3942        err = igt_live_test_begin(&t, gt->i915, __func__, ve->engine->name);
3943        if (err)
3944                goto out_unpin;
3945
3946        for (n = 0; n < nsibling; n++) {
3947                request[n] = i915_request_create(ve);
3948                if (IS_ERR(request[n])) {
3949                        err = PTR_ERR(request[n]);
3950                        nsibling = n;
3951                        goto out;
3952                }
3953
3954                /* Reverse order as it's more likely to be unnatural */
3955                request[n]->execution_mask = siblings[nsibling - n - 1]->mask;
3956
3957                i915_request_get(request[n]);
3958                i915_request_add(request[n]);
3959        }
3960
3961        for (n = 0; n < nsibling; n++) {
3962                if (i915_request_wait(request[n], 0, HZ / 10) < 0) {
3963                        pr_err("%s(%s): wait for %llx:%lld timed out\n",
3964                               __func__, ve->engine->name,
3965                               request[n]->fence.context,
3966                               request[n]->fence.seqno);
3967
3968                        GEM_TRACE("%s(%s) failed at request %llx:%lld\n",
3969                                  __func__, ve->engine->name,
3970                                  request[n]->fence.context,
3971                                  request[n]->fence.seqno);
3972                        GEM_TRACE_DUMP();
3973                        intel_gt_set_wedged(gt);
3974                        err = -EIO;
3975                        goto out;
3976                }
3977
3978                if (request[n]->engine != siblings[nsibling - n - 1]) {
3979                        pr_err("Executed on wrong sibling '%s', expected '%s'\n",
3980                               request[n]->engine->name,
3981                               siblings[nsibling - n - 1]->name);
3982                        err = -EINVAL;
3983                        goto out;
3984                }
3985        }
3986
3987        err = igt_live_test_end(&t);
3988out:
3989        if (igt_flush_test(gt->i915))
3990                err = -EIO;
3991
3992        for (n = 0; n < nsibling; n++)
3993                i915_request_put(request[n]);
3994
3995out_unpin:
3996        intel_context_unpin(ve);
3997out_put:
3998        intel_context_put(ve);
3999out_close:
4000        return err;
4001}
4002
4003static int live_virtual_mask(void *arg)
4004{
4005        struct intel_gt *gt = arg;
4006        struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];
4007        unsigned int class;
4008        int err;
4009
4010        if (intel_uc_uses_guc_submission(&gt->uc))
4011                return 0;
4012
4013        for (class = 0; class <= MAX_ENGINE_CLASS; class++) {
4014                unsigned int nsibling;
4015
4016                nsibling = select_siblings(gt, class, siblings);
4017                if (nsibling < 2)
4018                        continue;
4019
4020                err = mask_virtual_engine(gt, siblings, nsibling);
4021                if (err)
4022                        return err;
4023        }
4024
4025        return 0;
4026}
4027
4028static int slicein_virtual_engine(struct intel_gt *gt,
4029                                  struct intel_engine_cs **siblings,
4030                                  unsigned int nsibling)
4031{
4032        const long timeout = slice_timeout(siblings[0]);
4033        struct intel_context *ce;
4034        struct i915_request *rq;
4035        struct igt_spinner spin;
4036        unsigned int n;
4037        int err = 0;
4038
4039        /*
4040         * Virtual requests must take part in timeslicing on the target engines.
4041         */
4042
4043        if (igt_spinner_init(&spin, gt))
4044                return -ENOMEM;
4045
4046        for (n = 0; n < nsibling; n++) {
4047                ce = intel_context_create(siblings[n]);
4048                if (IS_ERR(ce)) {
4049                        err = PTR_ERR(ce);
4050                        goto out;
4051                }
4052
4053                rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);
4054                intel_context_put(ce);
4055                if (IS_ERR(rq)) {
4056                        err = PTR_ERR(rq);
4057                        goto out;
4058                }
4059
4060                i915_request_add(rq);
4061        }
4062
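            /*
             * With a spinner already hogging every sibling, the plain
             * virtual request created below can only complete if timeslicing
             * kicks in and expires the spinners' slices.
             */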
4063        ce = intel_engine_create_virtual(siblings, nsibling, 0);
4064        if (IS_ERR(ce)) {
4065                err = PTR_ERR(ce);
4066                goto out;
4067        }
4068
4069        rq = intel_context_create_request(ce);
4070        intel_context_put(ce);
4071        if (IS_ERR(rq)) {
4072                err = PTR_ERR(rq);
4073                goto out;
4074        }
4075
4076        i915_request_get(rq);
4077        i915_request_add(rq);
4078        if (i915_request_wait(rq, 0, timeout) < 0) {
4079                GEM_TRACE_ERR("%s(%s) failed to slice in virtual request\n",
4080                              __func__, rq->engine->name);
4081                GEM_TRACE_DUMP();
4082                intel_gt_set_wedged(gt);
4083                err = -EIO;
4084        }
4085        i915_request_put(rq);
4086
4087out:
4088        igt_spinner_end(&spin);
4089        if (igt_flush_test(gt->i915))
4090                err = -EIO;
4091        igt_spinner_fini(&spin);
4092        return err;
4093}
4094
4095static int sliceout_virtual_engine(struct intel_gt *gt,
4096                                   struct intel_engine_cs **siblings,
4097                                   unsigned int nsibling)
4098{
4099        const long timeout = slice_timeout(siblings[0]);
4100        struct intel_context *ce;
4101        struct i915_request *rq;
4102        struct igt_spinner spin;
4103        unsigned int n;
4104        int err = 0;
4105
4106        /*
4107         * Virtual requests must allow others a fair timeslice.
4108         */
4109
4110        if (igt_spinner_init(&spin, gt))
4111                return -ENOMEM;
4112
4113        /* XXX We do not handle oversubscription and fairness with normal rq */
4114        for (n = 0; n < nsibling; n++) {
4115                ce = intel_engine_create_virtual(siblings, nsibling, 0);
4116                if (IS_ERR(ce)) {
4117                        err = PTR_ERR(ce);
4118                        goto out;
4119                }
4120
4121                rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);
4122                intel_context_put(ce);
4123                if (IS_ERR(rq)) {
4124                        err = PTR_ERR(rq);
4125                        goto out;
4126                }
4127
4128                i915_request_add(rq);
4129        }
4130
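            /*
             * Conversely, with a virtual spinner queued for every sibling, a
             * plain request submitted to each physical engine must still be
             * granted a timeslice of its own within the slice timeout.
             */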
4131        for (n = 0; !err && n < nsibling; n++) {
4132                ce = intel_context_create(siblings[n]);
4133                if (IS_ERR(ce)) {
4134                        err = PTR_ERR(ce);
4135                        goto out;
4136                }
4137
4138                rq = intel_context_create_request(ce);
4139                intel_context_put(ce);
4140                if (IS_ERR(rq)) {
4141                        err = PTR_ERR(rq);
4142                        goto out;
4143                }
4144
4145                i915_request_get(rq);
4146                i915_request_add(rq);
4147                if (i915_request_wait(rq, 0, timeout) < 0) {
4148                        GEM_TRACE_ERR("%s(%s) failed to slice out virtual request\n",
4149                                      __func__, siblings[n]->name);
4150                        GEM_TRACE_DUMP();
4151                        intel_gt_set_wedged(gt);
4152                        err = -EIO;
4153                }
4154                i915_request_put(rq);
4155        }
4156
4157out:
4158        igt_spinner_end(&spin);
4159        if (igt_flush_test(gt->i915))
4160                err = -EIO;
4161        igt_spinner_fini(&spin);
4162        return err;
4163}
4164
4165static int live_virtual_slice(void *arg)
4166{
4167        struct intel_gt *gt = arg;
4168        struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];
4169        unsigned int class;
4170        int err;
4171
4172        if (intel_uc_uses_guc_submission(&gt->uc))
4173                return 0;
4174
4175        for (class = 0; class <= MAX_ENGINE_CLASS; class++) {
4176                unsigned int nsibling;
4177
4178                nsibling = __select_siblings(gt, class, siblings,
4179                                             intel_engine_has_timeslices);
4180                if (nsibling < 2)
4181                        continue;
4182
4183                err = slicein_virtual_engine(gt, siblings, nsibling);
4184                if (err)
4185                        return err;
4186
4187                err = sliceout_virtual_engine(gt, siblings, nsibling);
4188                if (err)
4189                        return err;
4190        }
4191
4192        return 0;
4193}
4194
4195static int preserved_virtual_engine(struct intel_gt *gt,
4196                                    struct intel_engine_cs **siblings,
4197                                    unsigned int nsibling)
4198{
4199        struct i915_request *last = NULL;
4200        struct intel_context *ve;
4201        struct i915_vma *scratch;
4202        struct igt_live_test t;
4203        unsigned int n;
4204        int err = 0;
4205        u32 *cs;
4206
4207        scratch =
4208                __vm_create_scratch_for_read_pinned(&siblings[0]->gt->ggtt->vm,
4209                                                    PAGE_SIZE);
4210        if (IS_ERR(scratch))
4211                return PTR_ERR(scratch);
4212
4213        err = i915_vma_sync(scratch);
4214        if (err)
4215                goto out_scratch;
4216
4217        ve = intel_engine_create_virtual(siblings, nsibling, 0);
4218        if (IS_ERR(ve)) {
4219                err = PTR_ERR(ve);
4220                goto out_scratch;
4221        }
4222
4223        err = intel_context_pin(ve);
4224        if (err)
4225                goto out_put;
4226
4227        err = igt_live_test_begin(&t, gt->i915, __func__, ve->engine->name);
4228        if (err)
4229                goto out_unpin;
4230
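            /*
             * Each request stores CS_GPR(n) of whichever sibling it lands on
             * into scratch[n], then seeds the next GPR with n + 1 for the
             * request that follows. scratch[] can only read back as 0..N-1
             * if the GPR written on one sibling is carried in the shared
             * context image across to the next.
             */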
4231        for (n = 0; n < NUM_GPR_DW; n++) {
4232                struct intel_engine_cs *engine = siblings[n % nsibling];
4233                struct i915_request *rq;
4234
4235                rq = i915_request_create(ve);
4236                if (IS_ERR(rq)) {
4237                        err = PTR_ERR(rq);
4238                        goto out_end;
4239                }
4240
4241                i915_request_put(last);
4242                last = i915_request_get(rq);
4243
4244                cs = intel_ring_begin(rq, 8);
4245                if (IS_ERR(cs)) {
4246                        i915_request_add(rq);
4247                        err = PTR_ERR(cs);
4248                        goto out_end;
4249                }
4250
4251                *cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_USE_GGTT;
4252                *cs++ = CS_GPR(engine, n);
4253                *cs++ = i915_ggtt_offset(scratch) + n * sizeof(u32);
4254                *cs++ = 0;
4255
4256                *cs++ = MI_LOAD_REGISTER_IMM(1);
4257                *cs++ = CS_GPR(engine, (n + 1) % NUM_GPR_DW);
4258                *cs++ = n + 1;
4259
4260                *cs++ = MI_NOOP;
4261                intel_ring_advance(rq, cs);
4262
4263                /* Restrict this request to run on a particular engine */
4264                rq->execution_mask = engine->mask;
4265                i915_request_add(rq);
4266        }
4267
4268        if (i915_request_wait(last, 0, HZ / 5) < 0) {
4269                err = -ETIME;
4270                goto out_end;
4271        }
4272
4273        cs = i915_gem_object_pin_map_unlocked(scratch->obj, I915_MAP_WB);
4274        if (IS_ERR(cs)) {
4275                err = PTR_ERR(cs);
4276                goto out_end;
4277        }
4278
4279        for (n = 0; n < NUM_GPR_DW; n++) {
4280                if (cs[n] != n) {
4281                        pr_err("Incorrect value[%d] found for GPR[%d]\n",
4282                               cs[n], n);
4283                        err = -EINVAL;
4284                        break;
4285                }
4286        }
4287
4288        i915_gem_object_unpin_map(scratch->obj);
4289
4290out_end:
4291        if (igt_live_test_end(&t))
4292                err = -EIO;
4293        i915_request_put(last);
4294out_unpin:
4295        intel_context_unpin(ve);
4296out_put:
4297        intel_context_put(ve);
4298out_scratch:
4299        i915_vma_unpin_and_release(&scratch, 0);
4300        return err;
4301}
4302
4303static int live_virtual_preserved(void *arg)
4304{
4305        struct intel_gt *gt = arg;
4306        struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];
4307        unsigned int class;
4308
4309        /*
4310         * Check that the context image retains non-privileged (user) registers
4311         * from one engine to the next. For this we check that the CS_GPRs
4312         * are preserved.
4313         */
4314
4315        if (intel_uc_uses_guc_submission(&gt->uc))
4316                return 0;
4317
4318        /* As we use the CS_GPRs, we cannot run before they existed on all engines. */
4319        if (GRAPHICS_VER(gt->i915) < 9)
4320                return 0;
4321
4322        for (class = 0; class <= MAX_ENGINE_CLASS; class++) {
4323                int nsibling, err;
4324
4325                nsibling = select_siblings(gt, class, siblings);
4326                if (nsibling < 2)
4327                        continue;
4328
4329                err = preserved_virtual_engine(gt, siblings, nsibling);
4330                if (err)
4331                        return err;
4332        }
4333
4334        return 0;
4335}
4336
4337static int reset_virtual_engine(struct intel_gt *gt,
4338                                struct intel_engine_cs **siblings,
4339                                unsigned int nsibling)
4340{
4341        struct intel_engine_cs *engine;
4342        struct intel_context *ve;
4343        struct igt_spinner spin;
4344        struct i915_request *rq;
4345        unsigned int n;
4346        int err = 0;
4347
4348        /*
4349         * In order to support offline error capture for fast preempt reset,
4350         * we need to decouple the guilty request and ensure that it and its
4351         * descendants are not executed while the capture is in progress.
4352         */
4353
4354        if (igt_spinner_init(&spin, gt))
4355                return -ENOMEM;
4356
4357        ve = intel_engine_create_virtual(siblings, nsibling, 0);
4358        if (IS_ERR(ve)) {
4359                err = PTR_ERR(ve);
4360                goto out_spin;
4361        }
4362
4363        for (n = 0; n < nsibling; n++)
4364                st_engine_heartbeat_disable(siblings[n]);
4365
4366        rq = igt_spinner_create_request(&spin, ve, MI_ARB_CHECK);
4367        if (IS_ERR(rq)) {
4368                err = PTR_ERR(rq);
4369                goto out_heartbeat;
4370        }
4371        i915_request_add(rq);
4372
4373        if (!igt_wait_for_spinner(&spin, rq)) {
4374                intel_gt_set_wedged(gt);
4375                err = -ETIME;
4376                goto out_heartbeat;
4377        }
4378
4379        engine = rq->engine;
4380        GEM_BUG_ON(engine == ve->engine);
4381
4382        /* Take ownership of the reset and tasklet */
4383        err = engine_lock_reset_tasklet(engine);
4384        if (err)
4385                goto out_heartbeat;
4386
4387        engine->sched_engine->tasklet.callback(&engine->sched_engine->tasklet);
4388        GEM_BUG_ON(execlists_active(&engine->execlists) != rq);
4389
4390        /* Fake a preemption event; failed of course */
4391        spin_lock_irq(&engine->sched_engine->lock);
4392        __unwind_incomplete_requests(engine);
4393        spin_unlock_irq(&engine->sched_engine->lock);
4394        GEM_BUG_ON(rq->engine != engine);
4395
4396        /* Reset the engine while keeping our active request on hold */
4397        execlists_hold(engine, rq);
4398        GEM_BUG_ON(!i915_request_on_hold(rq));
4399
4400        __intel_engine_reset_bh(engine, NULL);
4401        GEM_BUG_ON(rq->fence.error != -EIO);
4402
4403        /* Release our grasp on the engine, letting CS flow again */
4404        engine_unlock_reset_tasklet(engine);
4405
4406        /* Check that we do not resubmit the held request */
4407        i915_request_get(rq);
4408        if (!i915_request_wait(rq, 0, HZ / 5)) {
4409                pr_err("%s: on hold request completed!\n",
4410                       engine->name);
4411                intel_gt_set_wedged(gt);
4412                err = -EIO;
4413                goto out_rq;
4414        }
4415        GEM_BUG_ON(!i915_request_on_hold(rq));
4416
4417        /* But is resubmitted on release */
4418        execlists_unhold(engine, rq);
4419        if (i915_request_wait(rq, 0, HZ / 5) < 0) {
4420                pr_err("%s: held request did not complete!\n",
4421                       engine->name);
4422                intel_gt_set_wedged(gt);
4423                err = -ETIME;
4424        }
4425
4426out_rq:
4427        i915_request_put(rq);
4428out_heartbeat:
4429        for (n = 0; n < nsibling; n++)
4430                st_engine_heartbeat_enable(siblings[n]);
4431
4432        intel_context_put(ve);
4433out_spin:
4434        igt_spinner_fini(&spin);
4435        return err;
4436}
4437
4438static int live_virtual_reset(void *arg)
4439{
4440        struct intel_gt *gt = arg;
4441        struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];
4442        unsigned int class;
4443
4444        /*
4445         * Check that we handle a reset event within a virtual engine.
4446         * Only the physical engine is reset, but we have to check the flow
4447         * of the virtual requests around the reset, and make sure it is not
4448         * forgotten.
4449         */
4450
4451        if (intel_uc_uses_guc_submission(&gt->uc))
4452                return 0;
4453
4454        if (!intel_has_reset_engine(gt))
4455                return 0;
4456
4457        for (class = 0; class <= MAX_ENGINE_CLASS; class++) {
4458                int nsibling, err;
4459
4460                nsibling = select_siblings(gt, class, siblings);
4461                if (nsibling < 2)
4462                        continue;
4463
4464                err = reset_virtual_engine(gt, siblings, nsibling);
4465                if (err)
4466                        return err;
4467        }
4468
4469        return 0;
4470}
4471
4472int intel_execlists_live_selftests(struct drm_i915_private *i915)
4473{
4474        static const struct i915_subtest tests[] = {
4475                SUBTEST(live_sanitycheck),
4476                SUBTEST(live_unlite_switch),
4477                SUBTEST(live_unlite_preempt),
4478                SUBTEST(live_unlite_ring),
4479                SUBTEST(live_pin_rewind),
4480                SUBTEST(live_hold_reset),
4481                SUBTEST(live_error_interrupt),
4482                SUBTEST(live_timeslice_preempt),
4483                SUBTEST(live_timeslice_rewind),
4484                SUBTEST(live_timeslice_queue),
4485                SUBTEST(live_timeslice_nopreempt),
4486                SUBTEST(live_busywait_preempt),
4487                SUBTEST(live_preempt),
4488                SUBTEST(live_late_preempt),
4489                SUBTEST(live_nopreempt),
4490                SUBTEST(live_preempt_cancel),
4491                SUBTEST(live_suppress_self_preempt),
4492                SUBTEST(live_chain_preempt),
4493                SUBTEST(live_preempt_ring),
4494                SUBTEST(live_preempt_gang),
4495                SUBTEST(live_preempt_timeout),
4496                SUBTEST(live_preempt_user),
4497                SUBTEST(live_preempt_smoke),
4498                SUBTEST(live_virtual_engine),
4499                SUBTEST(live_virtual_mask),
4500                SUBTEST(live_virtual_preserved),
4501                SUBTEST(live_virtual_slice),
4502                SUBTEST(live_virtual_reset),
4503        };
4504
4505        if (to_gt(i915)->submission_method != INTEL_SUBMISSION_ELSP)
4506                return 0;
4507
4508        if (intel_gt_is_wedged(to_gt(i915)))
4509                return 0;
4510
4511        return intel_gt_live_subtests(tests, to_gt(i915));
4512}
4513