linux/drivers/gpu/drm/i915/gt/intel_engine_cs.c
   1// SPDX-License-Identifier: MIT
   2/*
   3 * Copyright © 2016 Intel Corporation
   4 */
   5
   6#include <drm/drm_print.h>
   7
   8#include "gem/i915_gem_context.h"
   9
  10#include "i915_drv.h"
  11
  12#include "intel_breadcrumbs.h"
  13#include "intel_context.h"
  14#include "intel_engine.h"
  15#include "intel_engine_pm.h"
  16#include "intel_engine_user.h"
  17#include "intel_execlists_submission.h"
  18#include "intel_gt.h"
  19#include "intel_gt_requests.h"
  20#include "intel_gt_pm.h"
  21#include "intel_lrc_reg.h"
  22#include "intel_reset.h"
  23#include "intel_ring.h"
  24#include "uc/intel_guc_submission.h"
  25
   26/* Haswell does have the CXT_SIZE register, however it does not appear to be
   27 * valid. The docs describe, in dwords, what is in the context object. The full
  28 * size is 70720 bytes, however, the power context and execlist context will
  29 * never be saved (power context is stored elsewhere, and execlists don't work
  30 * on HSW) - so the final size, including the extra state required for the
  31 * Resource Streamer, is 66944 bytes, which rounds to 17 pages.
  32 */
  33#define HSW_CXT_TOTAL_SIZE              (17 * PAGE_SIZE)
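/*
 * Editor's worked example of the rounding above, assuming a 4K PAGE_SIZE:
 * 66944 / 4096 = 16.34..., so DIV_ROUND_UP(66944, PAGE_SIZE) == 17 pages,
 * matching HSW_CXT_TOTAL_SIZE.
 */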
  34
  35#define DEFAULT_LR_CONTEXT_RENDER_SIZE  (22 * PAGE_SIZE)
  36#define GEN8_LR_CONTEXT_RENDER_SIZE     (20 * PAGE_SIZE)
  37#define GEN9_LR_CONTEXT_RENDER_SIZE     (22 * PAGE_SIZE)
  38#define GEN11_LR_CONTEXT_RENDER_SIZE    (14 * PAGE_SIZE)
  39
  40#define GEN8_LR_CONTEXT_OTHER_SIZE      ( 2 * PAGE_SIZE)
  41
  42#define MAX_MMIO_BASES 3
  43struct engine_info {
  44        u8 class;
  45        u8 instance;
  46        /* mmio bases table *must* be sorted in reverse graphics_ver order */
  47        struct engine_mmio_base {
  48                u32 graphics_ver : 8;
  49                u32 base : 24;
  50        } mmio_bases[MAX_MMIO_BASES];
  51};
  52
  53static const struct engine_info intel_engines[] = {
  54        [RCS0] = {
  55                .class = RENDER_CLASS,
  56                .instance = 0,
  57                .mmio_bases = {
  58                        { .graphics_ver = 1, .base = RENDER_RING_BASE }
  59                },
  60        },
  61        [BCS0] = {
  62                .class = COPY_ENGINE_CLASS,
  63                .instance = 0,
  64                .mmio_bases = {
  65                        { .graphics_ver = 6, .base = BLT_RING_BASE }
  66                },
  67        },
  68        [VCS0] = {
  69                .class = VIDEO_DECODE_CLASS,
  70                .instance = 0,
  71                .mmio_bases = {
  72                        { .graphics_ver = 11, .base = GEN11_BSD_RING_BASE },
  73                        { .graphics_ver = 6, .base = GEN6_BSD_RING_BASE },
  74                        { .graphics_ver = 4, .base = BSD_RING_BASE }
  75                },
  76        },
  77        [VCS1] = {
  78                .class = VIDEO_DECODE_CLASS,
  79                .instance = 1,
  80                .mmio_bases = {
  81                        { .graphics_ver = 11, .base = GEN11_BSD2_RING_BASE },
  82                        { .graphics_ver = 8, .base = GEN8_BSD2_RING_BASE }
  83                },
  84        },
  85        [VCS2] = {
  86                .class = VIDEO_DECODE_CLASS,
  87                .instance = 2,
  88                .mmio_bases = {
  89                        { .graphics_ver = 11, .base = GEN11_BSD3_RING_BASE }
  90                },
  91        },
  92        [VCS3] = {
  93                .class = VIDEO_DECODE_CLASS,
  94                .instance = 3,
  95                .mmio_bases = {
  96                        { .graphics_ver = 11, .base = GEN11_BSD4_RING_BASE }
  97                },
  98        },
  99        [VCS4] = {
 100                .class = VIDEO_DECODE_CLASS,
 101                .instance = 4,
 102                .mmio_bases = {
 103                        { .graphics_ver = 12, .base = XEHP_BSD5_RING_BASE }
 104                },
 105        },
 106        [VCS5] = {
 107                .class = VIDEO_DECODE_CLASS,
 108                .instance = 5,
 109                .mmio_bases = {
 110                        { .graphics_ver = 12, .base = XEHP_BSD6_RING_BASE }
 111                },
 112        },
 113        [VCS6] = {
 114                .class = VIDEO_DECODE_CLASS,
 115                .instance = 6,
 116                .mmio_bases = {
 117                        { .graphics_ver = 12, .base = XEHP_BSD7_RING_BASE }
 118                },
 119        },
 120        [VCS7] = {
 121                .class = VIDEO_DECODE_CLASS,
 122                .instance = 7,
 123                .mmio_bases = {
 124                        { .graphics_ver = 12, .base = XEHP_BSD8_RING_BASE }
 125                },
 126        },
 127        [VECS0] = {
 128                .class = VIDEO_ENHANCEMENT_CLASS,
 129                .instance = 0,
 130                .mmio_bases = {
 131                        { .graphics_ver = 11, .base = GEN11_VEBOX_RING_BASE },
 132                        { .graphics_ver = 7, .base = VEBOX_RING_BASE }
 133                },
 134        },
 135        [VECS1] = {
 136                .class = VIDEO_ENHANCEMENT_CLASS,
 137                .instance = 1,
 138                .mmio_bases = {
 139                        { .graphics_ver = 11, .base = GEN11_VEBOX2_RING_BASE }
 140                },
 141        },
 142        [VECS2] = {
 143                .class = VIDEO_ENHANCEMENT_CLASS,
 144                .instance = 2,
 145                .mmio_bases = {
 146                        { .graphics_ver = 12, .base = XEHP_VEBOX3_RING_BASE }
 147                },
 148        },
 149        [VECS3] = {
 150                .class = VIDEO_ENHANCEMENT_CLASS,
 151                .instance = 3,
 152                .mmio_bases = {
 153                        { .graphics_ver = 12, .base = XEHP_VEBOX4_RING_BASE }
 154                },
 155        },
 156};
 157
 158/**
 159 * intel_engine_context_size() - return the size of the context for an engine
 160 * @gt: the gt
 161 * @class: engine class
 162 *
 163 * Each engine class may require a different amount of space for a context
 164 * image.
 165 *
 166 * Return: size (in bytes) of an engine class specific context image
 167 *
 168 * Note: this size includes the HWSP, which is part of the context image
 169 * in LRC mode, but does not include the "shared data page" used with
 170 * GuC submission. The caller should account for this if using the GuC.
 171 */
 172u32 intel_engine_context_size(struct intel_gt *gt, u8 class)
 173{
 174        struct intel_uncore *uncore = gt->uncore;
 175        u32 cxt_size;
 176
 177        BUILD_BUG_ON(I915_GTT_PAGE_SIZE != PAGE_SIZE);
 178
 179        switch (class) {
 180        case RENDER_CLASS:
 181                switch (GRAPHICS_VER(gt->i915)) {
 182                default:
 183                        MISSING_CASE(GRAPHICS_VER(gt->i915));
 184                        return DEFAULT_LR_CONTEXT_RENDER_SIZE;
 185                case 12:
 186                case 11:
 187                        return GEN11_LR_CONTEXT_RENDER_SIZE;
 188                case 9:
 189                        return GEN9_LR_CONTEXT_RENDER_SIZE;
 190                case 8:
 191                        return GEN8_LR_CONTEXT_RENDER_SIZE;
 192                case 7:
 193                        if (IS_HASWELL(gt->i915))
 194                                return HSW_CXT_TOTAL_SIZE;
 195
 196                        cxt_size = intel_uncore_read(uncore, GEN7_CXT_SIZE);
 197                        return round_up(GEN7_CXT_TOTAL_SIZE(cxt_size) * 64,
 198                                        PAGE_SIZE);
 199                case 6:
 200                        cxt_size = intel_uncore_read(uncore, CXT_SIZE);
 201                        return round_up(GEN6_CXT_TOTAL_SIZE(cxt_size) * 64,
 202                                        PAGE_SIZE);
 203                case 5:
 204                case 4:
 205                        /*
 206                         * There is a discrepancy here between the size reported
 207                         * by the register and the size of the context layout
  208                         * in the docs. Both are described as authoritative!
 209                         *
 210                         * The discrepancy is on the order of a few cachelines,
 211                         * but the total is under one page (4k), which is our
 212                         * minimum allocation anyway so it should all come
 213                         * out in the wash.
 214                         */
 215                        cxt_size = intel_uncore_read(uncore, CXT_SIZE) + 1;
 216                        drm_dbg(&gt->i915->drm,
 217                                "graphics_ver = %d CXT_SIZE = %d bytes [0x%08x]\n",
 218                                GRAPHICS_VER(gt->i915), cxt_size * 64,
 219                                cxt_size - 1);
 220                        return round_up(cxt_size * 64, PAGE_SIZE);
 221                case 3:
 222                case 2:
 223                /* For the special day when i810 gets merged. */
 224                case 1:
 225                        return 0;
 226                }
 227                break;
 228        default:
 229                MISSING_CASE(class);
 230                fallthrough;
 231        case VIDEO_DECODE_CLASS:
 232        case VIDEO_ENHANCEMENT_CLASS:
 233        case COPY_ENGINE_CLASS:
 234                if (GRAPHICS_VER(gt->i915) < 8)
 235                        return 0;
 236                return GEN8_LR_CONTEXT_OTHER_SIZE;
 237        }
 238}
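/*
 * Editor's usage sketch (hypothetical caller; the real user in this file is
 * intel_engine_setup() further below):
 *
 *	u32 size = intel_engine_context_size(gt, engine->class);
 *
 *	if (!size)
 *		return;	// no logical context image for this class/platform
 *	// otherwise allocate/validate a 'size'-byte context image
 */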
 239
 240static u32 __engine_mmio_base(struct drm_i915_private *i915,
 241                              const struct engine_mmio_base *bases)
 242{
 243        int i;
 244
 245        for (i = 0; i < MAX_MMIO_BASES; i++)
 246                if (GRAPHICS_VER(i915) >= bases[i].graphics_ver)
 247                        break;
 248
 249        GEM_BUG_ON(i == MAX_MMIO_BASES);
 250        GEM_BUG_ON(!bases[i].base);
 251
 252        return bases[i].base;
 253}
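/*
 * Editor's walk-through of the reverse-sorted lookup above: for VCS0 on a
 * graphics_ver 9 platform the table is { ver 11, ver 6, ver 4 }; 9 >= 11 is
 * false, 9 >= 6 is true, so the loop stops at the second entry and
 * GEN6_BSD_RING_BASE is returned.
 */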
 254
 255static void __sprint_engine_name(struct intel_engine_cs *engine)
 256{
 257        /*
 258         * Before we know what the uABI name for this engine will be,
 259         * we still would like to keep track of this engine in the debug logs.
 260         * We throw in a ' here as a reminder that this isn't its final name.
 261         */
 262        GEM_WARN_ON(snprintf(engine->name, sizeof(engine->name), "%s'%u",
 263                             intel_engine_class_repr(engine->class),
 264                             engine->instance) >= sizeof(engine->name));
 265}
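/*
 * Editor's example of the interim format above: RCS0 is printed as "rcs'0"
 * here; the final uABI name without the quote (e.g. "rcs0") is only assigned
 * later, when the engines are registered in intel_engine_user.c.
 */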
 266
 267void intel_engine_set_hwsp_writemask(struct intel_engine_cs *engine, u32 mask)
 268{
 269        /*
 270         * Though they added more rings on g4x/ilk, they did not add
 271         * per-engine HWSTAM until gen6.
 272         */
 273        if (GRAPHICS_VER(engine->i915) < 6 && engine->class != RENDER_CLASS)
 274                return;
 275
 276        if (GRAPHICS_VER(engine->i915) >= 3)
 277                ENGINE_WRITE(engine, RING_HWSTAM, mask);
 278        else
 279                ENGINE_WRITE16(engine, RING_HWSTAM, mask);
 280}
 281
 282static void intel_engine_sanitize_mmio(struct intel_engine_cs *engine)
 283{
 284        /* Mask off all writes into the unknown HWSP */
 285        intel_engine_set_hwsp_writemask(engine, ~0u);
 286}
 287
 288static void nop_irq_handler(struct intel_engine_cs *engine, u16 iir)
 289{
 290        GEM_DEBUG_WARN_ON(iir);
 291}
 292
 293static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id)
 294{
 295        const struct engine_info *info = &intel_engines[id];
 296        struct drm_i915_private *i915 = gt->i915;
 297        struct intel_engine_cs *engine;
 298        u8 guc_class;
 299
 300        BUILD_BUG_ON(MAX_ENGINE_CLASS >= BIT(GEN11_ENGINE_CLASS_WIDTH));
 301        BUILD_BUG_ON(MAX_ENGINE_INSTANCE >= BIT(GEN11_ENGINE_INSTANCE_WIDTH));
 302        BUILD_BUG_ON(I915_MAX_VCS > (MAX_ENGINE_INSTANCE + 1));
 303        BUILD_BUG_ON(I915_MAX_VECS > (MAX_ENGINE_INSTANCE + 1));
 304
 305        if (GEM_DEBUG_WARN_ON(id >= ARRAY_SIZE(gt->engine)))
 306                return -EINVAL;
 307
 308        if (GEM_DEBUG_WARN_ON(info->class > MAX_ENGINE_CLASS))
 309                return -EINVAL;
 310
 311        if (GEM_DEBUG_WARN_ON(info->instance > MAX_ENGINE_INSTANCE))
 312                return -EINVAL;
 313
 314        if (GEM_DEBUG_WARN_ON(gt->engine_class[info->class][info->instance]))
 315                return -EINVAL;
 316
 317        engine = kzalloc(sizeof(*engine), GFP_KERNEL);
 318        if (!engine)
 319                return -ENOMEM;
 320
 321        BUILD_BUG_ON(BITS_PER_TYPE(engine->mask) < I915_NUM_ENGINES);
 322
 323        engine->id = id;
 324        engine->legacy_idx = INVALID_ENGINE;
 325        engine->mask = BIT(id);
 326        engine->i915 = i915;
 327        engine->gt = gt;
 328        engine->uncore = gt->uncore;
 329        guc_class = engine_class_to_guc_class(info->class);
 330        engine->guc_id = MAKE_GUC_ID(guc_class, info->instance);
 331        engine->mmio_base = __engine_mmio_base(i915, info->mmio_bases);
 332
 333        engine->irq_handler = nop_irq_handler;
 334
 335        engine->class = info->class;
 336        engine->instance = info->instance;
 337        __sprint_engine_name(engine);
 338
 339        engine->props.heartbeat_interval_ms =
 340                CONFIG_DRM_I915_HEARTBEAT_INTERVAL;
 341        engine->props.max_busywait_duration_ns =
 342                CONFIG_DRM_I915_MAX_REQUEST_BUSYWAIT;
 343        engine->props.preempt_timeout_ms =
 344                CONFIG_DRM_I915_PREEMPT_TIMEOUT;
 345        engine->props.stop_timeout_ms =
 346                CONFIG_DRM_I915_STOP_TIMEOUT;
 347        engine->props.timeslice_duration_ms =
 348                CONFIG_DRM_I915_TIMESLICE_DURATION;
 349
 350        /* Override to uninterruptible for OpenCL workloads. */
 351        if (GRAPHICS_VER(i915) == 12 && engine->class == RENDER_CLASS)
 352                engine->props.preempt_timeout_ms = 0;
 353
 354        engine->defaults = engine->props; /* never to change again */
 355
 356        engine->context_size = intel_engine_context_size(gt, engine->class);
 357        if (WARN_ON(engine->context_size > BIT(20)))
 358                engine->context_size = 0;
 359        if (engine->context_size)
 360                DRIVER_CAPS(i915)->has_logical_contexts = true;
 361
 362        ewma__engine_latency_init(&engine->latency);
 363        seqcount_init(&engine->stats.lock);
 364
 365        ATOMIC_INIT_NOTIFIER_HEAD(&engine->context_status_notifier);
 366
 367        /* Scrub mmio state on takeover */
 368        intel_engine_sanitize_mmio(engine);
 369
 370        gt->engine_class[info->class][info->instance] = engine;
 371        gt->engine[id] = engine;
 372
 373        return 0;
 374}
 375
 376static void __setup_engine_capabilities(struct intel_engine_cs *engine)
 377{
 378        struct drm_i915_private *i915 = engine->i915;
 379
 380        if (engine->class == VIDEO_DECODE_CLASS) {
 381                /*
  382                 * HEVC support is present on the first engine instance
 383                 * before Gen11 and on all instances afterwards.
 384                 */
 385                if (GRAPHICS_VER(i915) >= 11 ||
 386                    (GRAPHICS_VER(i915) >= 9 && engine->instance == 0))
 387                        engine->uabi_capabilities |=
 388                                I915_VIDEO_CLASS_CAPABILITY_HEVC;
 389
 390                /*
 391                 * SFC block is present only on even logical engine
 392                 * instances.
 393                 */
 394                if ((GRAPHICS_VER(i915) >= 11 &&
 395                     (engine->gt->info.vdbox_sfc_access &
 396                      BIT(engine->instance))) ||
 397                    (GRAPHICS_VER(i915) >= 9 && engine->instance == 0))
 398                        engine->uabi_capabilities |=
 399                                I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC;
 400        } else if (engine->class == VIDEO_ENHANCEMENT_CLASS) {
 401                if (GRAPHICS_VER(i915) >= 9)
 402                        engine->uabi_capabilities |=
 403                                I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC;
 404        }
 405}
 406
 407static void intel_setup_engine_capabilities(struct intel_gt *gt)
 408{
 409        struct intel_engine_cs *engine;
 410        enum intel_engine_id id;
 411
 412        for_each_engine(engine, gt, id)
 413                __setup_engine_capabilities(engine);
 414}
 415
 416/**
 417 * intel_engines_release() - free the resources allocated for Command Streamers
 418 * @gt: pointer to struct intel_gt
 419 */
 420void intel_engines_release(struct intel_gt *gt)
 421{
 422        struct intel_engine_cs *engine;
 423        enum intel_engine_id id;
 424
 425        /*
 426         * Before we release the resources held by engine, we must be certain
 427         * that the HW is no longer accessing them -- having the GPU scribble
 428         * to or read from a page being used for something else causes no end
 429         * of fun.
 430         *
 431         * The GPU should be reset by this point, but assume the worst just
 432         * in case we aborted before completely initialising the engines.
 433         */
 434        GEM_BUG_ON(intel_gt_pm_is_awake(gt));
 435        if (!INTEL_INFO(gt->i915)->gpu_reset_clobbers_display)
 436                __intel_gt_reset(gt, ALL_ENGINES);
 437
 438        /* Decouple the backend; but keep the layout for late GPU resets */
 439        for_each_engine(engine, gt, id) {
 440                if (!engine->release)
 441                        continue;
 442
 443                intel_wakeref_wait_for_idle(&engine->wakeref);
 444                GEM_BUG_ON(intel_engine_pm_is_awake(engine));
 445
 446                engine->release(engine);
 447                engine->release = NULL;
 448
 449                memset(&engine->reset, 0, sizeof(engine->reset));
 450        }
 451}
 452
 453void intel_engine_free_request_pool(struct intel_engine_cs *engine)
 454{
 455        if (!engine->request_pool)
 456                return;
 457
 458        kmem_cache_free(i915_request_slab_cache(), engine->request_pool);
 459}
 460
 461void intel_engines_free(struct intel_gt *gt)
 462{
 463        struct intel_engine_cs *engine;
 464        enum intel_engine_id id;
 465
 466        /* Free the requests! dma-resv keeps fences around for an eternity */
 467        rcu_barrier();
 468
 469        for_each_engine(engine, gt, id) {
 470                intel_engine_free_request_pool(engine);
 471                kfree(engine);
 472                gt->engine[id] = NULL;
 473        }
 474}
 475
 476static
 477bool gen11_vdbox_has_sfc(struct drm_i915_private *i915,
 478                         unsigned int physical_vdbox,
 479                         unsigned int logical_vdbox, u16 vdbox_mask)
 480{
 481        /*
 482         * In Gen11, only even numbered logical VDBOXes are hooked
 483         * up to an SFC (Scaler & Format Converter) unit.
  484         * In Gen12, even numbered physical instances are always connected
  485         * to an SFC. Odd numbered physical instances have SFC only if the
  486         * previous even instance is fused off.
 487         */
 488        if (GRAPHICS_VER(i915) == 12)
 489                return (physical_vdbox % 2 == 0) ||
 490                        !(BIT(physical_vdbox - 1) & vdbox_mask);
 491        else if (GRAPHICS_VER(i915) == 11)
 492                return logical_vdbox % 2 == 0;
 493
 494        MISSING_CASE(GRAPHICS_VER(i915));
 495        return false;
 496}
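/*
 * Editor's illustration of the rules above with hypothetical fuse values:
 *
 *	gen11: logical_vdbox 0 -> has SFC, logical_vdbox 1 -> no SFC
 *	gen12: physical vdbox 2 (even)                      -> has SFC
 *	gen12: physical vdbox 1, vdbox_mask 0b0010
 *	       (vcs0 fused off, so BIT(0) is clear)         -> has SFC
 *	gen12: physical vdbox 1, vdbox_mask 0b0011          -> no SFC
 */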
 497
 498/*
 499 * Determine which engines are fused off in our particular hardware.
 500 * Note that we have a catch-22 situation where we need to be able to access
 501 * the blitter forcewake domain to read the engine fuses, but at the same time
 502 * we need to know which engines are available on the system to know which
  503 * forcewake domains are present. We solve this by initializing the forcewake
 504 * domains based on the full engine mask in the platform capabilities before
 505 * calling this function and pruning the domains for fused-off engines
 506 * afterwards.
 507 */
 508static intel_engine_mask_t init_engine_mask(struct intel_gt *gt)
 509{
 510        struct drm_i915_private *i915 = gt->i915;
 511        struct intel_gt_info *info = &gt->info;
 512        struct intel_uncore *uncore = gt->uncore;
 513        unsigned int logical_vdbox = 0;
 514        unsigned int i;
 515        u32 media_fuse;
 516        u16 vdbox_mask;
 517        u16 vebox_mask;
 518
 519        info->engine_mask = INTEL_INFO(i915)->platform_engine_mask;
 520
 521        if (GRAPHICS_VER(i915) < 11)
 522                return info->engine_mask;
 523
 524        /*
 525         * On newer platforms the fusing register is called 'enable' and has
 526         * enable semantics, while on older platforms it is called 'disable'
  527         * and its bits have disable semantics.
 528         */
 529        media_fuse = intel_uncore_read(uncore, GEN11_GT_VEBOX_VDBOX_DISABLE);
 530        if (GRAPHICS_VER_FULL(i915) < IP_VER(12, 50))
 531                media_fuse = ~media_fuse;
 532
 533        vdbox_mask = media_fuse & GEN11_GT_VDBOX_DISABLE_MASK;
 534        vebox_mask = (media_fuse & GEN11_GT_VEBOX_DISABLE_MASK) >>
 535                      GEN11_GT_VEBOX_DISABLE_SHIFT;
 536
 537        for (i = 0; i < I915_MAX_VCS; i++) {
 538                if (!HAS_ENGINE(gt, _VCS(i))) {
 539                        vdbox_mask &= ~BIT(i);
 540                        continue;
 541                }
 542
 543                if (!(BIT(i) & vdbox_mask)) {
 544                        info->engine_mask &= ~BIT(_VCS(i));
 545                        drm_dbg(&i915->drm, "vcs%u fused off\n", i);
 546                        continue;
 547                }
 548
 549                if (gen11_vdbox_has_sfc(i915, i, logical_vdbox, vdbox_mask))
 550                        gt->info.vdbox_sfc_access |= BIT(i);
 551                logical_vdbox++;
 552        }
 553        drm_dbg(&i915->drm, "vdbox enable: %04x, instances: %04lx\n",
 554                vdbox_mask, VDBOX_MASK(gt));
 555        GEM_BUG_ON(vdbox_mask != VDBOX_MASK(gt));
 556
 557        for (i = 0; i < I915_MAX_VECS; i++) {
 558                if (!HAS_ENGINE(gt, _VECS(i))) {
 559                        vebox_mask &= ~BIT(i);
 560                        continue;
 561                }
 562
 563                if (!(BIT(i) & vebox_mask)) {
 564                        info->engine_mask &= ~BIT(_VECS(i));
 565                        drm_dbg(&i915->drm, "vecs%u fused off\n", i);
 566                }
 567        }
 568        drm_dbg(&i915->drm, "vebox enable: %04x, instances: %04lx\n",
 569                vebox_mask, VEBOX_MASK(gt));
 570        GEM_BUG_ON(vebox_mask != VEBOX_MASK(gt));
 571
 572        return info->engine_mask;
 573}
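/*
 * Editor's sketch with a made-up decode result: if vdbox_mask comes out as
 * 0xf3 after the masking/inversion above (bits 2 and 3 clear), vcs2 and vcs3
 * are treated as fused off and pruned from info->engine_mask, provided the
 * platform claimed them in platform_engine_mask to begin with.
 */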
 574
 575/**
 576 * intel_engines_init_mmio() - allocate and prepare the Engine Command Streamers
 577 * @gt: pointer to struct intel_gt
 578 *
 579 * Return: non-zero if the initialization failed.
 580 */
 581int intel_engines_init_mmio(struct intel_gt *gt)
 582{
 583        struct drm_i915_private *i915 = gt->i915;
 584        const unsigned int engine_mask = init_engine_mask(gt);
 585        unsigned int mask = 0;
 586        unsigned int i;
 587        int err;
 588
 589        drm_WARN_ON(&i915->drm, engine_mask == 0);
 590        drm_WARN_ON(&i915->drm, engine_mask &
 591                    GENMASK(BITS_PER_TYPE(mask) - 1, I915_NUM_ENGINES));
 592
 593        if (i915_inject_probe_failure(i915))
 594                return -ENODEV;
 595
 596        for (i = 0; i < ARRAY_SIZE(intel_engines); i++) {
 597                if (!HAS_ENGINE(gt, i))
 598                        continue;
 599
 600                err = intel_engine_setup(gt, i);
 601                if (err)
 602                        goto cleanup;
 603
 604                mask |= BIT(i);
 605        }
 606
 607        /*
  608         * Catch failures to update the intel_engines table when new engines
  609         * are added to the driver: warn about and disable the forgotten
  610         * engines rather than advertising engines that were never set up.
 611         */
 612        if (drm_WARN_ON(&i915->drm, mask != engine_mask))
 613                gt->info.engine_mask = mask;
 614
 615        gt->info.num_engines = hweight32(mask);
 616
 617        intel_gt_check_and_clear_faults(gt);
 618
 619        intel_setup_engine_capabilities(gt);
 620
 621        intel_uncore_prune_engine_fw_domains(gt->uncore, gt);
 622
 623        return 0;
 624
 625cleanup:
 626        intel_engines_free(gt);
 627        return err;
 628}
 629
 630void intel_engine_init_execlists(struct intel_engine_cs *engine)
 631{
 632        struct intel_engine_execlists * const execlists = &engine->execlists;
 633
 634        execlists->port_mask = 1;
 635        GEM_BUG_ON(!is_power_of_2(execlists_num_ports(execlists)));
 636        GEM_BUG_ON(execlists_num_ports(execlists) > EXECLIST_MAX_PORTS);
 637
 638        memset(execlists->pending, 0, sizeof(execlists->pending));
 639        execlists->active =
 640                memset(execlists->inflight, 0, sizeof(execlists->inflight));
 641}
 642
 643static void cleanup_status_page(struct intel_engine_cs *engine)
 644{
 645        struct i915_vma *vma;
 646
 647        /* Prevent writes into HWSP after returning the page to the system */
 648        intel_engine_set_hwsp_writemask(engine, ~0u);
 649
 650        vma = fetch_and_zero(&engine->status_page.vma);
 651        if (!vma)
 652                return;
 653
 654        if (!HWS_NEEDS_PHYSICAL(engine->i915))
 655                i915_vma_unpin(vma);
 656
 657        i915_gem_object_unpin_map(vma->obj);
 658        i915_gem_object_put(vma->obj);
 659}
 660
 661static int pin_ggtt_status_page(struct intel_engine_cs *engine,
 662                                struct i915_gem_ww_ctx *ww,
 663                                struct i915_vma *vma)
 664{
 665        unsigned int flags;
 666
 667        if (!HAS_LLC(engine->i915) && i915_ggtt_has_aperture(engine->gt->ggtt))
 668                /*
 669                 * On g33, we cannot place HWS above 256MiB, so
 670                 * restrict its pinning to the low mappable arena.
 671                 * Though this restriction is not documented for
 672                 * gen4, gen5, or byt, they also behave similarly
 673                 * and hang if the HWS is placed at the top of the
 674                 * GTT. To generalise, it appears that all !llc
 675                 * platforms have issues with us placing the HWS
 676                 * above the mappable region (even though we never
 677                 * actually map it).
 678                 */
 679                flags = PIN_MAPPABLE;
 680        else
 681                flags = PIN_HIGH;
 682
 683        return i915_ggtt_pin(vma, ww, 0, flags);
 684}
 685
 686static int init_status_page(struct intel_engine_cs *engine)
 687{
 688        struct drm_i915_gem_object *obj;
 689        struct i915_gem_ww_ctx ww;
 690        struct i915_vma *vma;
 691        void *vaddr;
 692        int ret;
 693
 694        INIT_LIST_HEAD(&engine->status_page.timelines);
 695
 696        /*
 697         * Though the HWS register does support 36bit addresses, historically
 698         * we have had hangs and corruption reported due to wild writes if
 699         * the HWS is placed above 4G. We only allow objects to be allocated
 700         * in GFP_DMA32 for i965, and no earlier physical address users had
 701         * access to more than 4G.
 702         */
 703        obj = i915_gem_object_create_internal(engine->i915, PAGE_SIZE);
 704        if (IS_ERR(obj)) {
 705                drm_err(&engine->i915->drm,
 706                        "Failed to allocate status page\n");
 707                return PTR_ERR(obj);
 708        }
 709
 710        i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);
 711
 712        vma = i915_vma_instance(obj, &engine->gt->ggtt->vm, NULL);
 713        if (IS_ERR(vma)) {
 714                ret = PTR_ERR(vma);
 715                goto err_put;
 716        }
 717
 718        i915_gem_ww_ctx_init(&ww, true);
 719retry:
 720        ret = i915_gem_object_lock(obj, &ww);
 721        if (!ret && !HWS_NEEDS_PHYSICAL(engine->i915))
 722                ret = pin_ggtt_status_page(engine, &ww, vma);
 723        if (ret)
 724                goto err;
 725
 726        vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
 727        if (IS_ERR(vaddr)) {
 728                ret = PTR_ERR(vaddr);
 729                goto err_unpin;
 730        }
 731
 732        engine->status_page.addr = memset(vaddr, 0, PAGE_SIZE);
 733        engine->status_page.vma = vma;
 734
 735err_unpin:
 736        if (ret)
 737                i915_vma_unpin(vma);
 738err:
 739        if (ret == -EDEADLK) {
 740                ret = i915_gem_ww_ctx_backoff(&ww);
 741                if (!ret)
 742                        goto retry;
 743        }
 744        i915_gem_ww_ctx_fini(&ww);
 745err_put:
 746        if (ret)
 747                i915_gem_object_put(obj);
 748        return ret;
 749}
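/*
 * The lock/backoff loop above is the standard i915_gem_ww_ctx pattern; an
 * editor's skeleton of the idiom, with names as used in this function:
 *
 *	i915_gem_ww_ctx_init(&ww, true);
 * retry:
 *	ret = i915_gem_object_lock(obj, &ww);
 *	...				// pin/map under the ww lock
 *	if (ret == -EDEADLK) {
 *		ret = i915_gem_ww_ctx_backoff(&ww);
 *		if (!ret)
 *			goto retry;	// retry after dropping contended locks
 *	}
 *	i915_gem_ww_ctx_fini(&ww);
 */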
 750
 751static int engine_setup_common(struct intel_engine_cs *engine)
 752{
 753        int err;
 754
 755        init_llist_head(&engine->barrier_tasks);
 756
 757        err = init_status_page(engine);
 758        if (err)
 759                return err;
 760
 761        engine->breadcrumbs = intel_breadcrumbs_create(engine);
 762        if (!engine->breadcrumbs) {
 763                err = -ENOMEM;
 764                goto err_status;
 765        }
 766
 767        engine->sched_engine = i915_sched_engine_create(ENGINE_PHYSICAL);
 768        if (!engine->sched_engine) {
 769                err = -ENOMEM;
 770                goto err_sched_engine;
 771        }
 772        engine->sched_engine->private_data = engine;
 773
 774        err = intel_engine_init_cmd_parser(engine);
 775        if (err)
 776                goto err_cmd_parser;
 777
 778        intel_engine_init_execlists(engine);
 779        intel_engine_init__pm(engine);
 780        intel_engine_init_retire(engine);
 781
 782        /* Use the whole device by default */
 783        engine->sseu =
 784                intel_sseu_from_device_info(&engine->gt->info.sseu);
 785
 786        intel_engine_init_workarounds(engine);
 787        intel_engine_init_whitelist(engine);
 788        intel_engine_init_ctx_wa(engine);
 789
 790        if (GRAPHICS_VER(engine->i915) >= 12)
 791                engine->flags |= I915_ENGINE_HAS_RELATIVE_MMIO;
 792
 793        return 0;
 794
 795err_cmd_parser:
 796        i915_sched_engine_put(engine->sched_engine);
 797err_sched_engine:
 798        intel_breadcrumbs_put(engine->breadcrumbs);
 799err_status:
 800        cleanup_status_page(engine);
 801        return err;
 802}
 803
 804struct measure_breadcrumb {
 805        struct i915_request rq;
 806        struct intel_ring ring;
 807        u32 cs[2048];
 808};
 809
 810static int measure_breadcrumb_dw(struct intel_context *ce)
 811{
 812        struct intel_engine_cs *engine = ce->engine;
 813        struct measure_breadcrumb *frame;
 814        int dw;
 815
 816        GEM_BUG_ON(!engine->gt->scratch);
 817
 818        frame = kzalloc(sizeof(*frame), GFP_KERNEL);
 819        if (!frame)
 820                return -ENOMEM;
 821
 822        frame->rq.engine = engine;
 823        frame->rq.context = ce;
 824        rcu_assign_pointer(frame->rq.timeline, ce->timeline);
 825        frame->rq.hwsp_seqno = ce->timeline->hwsp_seqno;
 826
 827        frame->ring.vaddr = frame->cs;
 828        frame->ring.size = sizeof(frame->cs);
 829        frame->ring.wrap =
 830                BITS_PER_TYPE(frame->ring.size) - ilog2(frame->ring.size);
 831        frame->ring.effective_size = frame->ring.size;
 832        intel_ring_update_space(&frame->ring);
 833        frame->rq.ring = &frame->ring;
 834
 835        mutex_lock(&ce->timeline->mutex);
 836        spin_lock_irq(&engine->sched_engine->lock);
 837
 838        dw = engine->emit_fini_breadcrumb(&frame->rq, frame->cs) - frame->cs;
 839
 840        spin_unlock_irq(&engine->sched_engine->lock);
 841        mutex_unlock(&ce->timeline->mutex);
 842
 843        GEM_BUG_ON(dw & 1); /* RING_TAIL must be qword aligned */
 844
 845        kfree(frame);
 846        return dw;
 847}
 848
 849struct intel_context *
 850intel_engine_create_pinned_context(struct intel_engine_cs *engine,
 851                                   struct i915_address_space *vm,
 852                                   unsigned int ring_size,
 853                                   unsigned int hwsp,
 854                                   struct lock_class_key *key,
 855                                   const char *name)
 856{
 857        struct intel_context *ce;
 858        int err;
 859
 860        ce = intel_context_create(engine);
 861        if (IS_ERR(ce))
 862                return ce;
 863
 864        __set_bit(CONTEXT_BARRIER_BIT, &ce->flags);
 865        ce->timeline = page_pack_bits(NULL, hwsp);
 866        ce->ring = NULL;
 867        ce->ring_size = ring_size;
 868
 869        i915_vm_put(ce->vm);
 870        ce->vm = i915_vm_get(vm);
 871
 872        err = intel_context_pin(ce); /* perma-pin so it is always available */
 873        if (err) {
 874                intel_context_put(ce);
 875                return ERR_PTR(err);
 876        }
 877
 878        /*
 879         * Give our perma-pinned kernel timelines a separate lockdep class,
 880         * so that we can use them from within the normal user timelines
 881         * should we need to inject GPU operations during their request
 882         * construction.
 883         */
 884        lockdep_set_class_and_name(&ce->timeline->mutex, key, name);
 885
 886        return ce;
 887}
 888
 889void intel_engine_destroy_pinned_context(struct intel_context *ce)
 890{
 891        struct intel_engine_cs *engine = ce->engine;
 892        struct i915_vma *hwsp = engine->status_page.vma;
 893
 894        GEM_BUG_ON(ce->timeline->hwsp_ggtt != hwsp);
 895
 896        mutex_lock(&hwsp->vm->mutex);
 897        list_del(&ce->timeline->engine_link);
 898        mutex_unlock(&hwsp->vm->mutex);
 899
 900        intel_context_unpin(ce);
 901        intel_context_put(ce);
 902}
 903
 904static struct intel_context *
 905create_kernel_context(struct intel_engine_cs *engine)
 906{
 907        static struct lock_class_key kernel;
 908
 909        return intel_engine_create_pinned_context(engine, engine->gt->vm, SZ_4K,
 910                                                  I915_GEM_HWS_SEQNO_ADDR,
 911                                                  &kernel, "kernel_context");
 912}
 913
 914/**
  915 * engine_init_common - initialize engine state which might require hw access
 916 * @engine: Engine to initialize.
 917 *
  918 * Initializes @engine structure members shared between legacy and execlists
 919 * submission modes which do require hardware access.
 920 *
  921 * Typically done at later stages of submission mode specific engine setup.
 922 *
 923 * Returns zero on success or an error code on failure.
 924 */
 925static int engine_init_common(struct intel_engine_cs *engine)
 926{
 927        struct intel_context *ce;
 928        int ret;
 929
 930        engine->set_default_submission(engine);
 931
 932        /*
 933         * We may need to do things with the shrinker which
 934         * require us to immediately switch back to the default
 935         * context. This can cause a problem as pinning the
 936         * default context also requires GTT space which may not
 937         * be available. To avoid this we always pin the default
 938         * context.
 939         */
 940        ce = create_kernel_context(engine);
 941        if (IS_ERR(ce))
 942                return PTR_ERR(ce);
 943
 944        ret = measure_breadcrumb_dw(ce);
 945        if (ret < 0)
 946                goto err_context;
 947
 948        engine->emit_fini_breadcrumb_dw = ret;
 949        engine->kernel_context = ce;
 950
 951        return 0;
 952
 953err_context:
 954        intel_engine_destroy_pinned_context(ce);
 955        return ret;
 956}
 957
 958int intel_engines_init(struct intel_gt *gt)
 959{
 960        int (*setup)(struct intel_engine_cs *engine);
 961        struct intel_engine_cs *engine;
 962        enum intel_engine_id id;
 963        int err;
 964
 965        if (intel_uc_uses_guc_submission(&gt->uc)) {
 966                gt->submission_method = INTEL_SUBMISSION_GUC;
 967                setup = intel_guc_submission_setup;
 968        } else if (HAS_EXECLISTS(gt->i915)) {
 969                gt->submission_method = INTEL_SUBMISSION_ELSP;
 970                setup = intel_execlists_submission_setup;
 971        } else {
 972                gt->submission_method = INTEL_SUBMISSION_RING;
 973                setup = intel_ring_submission_setup;
 974        }
 975
 976        for_each_engine(engine, gt, id) {
 977                err = engine_setup_common(engine);
 978                if (err)
 979                        return err;
 980
 981                err = setup(engine);
 982                if (err)
 983                        return err;
 984
 985                err = engine_init_common(engine);
 986                if (err)
 987                        return err;
 988
 989                intel_engine_add_user(engine);
 990        }
 991
 992        return 0;
 993}
 994
 995/**
  996 * intel_engine_cleanup_common - cleans up the engine state created by
  997 *                               the common initializers.
 998 * @engine: Engine to cleanup.
 999 *
1000 * This cleans up everything created by the common helpers.
1001 */
1002void intel_engine_cleanup_common(struct intel_engine_cs *engine)
1003{
1004        GEM_BUG_ON(!list_empty(&engine->sched_engine->requests));
1005
1006        i915_sched_engine_put(engine->sched_engine);
1007        intel_breadcrumbs_put(engine->breadcrumbs);
1008
1009        intel_engine_fini_retire(engine);
1010        intel_engine_cleanup_cmd_parser(engine);
1011
1012        if (engine->default_state)
1013                fput(engine->default_state);
1014
1015        if (engine->kernel_context)
1016                intel_engine_destroy_pinned_context(engine->kernel_context);
1017
1018        GEM_BUG_ON(!llist_empty(&engine->barrier_tasks));
1019        cleanup_status_page(engine);
1020
1021        intel_wa_list_free(&engine->ctx_wa_list);
1022        intel_wa_list_free(&engine->wa_list);
1023        intel_wa_list_free(&engine->whitelist);
1024}
1025
1026/**
1027 * intel_engine_resume - re-initializes the HW state of the engine
1028 * @engine: Engine to resume.
1029 *
1030 * Returns zero on success or an error code on failure.
1031 */
1032int intel_engine_resume(struct intel_engine_cs *engine)
1033{
1034        intel_engine_apply_workarounds(engine);
1035        intel_engine_apply_whitelist(engine);
1036
1037        return engine->resume(engine);
1038}
1039
1040u64 intel_engine_get_active_head(const struct intel_engine_cs *engine)
1041{
1042        struct drm_i915_private *i915 = engine->i915;
1043
1044        u64 acthd;
1045
1046        if (GRAPHICS_VER(i915) >= 8)
1047                acthd = ENGINE_READ64(engine, RING_ACTHD, RING_ACTHD_UDW);
1048        else if (GRAPHICS_VER(i915) >= 4)
1049                acthd = ENGINE_READ(engine, RING_ACTHD);
1050        else
1051                acthd = ENGINE_READ(engine, ACTHD);
1052
1053        return acthd;
1054}
1055
1056u64 intel_engine_get_last_batch_head(const struct intel_engine_cs *engine)
1057{
1058        u64 bbaddr;
1059
1060        if (GRAPHICS_VER(engine->i915) >= 8)
1061                bbaddr = ENGINE_READ64(engine, RING_BBADDR, RING_BBADDR_UDW);
1062        else
1063                bbaddr = ENGINE_READ(engine, RING_BBADDR);
1064
1065        return bbaddr;
1066}
1067
1068static unsigned long stop_timeout(const struct intel_engine_cs *engine)
1069{
1070        if (in_atomic() || irqs_disabled()) /* inside atomic preempt-reset? */
1071                return 0;
1072
1073        /*
1074         * If we are doing a normal GPU reset, we can take our time and allow
1075         * the engine to quiesce. We've stopped submission to the engine, and
1076         * if we wait long enough an innocent context should complete and
1077         * leave the engine idle. So they should not be caught unaware by
1078         * the forthcoming GPU reset (which usually follows the stop_cs)!
1079         */
1080        return READ_ONCE(engine->props.stop_timeout_ms);
1081}
1082
1083static int __intel_engine_stop_cs(struct intel_engine_cs *engine,
1084                                  int fast_timeout_us,
1085                                  int slow_timeout_ms)
1086{
1087        struct intel_uncore *uncore = engine->uncore;
1088        const i915_reg_t mode = RING_MI_MODE(engine->mmio_base);
1089        int err;
1090
1091        intel_uncore_write_fw(uncore, mode, _MASKED_BIT_ENABLE(STOP_RING));
1092        err = __intel_wait_for_register_fw(engine->uncore, mode,
1093                                           MODE_IDLE, MODE_IDLE,
1094                                           fast_timeout_us,
1095                                           slow_timeout_ms,
1096                                           NULL);
1097
1098        /* A final mmio read to let GPU writes be hopefully flushed to memory */
1099        intel_uncore_posting_read_fw(uncore, mode);
1100        return err;
1101}
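/*
 * Editor's note on the masked write above: i915 "masked" registers take a
 * write-enable mask in the upper 16 bits, so _MASKED_BIT_ENABLE(STOP_RING)
 * expands to roughly (STOP_RING << 16) | STOP_RING and updates only that
 * bit, while _MASKED_BIT_DISABLE(STOP_RING) keeps the enable bit in the
 * upper half with a zero payload, which is how intel_engine_cancel_stop_cs()
 * undoes it below.
 */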
1102
1103int intel_engine_stop_cs(struct intel_engine_cs *engine)
1104{
1105        int err = 0;
1106
1107        if (GRAPHICS_VER(engine->i915) < 3)
1108                return -ENODEV;
1109
1110        ENGINE_TRACE(engine, "\n");
1111        if (__intel_engine_stop_cs(engine, 1000, stop_timeout(engine))) {
1112                ENGINE_TRACE(engine,
1113                             "timed out on STOP_RING -> IDLE; HEAD:%04x, TAIL:%04x\n",
1114                             ENGINE_READ_FW(engine, RING_HEAD) & HEAD_ADDR,
1115                             ENGINE_READ_FW(engine, RING_TAIL) & TAIL_ADDR);
1116
1117                /*
1118                 * Sometimes we observe that the idle flag is not
1119                 * set even though the ring is empty. So double
1120                 * check before giving up.
1121                 */
1122                if ((ENGINE_READ_FW(engine, RING_HEAD) & HEAD_ADDR) !=
1123                    (ENGINE_READ_FW(engine, RING_TAIL) & TAIL_ADDR))
1124                        err = -ETIMEDOUT;
1125        }
1126
1127        return err;
1128}
1129
1130void intel_engine_cancel_stop_cs(struct intel_engine_cs *engine)
1131{
1132        ENGINE_TRACE(engine, "\n");
1133
1134        ENGINE_WRITE_FW(engine, RING_MI_MODE, _MASKED_BIT_DISABLE(STOP_RING));
1135}
1136
1137const char *i915_cache_level_str(struct drm_i915_private *i915, int type)
1138{
1139        switch (type) {
1140        case I915_CACHE_NONE: return " uncached";
1141        case I915_CACHE_LLC: return HAS_LLC(i915) ? " LLC" : " snooped";
1142        case I915_CACHE_L3_LLC: return " L3+LLC";
1143        case I915_CACHE_WT: return " WT";
1144        default: return "";
1145        }
1146}
1147
1148static u32
1149read_subslice_reg(const struct intel_engine_cs *engine,
1150                  int slice, int subslice, i915_reg_t reg)
1151{
1152        return intel_uncore_read_with_mcr_steering(engine->uncore, reg,
1153                                                   slice, subslice);
1154}
1155
1156/* NB: please notice the memset */
1157void intel_engine_get_instdone(const struct intel_engine_cs *engine,
1158                               struct intel_instdone *instdone)
1159{
1160        struct drm_i915_private *i915 = engine->i915;
1161        const struct sseu_dev_info *sseu = &engine->gt->info.sseu;
1162        struct intel_uncore *uncore = engine->uncore;
1163        u32 mmio_base = engine->mmio_base;
1164        int slice;
1165        int subslice;
1166
1167        memset(instdone, 0, sizeof(*instdone));
1168
1169        switch (GRAPHICS_VER(i915)) {
1170        default:
1171                instdone->instdone =
1172                        intel_uncore_read(uncore, RING_INSTDONE(mmio_base));
1173
1174                if (engine->id != RCS0)
1175                        break;
1176
1177                instdone->slice_common =
1178                        intel_uncore_read(uncore, GEN7_SC_INSTDONE);
1179                if (GRAPHICS_VER(i915) >= 12) {
1180                        instdone->slice_common_extra[0] =
1181                                intel_uncore_read(uncore, GEN12_SC_INSTDONE_EXTRA);
1182                        instdone->slice_common_extra[1] =
1183                                intel_uncore_read(uncore, GEN12_SC_INSTDONE_EXTRA2);
1184                }
1185                for_each_instdone_slice_subslice(i915, sseu, slice, subslice) {
1186                        instdone->sampler[slice][subslice] =
1187                                read_subslice_reg(engine, slice, subslice,
1188                                                  GEN7_SAMPLER_INSTDONE);
1189                        instdone->row[slice][subslice] =
1190                                read_subslice_reg(engine, slice, subslice,
1191                                                  GEN7_ROW_INSTDONE);
1192                }
1193                break;
1194        case 7:
1195                instdone->instdone =
1196                        intel_uncore_read(uncore, RING_INSTDONE(mmio_base));
1197
1198                if (engine->id != RCS0)
1199                        break;
1200
1201                instdone->slice_common =
1202                        intel_uncore_read(uncore, GEN7_SC_INSTDONE);
1203                instdone->sampler[0][0] =
1204                        intel_uncore_read(uncore, GEN7_SAMPLER_INSTDONE);
1205                instdone->row[0][0] =
1206                        intel_uncore_read(uncore, GEN7_ROW_INSTDONE);
1207
1208                break;
1209        case 6:
1210        case 5:
1211        case 4:
1212                instdone->instdone =
1213                        intel_uncore_read(uncore, RING_INSTDONE(mmio_base));
1214                if (engine->id == RCS0)
1215                        /* HACK: Using the wrong struct member */
1216                        instdone->slice_common =
1217                                intel_uncore_read(uncore, GEN4_INSTDONE1);
1218                break;
1219        case 3:
1220        case 2:
1221                instdone->instdone = intel_uncore_read(uncore, GEN2_INSTDONE);
1222                break;
1223        }
1224}
1225
1226static bool ring_is_idle(struct intel_engine_cs *engine)
1227{
1228        bool idle = true;
1229
1230        if (I915_SELFTEST_ONLY(!engine->mmio_base))
1231                return true;
1232
1233        if (!intel_engine_pm_get_if_awake(engine))
1234                return true;
1235
1236        /* First check that no commands are left in the ring */
1237        if ((ENGINE_READ(engine, RING_HEAD) & HEAD_ADDR) !=
1238            (ENGINE_READ(engine, RING_TAIL) & TAIL_ADDR))
1239                idle = false;
1240
1241        /* No bit for gen2, so assume the CS parser is idle */
1242        if (GRAPHICS_VER(engine->i915) > 2 &&
1243            !(ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE))
1244                idle = false;
1245
1246        intel_engine_pm_put(engine);
1247
1248        return idle;
1249}
1250
1251void __intel_engine_flush_submission(struct intel_engine_cs *engine, bool sync)
1252{
1253        struct tasklet_struct *t = &engine->sched_engine->tasklet;
1254
1255        if (!t->callback)
1256                return;
1257
1258        local_bh_disable();
1259        if (tasklet_trylock(t)) {
1260                /* Must wait for any GPU reset in progress. */
1261                if (__tasklet_is_enabled(t))
1262                        t->callback(t);
1263                tasklet_unlock(t);
1264        }
1265        local_bh_enable();
1266
1267        /* Synchronise and wait for the tasklet on another CPU */
1268        if (sync)
1269                tasklet_unlock_wait(t);
1270}
1271
1272/**
1273 * intel_engine_is_idle() - Report if the engine has finished process all work
1274 * @engine: the intel_engine_cs
1275 *
1276 * Return true if there are no requests pending, nothing left to be submitted
1277 * to hardware, and that the engine is idle.
1278 */
1279bool intel_engine_is_idle(struct intel_engine_cs *engine)
1280{
 1281        /* More white lies: if wedged, hw state is inconsistent */
1282        if (intel_gt_is_wedged(engine->gt))
1283                return true;
1284
1285        if (!intel_engine_pm_is_awake(engine))
1286                return true;
1287
1288        /* Waiting to drain ELSP? */
1289        intel_synchronize_hardirq(engine->i915);
1290        intel_engine_flush_submission(engine);
1291
1292        /* ELSP is empty, but there are ready requests? E.g. after reset */
1293        if (!i915_sched_engine_is_empty(engine->sched_engine))
1294                return false;
1295
1296        /* Ring stopped? */
1297        return ring_is_idle(engine);
1298}
1299
1300bool intel_engines_are_idle(struct intel_gt *gt)
1301{
1302        struct intel_engine_cs *engine;
1303        enum intel_engine_id id;
1304
1305        /*
1306         * If the driver is wedged, HW state may be very inconsistent and
1307         * report that it is still busy, even though we have stopped using it.
1308         */
1309        if (intel_gt_is_wedged(gt))
1310                return true;
1311
1312        /* Already parked (and passed an idleness test); must still be idle */
1313        if (!READ_ONCE(gt->awake))
1314                return true;
1315
1316        for_each_engine(engine, gt, id) {
1317                if (!intel_engine_is_idle(engine))
1318                        return false;
1319        }
1320
1321        return true;
1322}
1323
1324bool intel_engine_irq_enable(struct intel_engine_cs *engine)
1325{
1326        if (!engine->irq_enable)
1327                return false;
1328
1329        /* Caller disables interrupts */
1330        spin_lock(&engine->gt->irq_lock);
1331        engine->irq_enable(engine);
1332        spin_unlock(&engine->gt->irq_lock);
1333
1334        return true;
1335}
1336
1337void intel_engine_irq_disable(struct intel_engine_cs *engine)
1338{
1339        if (!engine->irq_disable)
1340                return;
1341
1342        /* Caller disables interrupts */
1343        spin_lock(&engine->gt->irq_lock);
1344        engine->irq_disable(engine);
1345        spin_unlock(&engine->gt->irq_lock);
1346}
1347
1348void intel_engines_reset_default_submission(struct intel_gt *gt)
1349{
1350        struct intel_engine_cs *engine;
1351        enum intel_engine_id id;
1352
1353        for_each_engine(engine, gt, id) {
1354                if (engine->sanitize)
1355                        engine->sanitize(engine);
1356
1357                engine->set_default_submission(engine);
1358        }
1359}
1360
1361bool intel_engine_can_store_dword(struct intel_engine_cs *engine)
1362{
1363        switch (GRAPHICS_VER(engine->i915)) {
1364        case 2:
1365                return false; /* uses physical not virtual addresses */
1366        case 3:
1367                /* maybe only uses physical not virtual addresses */
1368                return !(IS_I915G(engine->i915) || IS_I915GM(engine->i915));
1369        case 4:
1370                return !IS_I965G(engine->i915); /* who knows! */
1371        case 6:
1372                return engine->class != VIDEO_DECODE_CLASS; /* b0rked */
1373        default:
1374                return true;
1375        }
1376}
1377
1378static struct intel_timeline *get_timeline(struct i915_request *rq)
1379{
1380        struct intel_timeline *tl;
1381
1382        /*
1383         * Even though we are holding the engine->sched_engine->lock here, there
 1384         * is no control over the submission queue per se and we are
1385         * inspecting the active state at a random point in time, with an
1386         * unknown queue. Play safe and make sure the timeline remains valid.
1387         * (Only being used for pretty printing, one extra kref shouldn't
1388         * cause a camel stampede!)
1389         */
1390        rcu_read_lock();
1391        tl = rcu_dereference(rq->timeline);
1392        if (!kref_get_unless_zero(&tl->kref))
1393                tl = NULL;
1394        rcu_read_unlock();
1395
1396        return tl;
1397}
1398
1399static int print_ring(char *buf, int sz, struct i915_request *rq)
1400{
1401        int len = 0;
1402
1403        if (!i915_request_signaled(rq)) {
1404                struct intel_timeline *tl = get_timeline(rq);
1405
1406                len = scnprintf(buf, sz,
1407                                "ring:{start:%08x, hwsp:%08x, seqno:%08x, runtime:%llums}, ",
1408                                i915_ggtt_offset(rq->ring->vma),
1409                                tl ? tl->hwsp_offset : 0,
1410                                hwsp_seqno(rq),
1411                                DIV_ROUND_CLOSEST_ULL(intel_context_get_total_runtime_ns(rq->context),
1412                                                      1000 * 1000));
1413
1414                if (tl)
1415                        intel_timeline_put(tl);
1416        }
1417
1418        return len;
1419}
1420
1421static void hexdump(struct drm_printer *m, const void *buf, size_t len)
1422{
1423        const size_t rowsize = 8 * sizeof(u32);
1424        const void *prev = NULL;
1425        bool skip = false;
1426        size_t pos;
1427
1428        for (pos = 0; pos < len; pos += rowsize) {
1429                char line[128];
1430
1431                if (prev && !memcmp(prev, buf + pos, rowsize)) {
1432                        if (!skip) {
1433                                drm_printf(m, "*\n");
1434                                skip = true;
1435                        }
1436                        continue;
1437                }
1438
1439                WARN_ON_ONCE(hex_dump_to_buffer(buf + pos, len - pos,
1440                                                rowsize, sizeof(u32),
1441                                                line, sizeof(line),
1442                                                false) >= sizeof(line));
1443                drm_printf(m, "[%04zx] %s\n", pos, line);
1444
1445                prev = buf + pos;
1446                skip = false;
1447        }
1448}
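/*
 * Editor's illustration of the output format above (made-up bytes): each
 * 32-byte row prints roughly as
 *
 *	[0020] 00000001 0badf00d 00000000 00000000 00000000 00000000 00000000 00000000
 *
 * and runs of identical rows collapse into a single "*" line, mirroring
 * hexdump(1)'s squeezing behaviour.
 */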
1449
1450static const char *repr_timer(const struct timer_list *t)
1451{
1452        if (!READ_ONCE(t->expires))
1453                return "inactive";
1454
1455        if (timer_pending(t))
1456                return "active";
1457
1458        return "expired";
1459}
1460
1461static void intel_engine_print_registers(struct intel_engine_cs *engine,
1462                                         struct drm_printer *m)
1463{
1464        struct drm_i915_private *dev_priv = engine->i915;
1465        struct intel_engine_execlists * const execlists = &engine->execlists;
1466        u64 addr;
1467
 1468        if (engine->class == RENDER_CLASS && IS_GRAPHICS_VER(dev_priv, 4, 7))
1469                drm_printf(m, "\tCCID: 0x%08x\n", ENGINE_READ(engine, CCID));
1470        if (HAS_EXECLISTS(dev_priv)) {
1471                drm_printf(m, "\tEL_STAT_HI: 0x%08x\n",
1472                           ENGINE_READ(engine, RING_EXECLIST_STATUS_HI));
1473                drm_printf(m, "\tEL_STAT_LO: 0x%08x\n",
1474                           ENGINE_READ(engine, RING_EXECLIST_STATUS_LO));
1475        }
1476        drm_printf(m, "\tRING_START: 0x%08x\n",
1477                   ENGINE_READ(engine, RING_START));
1478        drm_printf(m, "\tRING_HEAD:  0x%08x\n",
1479                   ENGINE_READ(engine, RING_HEAD) & HEAD_ADDR);
1480        drm_printf(m, "\tRING_TAIL:  0x%08x\n",
1481                   ENGINE_READ(engine, RING_TAIL) & TAIL_ADDR);
1482        drm_printf(m, "\tRING_CTL:   0x%08x%s\n",
1483                   ENGINE_READ(engine, RING_CTL),
1484                   ENGINE_READ(engine, RING_CTL) & (RING_WAIT | RING_WAIT_SEMAPHORE) ? " [waiting]" : "");
1485        if (GRAPHICS_VER(engine->i915) > 2) {
1486                drm_printf(m, "\tRING_MODE:  0x%08x%s\n",
1487                           ENGINE_READ(engine, RING_MI_MODE),
1488                           ENGINE_READ(engine, RING_MI_MODE) & (MODE_IDLE) ? " [idle]" : "");
1489        }
1490
1491        if (GRAPHICS_VER(dev_priv) >= 6) {
1492                drm_printf(m, "\tRING_IMR:   0x%08x\n",
1493                           ENGINE_READ(engine, RING_IMR));
1494                drm_printf(m, "\tRING_ESR:   0x%08x\n",
1495                           ENGINE_READ(engine, RING_ESR));
1496                drm_printf(m, "\tRING_EMR:   0x%08x\n",
1497                           ENGINE_READ(engine, RING_EMR));
1498                drm_printf(m, "\tRING_EIR:   0x%08x\n",
1499                           ENGINE_READ(engine, RING_EIR));
1500        }
1501
1502        addr = intel_engine_get_active_head(engine);
1503        drm_printf(m, "\tACTHD:  0x%08x_%08x\n",
1504                   upper_32_bits(addr), lower_32_bits(addr));
1505        addr = intel_engine_get_last_batch_head(engine);
1506        drm_printf(m, "\tBBADDR: 0x%08x_%08x\n",
1507                   upper_32_bits(addr), lower_32_bits(addr));
1508        if (GRAPHICS_VER(dev_priv) >= 8)
1509                addr = ENGINE_READ64(engine, RING_DMA_FADD, RING_DMA_FADD_UDW);
1510        else if (GRAPHICS_VER(dev_priv) >= 4)
1511                addr = ENGINE_READ(engine, RING_DMA_FADD);
1512        else
1513                addr = ENGINE_READ(engine, DMA_FADD_I8XX);
1514        drm_printf(m, "\tDMA_FADDR: 0x%08x_%08x\n",
1515                   upper_32_bits(addr), lower_32_bits(addr));
1516        if (GRAPHICS_VER(dev_priv) >= 4) {
1517                drm_printf(m, "\tIPEIR: 0x%08x\n",
1518                           ENGINE_READ(engine, RING_IPEIR));
1519                drm_printf(m, "\tIPEHR: 0x%08x\n",
1520                           ENGINE_READ(engine, RING_IPEHR));
1521        } else {
1522                drm_printf(m, "\tIPEIR: 0x%08x\n", ENGINE_READ(engine, IPEIR));
1523                drm_printf(m, "\tIPEHR: 0x%08x\n", ENGINE_READ(engine, IPEHR));
1524        }
1525
1526        if (intel_engine_uses_guc(engine)) {
1527                /* nothing to print yet */
1528        } else if (HAS_EXECLISTS(dev_priv)) {
1529                struct i915_request * const *port, *rq;
1530                const u32 *hws =
1531                        &engine->status_page.addr[I915_HWS_CSB_BUF0_INDEX];
1532                const u8 num_entries = execlists->csb_size;
1533                unsigned int idx;
1534                u8 read, write;
1535
1536                drm_printf(m, "\tExeclist tasklet queued? %s (%s), preempt? %s, timeslice? %s\n",
1537                           yesno(test_bit(TASKLET_STATE_SCHED,
1538                                          &engine->sched_engine->tasklet.state)),
1539                           enableddisabled(!atomic_read(&engine->sched_engine->tasklet.count)),
1540                           repr_timer(&engine->execlists.preempt),
1541                           repr_timer(&engine->execlists.timer));
1542
1543                read = execlists->csb_head;
1544                write = READ_ONCE(*execlists->csb_write);
1545
1546                drm_printf(m, "\tExeclist status: 0x%08x %08x; CSB read:%d, write:%d, entries:%d\n",
1547                           ENGINE_READ(engine, RING_EXECLIST_STATUS_LO),
1548                           ENGINE_READ(engine, RING_EXECLIST_STATUS_HI),
1549                           read, write, num_entries);
1550
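                    /*
                     * Replay the CSB (context status buffer) entries between
                     * the last position read by the driver and the current
                     * write pointer. The indices are clamped into range and
                     * the write index unwrapped so that the walk below visits
                     * each outstanding entry exactly once; every entry is a
                     * (status, context ID) pair of dwords in the HWSP.
                     */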
1551                if (read >= num_entries)
1552                        read = 0;
1553                if (write >= num_entries)
1554                        write = 0;
1555                if (read > write)
1556                        write += num_entries;
1557                while (read < write) {
1558                        idx = ++read % num_entries;
1559                        drm_printf(m, "\tExeclist CSB[%d]: 0x%08x, context: %d\n",
1560                                   idx, hws[idx * 2], hws[idx * 2 + 1]);
1561                }
1562
1563                i915_sched_engine_active_lock_bh(engine->sched_engine);
1564                rcu_read_lock();
1565                for (port = execlists->active; (rq = *port); port++) {
1566                        char hdr[160];
1567                        int len;
1568
1569                        len = scnprintf(hdr, sizeof(hdr),
1570                                        "\t\tActive[%d]:  ccid:%08x%s%s, ",
1571                                        (int)(port - execlists->active),
1572                                        rq->context->lrc.ccid,
1573                                        intel_context_is_closed(rq->context) ? "!" : "",
1574                                        intel_context_is_banned(rq->context) ? "*" : "");
1575                        len += print_ring(hdr + len, sizeof(hdr) - len, rq);
1576                        scnprintf(hdr + len, sizeof(hdr) - len, "rq: ");
1577                        i915_request_show(m, rq, hdr, 0);
1578                }
1579                for (port = execlists->pending; (rq = *port); port++) {
1580                        char hdr[160];
1581                        int len;
1582
1583                        len = scnprintf(hdr, sizeof(hdr),
1584                                        "\t\tPending[%d]: ccid:%08x%s%s, ",
1585                                        (int)(port - execlists->pending),
1586                                        rq->context->lrc.ccid,
1587                                        intel_context_is_closed(rq->context) ? "!" : "",
1588                                        intel_context_is_banned(rq->context) ? "*" : "");
1589                        len += print_ring(hdr + len, sizeof(hdr) - len, rq);
1590                        scnprintf(hdr + len, sizeof(hdr) - len, "rq: ");
1591                        i915_request_show(m, rq, hdr, 0);
1592                }
1593                rcu_read_unlock();
1594                i915_sched_engine_active_unlock_bh(engine->sched_engine);
1595        } else if (GRAPHICS_VER(dev_priv) > 6) {
1596                drm_printf(m, "\tPP_DIR_BASE: 0x%08x\n",
1597                           ENGINE_READ(engine, RING_PP_DIR_BASE));
1598                drm_printf(m, "\tPP_DIR_BASE_READ: 0x%08x\n",
1599                           ENGINE_READ(engine, RING_PP_DIR_BASE_READ));
1600                drm_printf(m, "\tPP_DIR_DCLV: 0x%08x\n",
1601                           ENGINE_READ(engine, RING_PP_DIR_DCLV));
1602        }
1603}
1604
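    /*
     * Hexdump the portion of the ringbuffer occupied by @rq, from its head
     * to its tail, handling the wrap at the end of the ring. The temporary
     * copy uses GFP_ATOMIC because this can be reached with the scheduler
     * lock held and interrupts off; if the allocation fails the dump is
     * simply skipped.
     */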
1605static void print_request_ring(struct drm_printer *m, struct i915_request *rq)
1606{
1607        void *ring;
1608        int size;
1609
1610        drm_printf(m,
1611                   "[head %04x, postfix %04x, tail %04x, batch 0x%08x_%08x]:\n",
1612                   rq->head, rq->postfix, rq->tail,
1613                   rq->batch ? upper_32_bits(rq->batch->node.start) : ~0u,
1614                   rq->batch ? lower_32_bits(rq->batch->node.start) : ~0u);
1615
1616        size = rq->tail - rq->head;
1617        if (rq->tail < rq->head)
1618                size += rq->ring->size;
1619
1620        ring = kmalloc(size, GFP_ATOMIC);
1621        if (ring) {
1622                const void *vaddr = rq->ring->vaddr;
1623                unsigned int head = rq->head;
1624                unsigned int len = 0;
1625
1626                if (rq->tail < head) {
1627                        len = rq->ring->size - head;
1628                        memcpy(ring, vaddr + head, len);
1629                        head = 0;
1630                }
1631                memcpy(ring + len, vaddr + head, size - len);
1632
1633                hexdump(m, ring, size);
1634                kfree(ring);
1635        }
1636}
1637
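    /*
     * Count the elements on @list. The caller must prevent concurrent
     * modification; here the scheduler lock is held around the walk of
     * sched_engine->hold.
     */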
1638static unsigned long list_count(struct list_head *list)
1639{
1640        struct list_head *pos;
1641        unsigned long count = 0;
1642
1643        list_for_each(pos, list)
1644                count++;
1645
1646        return count;
1647}
1648
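    /*
     * print_properties() walks a small offset table so that each tunable in
     * engine->props can be printed next to its default without repeating
     * boilerplate; read_ul() reinterprets the member at the stored offset,
     * which is only safe because every property listed below is an
     * unsigned long. Extending the dump is then a one-line change, e.g.
     * (hypothetical field, for illustration only):
     *
     *      P(my_new_timeout_ms),
     *
     * provided a matching unsigned long member exists in both engine->props
     * and engine->defaults.
     */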
1649static unsigned long read_ul(void *p, size_t x)
1650{
1651        return *(unsigned long *)(p + x);
1652}
1653
1654static void print_properties(struct intel_engine_cs *engine,
1655                             struct drm_printer *m)
1656{
1657        static const struct pmap {
1658                size_t offset;
1659                const char *name;
1660        } props[] = {
1661#define P(x) { \
1662        .offset = offsetof(typeof(engine->props), x), \
1663        .name = #x \
1664}
1665                P(heartbeat_interval_ms),
1666                P(max_busywait_duration_ns),
1667                P(preempt_timeout_ms),
1668                P(stop_timeout_ms),
1669                P(timeslice_duration_ms),
1670
1671                {},
1672#undef P
1673        };
1674        const struct pmap *p;
1675
1676        drm_printf(m, "\tProperties:\n");
1677        for (p = props; p->name; p++)
1678                drm_printf(m, "\t\t%s: %lu [default %lu]\n",
1679                           p->name,
1680                           read_ul(&engine->props, p->offset),
1681                           read_ul(&engine->defaults, p->offset));
1682}
1683
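    /*
     * Print everything we know about a single request: its ring geometry,
     * the HWSP offset of its timeline, a hexdump of the commands it emitted
     * and, if the context provides one, its logical ring context register
     * state.
     */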
1684static void engine_dump_request(struct i915_request *rq, struct drm_printer *m, const char *msg)
1685{
1686        struct intel_timeline *tl = get_timeline(rq);
1687
1688        i915_request_show(m, rq, msg, 0);
1689
1690        drm_printf(m, "\t\tring->start:  0x%08x\n",
1691                   i915_ggtt_offset(rq->ring->vma));
1692        drm_printf(m, "\t\tring->head:   0x%08x\n",
1693                   rq->ring->head);
1694        drm_printf(m, "\t\tring->tail:   0x%08x\n",
1695                   rq->ring->tail);
1696        drm_printf(m, "\t\tring->emit:   0x%08x\n",
1697                   rq->ring->emit);
1698        drm_printf(m, "\t\tring->space:  0x%08x\n",
1699                   rq->ring->space);
1700
1701        if (tl) {
1702                drm_printf(m, "\t\tring->hwsp:   0x%08x\n",
1703                           tl->hwsp_offset);
1704                intel_timeline_put(tl);
1705        }
1706
1707        print_request_ring(m, rq);
1708
1709        if (rq->context->lrc_reg_state) {
1710                drm_printf(m, "Logical Ring Context:\n");
1711                hexdump(m, rq->context->lrc_reg_state, PAGE_SIZE);
1712        }
1713}
1714
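    /**
     * intel_engine_dump_active_requests() - print a list of in-flight requests
     * @requests: the request list to walk (typically sched_engine->requests)
     * @hung_rq: request already reported as hung, skipped here so it is not
     *           dumped twice
     * @m: the drm_printer used for the output
     *
     * The caller must hold whatever lock protects @requests. Requests that
     * have not yet reached at least the queued state are skipped; the rest
     * are labelled according to whether they are running on the engine or
     * still waiting in the queue.
     */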
1715void intel_engine_dump_active_requests(struct list_head *requests,
1716                                       struct i915_request *hung_rq,
1717                                       struct drm_printer *m)
1718{
1719        struct i915_request *rq;
1720        const char *msg;
1721        enum i915_request_state state;
1722
1723        list_for_each_entry(rq, requests, sched.link) {
1724                if (rq == hung_rq)
1725                        continue;
1726
1727                state = i915_test_request_state(rq);
1728                if (state < I915_REQUEST_QUEUED)
1729                        continue;
1730
1731                if (state == I915_REQUEST_ACTIVE)
1732                        msg = "\t\tactive on engine";
1733                else
1734                        msg = "\t\tactive in queue";
1735
1736                engine_dump_request(rq, m, msg);
1737        }
1738}
1739
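    /*
     * Dump the hung request (if any) followed by the rest of the active
     * requests. With GuC submission the hung context is whatever the GuC
     * reported via intel_engine_get_hung_context(); with execlists we have
     * to go looking for it ourselves.
     */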
1740static void engine_dump_active_requests(struct intel_engine_cs *engine, struct drm_printer *m)
1741{
1742        struct i915_request *hung_rq = NULL;
1743        struct intel_context *ce;
1744        bool guc;
1745
1746        /*
1747         * No need for an engine->irq_seqno_barrier() before the seqno reads.
1748         * The GPU is still running so requests are still executing and any
1749         * hardware reads will be out of date by the time they are reported.
1750         * But the intention here is just to report an instantaneous snapshot
1751         * so that's fine.
1752         */
1753        lockdep_assert_held(&engine->sched_engine->lock);
1754
1755        drm_printf(m, "\tRequests:\n");
1756
1757        guc = intel_uc_uses_guc_submission(&engine->gt->uc);
1758        if (guc) {
1759                ce = intel_engine_get_hung_context(engine);
1760                if (ce)
1761                        hung_rq = intel_context_find_active_request(ce);
1762        } else {
1763                hung_rq = intel_engine_execlist_find_hung_request(engine);
1764        }
1765
1766        if (hung_rq)
1767                engine_dump_request(hung_rq, m, "\t\thung");
1768
1769        if (guc)
1770                intel_guc_dump_active_requests(engine, hung_rq, m);
1771        else
1772                intel_engine_dump_active_requests(&engine->sched_engine->requests,
1773                                                  hung_rq, m);
1774}
1775
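    /**
     * intel_engine_dump() - dump the current state of an engine
     * @engine: the engine to dump
     * @m: the drm_printer used for the output
     * @header: optional printf-style header emitted before the dump
     * @...: format arguments for @header
     *
     * Print a snapshot of the engine for debugfs and error-capture style
     * reporting: wakeref and heartbeat state, reset counts, the tunable
     * properties, the active requests and the number of requests on hold,
     * a register dump (only if the device is already awake), the HWSP
     * contents and the breadcrumb state.
     */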
1776void intel_engine_dump(struct intel_engine_cs *engine,
1777                       struct drm_printer *m,
1778                       const char *header, ...)
1779{
1780        struct i915_gpu_error * const error = &engine->i915->gpu_error;
1781        struct i915_request *rq;
1782        intel_wakeref_t wakeref;
1783        unsigned long flags;
1784        ktime_t dummy;
1785
1786        if (header) {
1787                va_list ap;
1788
1789                va_start(ap, header);
1790                drm_vprintf(m, header, &ap);
1791                va_end(ap);
1792        }
1793
1794        if (intel_gt_is_wedged(engine->gt))
1795                drm_printf(m, "*** WEDGED ***\n");
1796
1797        drm_printf(m, "\tAwake? %d\n", atomic_read(&engine->wakeref.count));
1798        drm_printf(m, "\tBarriers?: %s\n",
1799                   yesno(!llist_empty(&engine->barrier_tasks)));
1800        drm_printf(m, "\tLatency: %luus\n",
1801                   ewma__engine_latency_read(&engine->latency));
1802        if (intel_engine_supports_stats(engine))
1803                drm_printf(m, "\tRuntime: %llums\n",
1804                           ktime_to_ms(intel_engine_get_busy_time(engine,
1805                                                                  &dummy)));
1806        drm_printf(m, "\tForcewake: %x domains, %d active\n",
1807                   engine->fw_domain, READ_ONCE(engine->fw_active));
1808
1809        rcu_read_lock();
1810        rq = READ_ONCE(engine->heartbeat.systole);
1811        if (rq)
1812                drm_printf(m, "\tHeartbeat: %d ms ago\n",
1813                           jiffies_to_msecs(jiffies - rq->emitted_jiffies));
1814        rcu_read_unlock();
1815        drm_printf(m, "\tReset count: %d (global %d)\n",
1816                   i915_reset_engine_count(error, engine),
1817                   i915_reset_count(error));
1818        print_properties(engine, m);
1819
1820        spin_lock_irqsave(&engine->sched_engine->lock, flags);
1821        engine_dump_active_requests(engine, m);
1822
1823        drm_printf(m, "\tOn hold?: %lu\n",
1824                   list_count(&engine->sched_engine->hold));
1825        spin_unlock_irqrestore(&engine->sched_engine->lock, flags);
1826
1827        drm_printf(m, "\tMMIO base:  0x%08x\n", engine->mmio_base);
1828        wakeref = intel_runtime_pm_get_if_in_use(engine->uncore->rpm);
1829        if (wakeref) {
1830                intel_engine_print_registers(engine, m);
1831                intel_runtime_pm_put(engine->uncore->rpm, wakeref);
1832        } else {
1833                drm_printf(m, "\tDevice is asleep; skipping register dump\n");
1834        }
1835
1836        intel_execlists_show_requests(engine, m, i915_request_show, 8);
1837
1838        drm_printf(m, "HWSP:\n");
1839        hexdump(m, engine->status_page.addr, PAGE_SIZE);
1840
1841        drm_printf(m, "Idle? %s\n", yesno(intel_engine_is_idle(engine)));
1842
1843        intel_engine_print_breadcrumbs(engine, m);
1844}
1845
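    /*
     * Sample the accumulated busy time, adding the time spent on the current
     * workload if the engine is busy right now. Must be called inside the
     * engine->stats.lock seqcount retry loop (see the caller below) so that
     * a concurrent update of total/start cannot tear the sample.
     */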
1846static ktime_t __intel_engine_get_busy_time(struct intel_engine_cs *engine,
1847                                            ktime_t *now)
1848{
1849        ktime_t total = engine->stats.total;
1850
1851        /*
1852         * If the engine is executing something at the moment,
1853         * add it to the total.
1854         */
1855        *now = ktime_get();
1856        if (READ_ONCE(engine->stats.active))
1857                total = ktime_add(total, ktime_sub(*now, engine->stats.start));
1858
1859        return total;
1860}
1861
1862/**
1863 * intel_engine_get_busy_time() - Return current accumulated engine busyness
1864 * @engine: engine to report on
1865 * @now: monotonic timestamp of sampling
1866 *
1867 * Returns accumulated time @engine was busy since engine stats were enabled.
1868 */
1869ktime_t intel_engine_get_busy_time(struct intel_engine_cs *engine, ktime_t *now)
1870{
1871        unsigned int seq;
1872        ktime_t total;
1873
1874        do {
1875                seq = read_seqcount_begin(&engine->stats.lock);
1876                total = __intel_engine_get_busy_time(engine, now);
1877        } while (read_seqcount_retry(&engine->stats.lock, seq));
1878
1879        return total;
1880}
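    /*
     * Illustrative only: a sampling consumer might read the busy time twice
     * and derive a utilisation figure for the interval, along the lines of
     * (names below are hypothetical):
     *
     *      ktime_t t[2], busy[2];
     *      u64 busy_pct;
     *
     *      busy[0] = intel_engine_get_busy_time(engine, &t[0]);
     *      msleep(100);
     *      busy[1] = intel_engine_get_busy_time(engine, &t[1]);
     *      busy_pct = div64_u64(100 * ktime_to_ns(ktime_sub(busy[1], busy[0])),
     *                           ktime_to_ns(ktime_sub(t[1], t[0])));
     */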
1881
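    /*
     * Create a virtual context spanning @siblings. A single sibling simply
     * yields an ordinary context on that engine; otherwise the submission
     * backend's create_virtual() hook builds a context whose work can be
     * placed on any of the siblings.
     */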
1882struct intel_context *
1883intel_engine_create_virtual(struct intel_engine_cs **siblings,
1884                            unsigned int count)
1885{
1886        if (count == 0)
1887                return ERR_PTR(-EINVAL);
1888
1889        if (count == 1)
1890                return intel_context_create(siblings[0]);
1891
1892        GEM_BUG_ON(!siblings[0]->cops->create_virtual);
1893        return siblings[0]->cops->create_virtual(siblings, count);
1894}
1895
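    /*
     * Find the request most likely to have hung the engine: first look at
     * what is actually in flight on the hardware and rewind along its
     * timeline to the oldest incomplete request; failing that, fall back to
     * the first request on the scheduler list that has reached the active
     * state.
     */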
1896struct i915_request *
1897intel_engine_execlist_find_hung_request(struct intel_engine_cs *engine)
1898{
1899        struct i915_request *request, *active = NULL;
1900
1901        /*
1902         * This search does not work in GuC submission mode. However, the GuC
1903         * will report the hanging context directly to the driver itself. So
1904         * the driver should never get here when in GuC mode.
1905         */
1906        GEM_BUG_ON(intel_uc_uses_guc_submission(&engine->gt->uc));
1907
1908        /*
1909         * We are called by error capture, reset and engine-state dumps at
1910         * random points in time. In particular, note that none of these is
1911         * crucially ordered with an interrupt. After a hang, the GPU is dead
1912         * and we assume that no more writes can happen (we waited long enough
1913         * for all writes that were in flight to be flushed) - adding an
1914         * extra delay for a recent interrupt is pointless. Hence, we do
1915         * not need an engine->irq_seqno_barrier() before the seqno reads.
1916         * At all other times, we must assume the GPU is still running, but
1917         * we only care about the snapshot of this moment.
1918         */
1919        lockdep_assert_held(&engine->sched_engine->lock);
1920
1921        rcu_read_lock();
1922        request = execlists_active(&engine->execlists);
1923        if (request) {
1924                struct intel_timeline *tl = request->context->timeline;
1925
1926                list_for_each_entry_from_reverse(request, &tl->requests, link) {
1927                        if (__i915_request_is_complete(request))
1928                                break;
1929
1930                        active = request;
1931                }
1932        }
1933        rcu_read_unlock();
1934        if (active)
1935                return active;
1936
1937        list_for_each_entry(request, &engine->sched_engine->requests,
1938                            sched.link) {
1939                if (i915_test_request_state(request) != I915_REQUEST_ACTIVE)
1940                        continue;
1941
1942                active = request;
1943                break;
1944        }
1945
1946        return active;
1947}
1948
1949#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
1950#include "mock_engine.c"
1951#include "selftest_engine.c"
1952#include "selftest_engine_cs.c"
1953#endif
1954