linux/drivers/gpu/drm/i915/intel_ringbuffer.h
   1/* SPDX-License-Identifier: GPL-2.0 */
   2#ifndef _INTEL_RINGBUFFER_H_
   3#define _INTEL_RINGBUFFER_H_
   4
   5#include <linux/hashtable.h>
   6#include <linux/seqlock.h>
   7
   8#include "i915_gem_batch_pool.h"
   9
  10#include "i915_reg.h"
  11#include "i915_pmu.h"
  12#include "i915_request.h"
  13#include "i915_selftest.h"
  14#include "i915_timeline.h"
  15#include "intel_gpu_commands.h"
  16
  17struct drm_printer;
  18struct i915_sched_attr;
  19
  20#define I915_CMD_HASH_ORDER 9
  21
   22/* Early gen2 devices have a cacheline of just 32 bytes; using 64 is overkill,
   23 * but keeps the logic simple. Indeed, the whole purpose of this macro is just
   24 * to give some indication as to some of the magic values used in the various
  25 * workarounds!
  26 */
  27#define CACHELINE_BYTES 64
  28#define CACHELINE_DWORDS (CACHELINE_BYTES / sizeof(uint32_t))
  29
  30struct intel_hw_status_page {
  31        struct i915_vma *vma;
  32        u32 *page_addr;
  33        u32 ggtt_offset;
  34};
  35
  36#define I915_READ_TAIL(engine) I915_READ(RING_TAIL((engine)->mmio_base))
  37#define I915_WRITE_TAIL(engine, val) I915_WRITE(RING_TAIL((engine)->mmio_base), val)
  38
  39#define I915_READ_START(engine) I915_READ(RING_START((engine)->mmio_base))
  40#define I915_WRITE_START(engine, val) I915_WRITE(RING_START((engine)->mmio_base), val)
  41
  42#define I915_READ_HEAD(engine)  I915_READ(RING_HEAD((engine)->mmio_base))
  43#define I915_WRITE_HEAD(engine, val) I915_WRITE(RING_HEAD((engine)->mmio_base), val)
  44
  45#define I915_READ_CTL(engine) I915_READ(RING_CTL((engine)->mmio_base))
  46#define I915_WRITE_CTL(engine, val) I915_WRITE(RING_CTL((engine)->mmio_base), val)
  47
  48#define I915_READ_IMR(engine) I915_READ(RING_IMR((engine)->mmio_base))
  49#define I915_WRITE_IMR(engine, val) I915_WRITE(RING_IMR((engine)->mmio_base), val)
  50
  51#define I915_READ_MODE(engine) I915_READ(RING_MI_MODE((engine)->mmio_base))
  52#define I915_WRITE_MODE(engine, val) I915_WRITE(RING_MI_MODE((engine)->mmio_base), val)
  53
  54/* seqno size is actually only a uint32, but since we plan to use MI_FLUSH_DW to
  55 * do the writes, and that must have qw aligned offsets, simply pretend it's 8b.
  56 */
  57enum intel_engine_hangcheck_action {
  58        ENGINE_IDLE = 0,
  59        ENGINE_WAIT,
  60        ENGINE_ACTIVE_SEQNO,
  61        ENGINE_ACTIVE_HEAD,
  62        ENGINE_ACTIVE_SUBUNITS,
  63        ENGINE_WAIT_KICK,
  64        ENGINE_DEAD,
  65};
  66
  67static inline const char *
  68hangcheck_action_to_str(const enum intel_engine_hangcheck_action a)
  69{
  70        switch (a) {
  71        case ENGINE_IDLE:
  72                return "idle";
  73        case ENGINE_WAIT:
  74                return "wait";
  75        case ENGINE_ACTIVE_SEQNO:
  76                return "active seqno";
  77        case ENGINE_ACTIVE_HEAD:
  78                return "active head";
  79        case ENGINE_ACTIVE_SUBUNITS:
  80                return "active subunits";
  81        case ENGINE_WAIT_KICK:
  82                return "wait kick";
  83        case ENGINE_DEAD:
  84                return "dead";
  85        }
  86
  87        return "unknown";
  88}
  89
  90#define I915_MAX_SLICES 3
  91#define I915_MAX_SUBSLICES 8
  92
  93#define instdone_slice_mask(dev_priv__) \
  94        (INTEL_GEN(dev_priv__) == 7 ? \
  95         1 : INTEL_INFO(dev_priv__)->sseu.slice_mask)
  96
  97#define instdone_subslice_mask(dev_priv__) \
  98        (INTEL_GEN(dev_priv__) == 7 ? \
  99         1 : INTEL_INFO(dev_priv__)->sseu.subslice_mask[0])
 100
 101#define for_each_instdone_slice_subslice(dev_priv__, slice__, subslice__) \
 102        for ((slice__) = 0, (subslice__) = 0; \
 103             (slice__) < I915_MAX_SLICES; \
 104             (subslice__) = ((subslice__) + 1) < I915_MAX_SUBSLICES ? (subslice__) + 1 : 0, \
 105               (slice__) += ((subslice__) == 0)) \
 106                for_each_if((BIT(slice__) & instdone_slice_mask(dev_priv__)) && \
 107                            (BIT(subslice__) & instdone_subslice_mask(dev_priv__)))
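     /*
      * Illustrative sketch of how the iterator above can be used
      * (intel_engine_get_instdone() is the real consumer; read_sampler_reg()
      * here is only a hypothetical placeholder, not a real function):
      *
      *        int slice, subslice;
      *
      *        for_each_instdone_slice_subslice(dev_priv, slice, subslice)
      *                instdone->sampler[slice][subslice] = read_sampler_reg();
      *
      * The macro advances the slice/subslice pair as one flattened loop, and
      * for_each_if() skips pairs not present in the sseu masks.
      */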
 108
 109struct intel_instdone {
 110        u32 instdone;
 111        /* The following exist only in the RCS engine */
 112        u32 slice_common;
 113        u32 sampler[I915_MAX_SLICES][I915_MAX_SUBSLICES];
 114        u32 row[I915_MAX_SLICES][I915_MAX_SUBSLICES];
 115};
 116
 117struct intel_engine_hangcheck {
 118        u64 acthd;
 119        u32 seqno;
 120        enum intel_engine_hangcheck_action action;
 121        unsigned long action_timestamp;
 122        int deadlock;
 123        struct intel_instdone instdone;
 124        struct i915_request *active_request;
 125        bool stalled;
 126};
 127
 128struct intel_ring {
 129        struct i915_vma *vma;
 130        void *vaddr;
 131
 132        struct i915_timeline *timeline;
 133        struct list_head request_list;
 134        struct list_head active_link;
 135
 136        u32 head;
 137        u32 tail;
 138        u32 emit;
 139
 140        u32 space;
 141        u32 size;
 142        u32 effective_size;
 143};
 144
 145struct i915_gem_context;
 146struct drm_i915_reg_table;
 147
 148/*
  149 * We use a single page to load the context workarounds, so all of these
  150 * values are expressed in dwords.
  151 *
  152 * struct i915_wa_ctx_bb:
  153 *  offset: specifies the batch starting position; also helpful in case
  154 *    we want to have multiple batches at different offsets based on
 155 *    some criteria. It is not a requirement at the moment but provides
 156 *    an option for future use.
 157 *  size: size of the batch in DWORDS
 158 */
 159struct i915_ctx_workarounds {
 160        struct i915_wa_ctx_bb {
 161                u32 offset;
 162                u32 size;
 163        } indirect_ctx, per_ctx;
 164        struct i915_vma *vma;
 165};
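     /*
      * For example (illustrative only): an indirect_ctx batch recorded at
      * offset 0x20 with size 8 occupies dwords 0x20..0x27 of the workaround
      * page, i.e. bytes 0x80..0x9f once scaled by sizeof(u32).
      */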
 166
 167struct i915_request;
 168
 169#define I915_MAX_VCS    4
 170#define I915_MAX_VECS   2
 171
 172/*
 173 * Engine IDs definitions.
 174 * Keep instances of the same type engine together.
 175 */
 176enum intel_engine_id {
 177        RCS = 0,
 178        BCS,
 179        VCS,
 180        VCS2,
 181        VCS3,
 182        VCS4,
 183#define _VCS(n) (VCS + (n))
 184        VECS,
 185        VECS2
 186#define _VECS(n) (VECS + (n))
 187};
 188
 189struct i915_priolist {
 190        struct rb_node node;
 191        struct list_head requests;
 192        int priority;
 193};
 194
 195/**
 196 * struct intel_engine_execlists - execlist submission queue and port state
 197 *
 198 * The struct intel_engine_execlists represents the combined logical state of
 199 * driver and the hardware state for execlist mode of submission.
 200 */
 201struct intel_engine_execlists {
 202        /**
 203         * @tasklet: softirq tasklet for bottom handler
 204         */
 205        struct tasklet_struct tasklet;
 206
 207        /**
 208         * @default_priolist: priority list for I915_PRIORITY_NORMAL
 209         */
 210        struct i915_priolist default_priolist;
 211
 212        /**
 213         * @no_priolist: priority lists disabled
 214         */
 215        bool no_priolist;
 216
 217        /**
 218         * @submit_reg: gen-specific execlist submission register
 219         * set to the ExecList Submission Port (elsp) register pre-Gen11 and to
 220         * the ExecList Submission Queue Contents register array for Gen11+
 221         */
 222        u32 __iomem *submit_reg;
 223
 224        /**
 225         * @ctrl_reg: the enhanced execlists control register, used to load the
 226         * submit queue on the HW and to request preemptions to idle
 227         */
 228        u32 __iomem *ctrl_reg;
 229
 230        /**
 231         * @port: execlist port states
 232         *
 233         * For each hardware ELSP (ExecList Submission Port) we keep
 234         * track of the last request and the number of times we submitted
 235         * that port to hw. We then count the number of times the hw reports
 236         * a context completion or preemption. As only one context can
  237         * be active on hw, we limit resubmission of a context to port[0]. This
  238         * is called a Lite Restore of the context.
 239         */
 240        struct execlist_port {
 241                /**
 242                 * @request_count: combined request and submission count
 243                 */
 244                struct i915_request *request_count;
 245#define EXECLIST_COUNT_BITS 2
 246#define port_request(p) ptr_mask_bits((p)->request_count, EXECLIST_COUNT_BITS)
 247#define port_count(p) ptr_unmask_bits((p)->request_count, EXECLIST_COUNT_BITS)
 248#define port_pack(rq, count) ptr_pack_bits(rq, count, EXECLIST_COUNT_BITS)
 249#define port_unpack(p, count) ptr_unpack_bits((p)->request_count, count, EXECLIST_COUNT_BITS)
 250#define port_set(p, packed) ((p)->request_count = (packed))
 251#define port_isset(p) ((p)->request_count)
 252#define port_index(p, execlists) ((p) - (execlists)->port)
 253
 254                /**
 255                 * @context_id: context ID for port
 256                 */
 257                GEM_DEBUG_DECL(u32 context_id);
 258
 259#define EXECLIST_MAX_PORTS 2
 260        } port[EXECLIST_MAX_PORTS];
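             /*
              * Illustrative sketch of the packing above (not a verbatim copy
              * of the submission code): the request pointer and its
              * submission count share a single word, e.g.
              *
              *        port_set(port, port_pack(rq, port_count(port) + 1));
              *        rq = port_unpack(port, &count);
              *
              * so a lite restore of the same context simply bumps the count,
              * which is later reconciled against the completion/preemption
              * events reported by the hardware.
              */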
 261
 262        /**
 263         * @active: is the HW active? We consider the HW as active after
 264         * submitting any context for execution and until we have seen the
 265         * last context completion event. After that, we do not expect any
 266         * more events until we submit, and so can park the HW.
 267         *
 268         * As we have a small number of different sources from which we feed
 269         * the HW, we track the state of each inside a single bitfield.
 270         */
 271        unsigned int active;
 272#define EXECLISTS_ACTIVE_USER 0
 273#define EXECLISTS_ACTIVE_PREEMPT 1
 274#define EXECLISTS_ACTIVE_HWACK 2
 275
 276        /**
 277         * @port_mask: number of execlist ports - 1
 278         */
 279        unsigned int port_mask;
 280
 281        /**
 282         * @queue_priority: Highest pending priority.
 283         *
 284         * When we add requests into the queue, or adjust the priority of
 285         * executing requests, we compute the maximum priority of those
 286         * pending requests. We can then use this value to determine if
 287         * we need to preempt the executing requests to service the queue.
 288         */
 289        int queue_priority;
 290
 291        /**
 292         * @queue: queue of requests, in priority lists
 293         */
 294        struct rb_root queue;
 295
 296        /**
 297         * @first: leftmost level in priority @queue
 298         */
 299        struct rb_node *first;
 300
 301        /**
 302         * @fw_domains: forcewake domains for irq tasklet
 303         */
 304        unsigned int fw_domains;
 305
 306        /**
 307         * @csb_head: context status buffer head
 308         */
 309        unsigned int csb_head;
 310
 311        /**
 312         * @csb_use_mmio: access csb through mmio, instead of hwsp
 313         */
 314        bool csb_use_mmio;
 315
 316        /**
 317         * @preempt_complete_status: expected CSB upon completing preemption
 318         */
 319        u32 preempt_complete_status;
 320};
 321
 322#define INTEL_ENGINE_CS_MAX_NAME 8
 323
 324struct intel_engine_cs {
 325        struct drm_i915_private *i915;
 326        char name[INTEL_ENGINE_CS_MAX_NAME];
 327
 328        enum intel_engine_id id;
 329        unsigned int hw_id;
 330        unsigned int guc_id;
 331
 332        u8 uabi_id;
 333        u8 uabi_class;
 334
 335        u8 class;
 336        u8 instance;
 337        u32 context_size;
 338        u32 mmio_base;
 339
 340        struct intel_ring *buffer;
 341
 342        struct i915_timeline timeline;
 343
 344        struct drm_i915_gem_object *default_state;
 345
 346        atomic_t irq_count;
 347        unsigned long irq_posted;
 348#define ENGINE_IRQ_BREADCRUMB 0
 349#define ENGINE_IRQ_EXECLIST 1
 350
 351        /* Rather than have every client wait upon all user interrupts,
 352         * with the herd waking after every interrupt and each doing the
 353         * heavyweight seqno dance, we delegate the task (of being the
 354         * bottom-half of the user interrupt) to the first client. After
 355         * every interrupt, we wake up one client, who does the heavyweight
 356         * coherent seqno read and either goes back to sleep (if incomplete),
 357         * or wakes up all the completed clients in parallel, before then
 358         * transferring the bottom-half status to the next client in the queue.
 359         *
 360         * Compared to walking the entire list of waiters in a single dedicated
 361         * bottom-half, we reduce the latency of the first waiter by avoiding
 362         * a context switch, but incur additional coherent seqno reads when
 363         * following the chain of request breadcrumbs. Since it is most likely
  364         * that we have a single client waiting on each seqno, reducing
 365         * the overhead of waking that client is much preferred.
 366         */
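             /*
              * A minimal sketch of the waiter flow described above, using the
              * helpers declared later in this header (the real callers live in
              * i915_request.c and intel_breadcrumbs.c and also handle signals,
              * timeouts and missed interrupts):
              *
              *        struct intel_wait wait;
              *
              *        intel_wait_init(&wait, rq);
              *        intel_engine_add_wait(engine, &wait);
              *        ... sleep until the current bottom-half wakes us ...
              *        intel_engine_remove_wait(engine, &wait);
              */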
 367        struct intel_breadcrumbs {
 368                spinlock_t irq_lock; /* protects irq_*; irqsafe */
 369                struct intel_wait *irq_wait; /* oldest waiter by retirement */
 370
 371                spinlock_t rb_lock; /* protects the rb and wraps irq_lock */
 372                struct rb_root waiters; /* sorted by retirement, priority */
 373                struct list_head signals; /* sorted by retirement */
 374                struct task_struct *signaler; /* used for fence signalling */
 375
 376                struct timer_list fake_irq; /* used after a missed interrupt */
 377                struct timer_list hangcheck; /* detect missed interrupts */
 378
 379                unsigned int hangcheck_interrupts;
 380                unsigned int irq_enabled;
 381
 382                bool irq_armed : 1;
 383                I915_SELFTEST_DECLARE(bool mock : 1);
 384        } breadcrumbs;
 385
 386        struct {
 387                /**
 388                 * @enable: Bitmask of enable sample events on this engine.
 389                 *
 390                 * Bits correspond to sample event types, for instance
 391                 * I915_SAMPLE_QUEUED is bit 0 etc.
 392                 */
 393                u32 enable;
 394                /**
 395                 * @enable_count: Reference count for the enabled samplers.
 396                 *
 397                 * Index number corresponds to the bit number from @enable.
 398                 */
 399                unsigned int enable_count[I915_PMU_SAMPLE_BITS];
 400                /**
 401                 * @sample: Counter values for sampling events.
 402                 *
 403                 * Our internal timer stores the current counters in this field.
 404                 */
 405#define I915_ENGINE_SAMPLE_MAX (I915_SAMPLE_SEMA + 1)
 406                struct i915_pmu_sample sample[I915_ENGINE_SAMPLE_MAX];
 407        } pmu;
 408
 409        /*
 410         * A pool of objects to use as shadow copies of client batch buffers
 411         * when the command parser is enabled. Prevents the client from
 412         * modifying the batch contents after software parsing.
 413         */
 414        struct i915_gem_batch_pool batch_pool;
 415
 416        struct intel_hw_status_page status_page;
 417        struct i915_ctx_workarounds wa_ctx;
 418        struct i915_vma *scratch;
 419
 420        u32             irq_keep_mask; /* always keep these interrupts */
 421        u32             irq_enable_mask; /* bitmask to enable ring interrupt */
 422        void            (*irq_enable)(struct intel_engine_cs *engine);
 423        void            (*irq_disable)(struct intel_engine_cs *engine);
 424
 425        int             (*init_hw)(struct intel_engine_cs *engine);
 426        void            (*reset_hw)(struct intel_engine_cs *engine,
 427                                    struct i915_request *rq);
 428
 429        void            (*park)(struct intel_engine_cs *engine);
 430        void            (*unpark)(struct intel_engine_cs *engine);
 431
 432        void            (*set_default_submission)(struct intel_engine_cs *engine);
 433
 434        struct intel_ring *(*context_pin)(struct intel_engine_cs *engine,
 435                                          struct i915_gem_context *ctx);
 436        void            (*context_unpin)(struct intel_engine_cs *engine,
 437                                         struct i915_gem_context *ctx);
 438        int             (*request_alloc)(struct i915_request *rq);
 439        int             (*init_context)(struct i915_request *rq);
 440
 441        int             (*emit_flush)(struct i915_request *request, u32 mode);
 442#define EMIT_INVALIDATE BIT(0)
 443#define EMIT_FLUSH      BIT(1)
 444#define EMIT_BARRIER    (EMIT_INVALIDATE | EMIT_FLUSH)
 445        int             (*emit_bb_start)(struct i915_request *rq,
 446                                         u64 offset, u32 length,
 447                                         unsigned int dispatch_flags);
 448#define I915_DISPATCH_SECURE BIT(0)
 449#define I915_DISPATCH_PINNED BIT(1)
 450#define I915_DISPATCH_RS     BIT(2)
 451        void            (*emit_breadcrumb)(struct i915_request *rq, u32 *cs);
 452        int             emit_breadcrumb_sz;
 453
 454        /* Pass the request to the hardware queue (e.g. directly into
 455         * the legacy ringbuffer or to the end of an execlist).
 456         *
 457         * This is called from an atomic context with irqs disabled; must
 458         * be irq safe.
 459         */
 460        void            (*submit_request)(struct i915_request *rq);
 461
 462        /* Call when the priority on a request has changed and it and its
 463         * dependencies may need rescheduling. Note the request itself may
 464         * not be ready to run!
 465         *
 466         * Called under the struct_mutex.
 467         */
 468        void            (*schedule)(struct i915_request *request,
 469                                    const struct i915_sched_attr *attr);
 470
 471        /*
 472         * Cancel all requests on the hardware, or queued for execution.
 473         * This should only cancel the ready requests that have been
 474         * submitted to the engine (via the engine->submit_request callback).
 475         * This is called when marking the device as wedged.
 476         */
 477        void            (*cancel_requests)(struct intel_engine_cs *engine);
 478
 479        /* Some chipsets are not quite as coherent as advertised and need
 480         * an expensive kick to force a true read of the up-to-date seqno.
 481         * However, the up-to-date seqno is not always required and the last
 482         * seen value is good enough. Note that the seqno will always be
 483         * monotonic, even if not coherent.
 484         */
 485        void            (*irq_seqno_barrier)(struct intel_engine_cs *engine);
 486        void            (*cleanup)(struct intel_engine_cs *engine);
 487
 488        /* GEN8 signal/wait table - never trust comments!
 489         *        signal to     signal to    signal to   signal to      signal to
 490         *          RCS            VCS          BCS        VECS          VCS2
 491         *      --------------------------------------------------------------------
 492         *  RCS | NOP (0x00) | VCS (0x08) | BCS (0x10) | VECS (0x18) | VCS2 (0x20) |
 493         *      |-------------------------------------------------------------------
 494         *  VCS | RCS (0x28) | NOP (0x30) | BCS (0x38) | VECS (0x40) | VCS2 (0x48) |
 495         *      |-------------------------------------------------------------------
 496         *  BCS | RCS (0x50) | VCS (0x58) | NOP (0x60) | VECS (0x68) | VCS2 (0x70) |
 497         *      |-------------------------------------------------------------------
 498         * VECS | RCS (0x78) | VCS (0x80) | BCS (0x88) |  NOP (0x90) | VCS2 (0x98) |
 499         *      |-------------------------------------------------------------------
 500         * VCS2 | RCS (0xa0) | VCS (0xa8) | BCS (0xb0) | VECS (0xb8) | NOP  (0xc0) |
 501         *      |-------------------------------------------------------------------
 502         *
 503         * Generalization:
 504         *  f(x, y) := (x->id * NUM_RINGS * seqno_size) + (seqno_size * y->id)
 505         *  ie. transpose of g(x, y)
 506         *
 507         *       sync from      sync from    sync from    sync from     sync from
 508         *          RCS            VCS          BCS        VECS          VCS2
 509         *      --------------------------------------------------------------------
 510         *  RCS | NOP (0x00) | VCS (0x28) | BCS (0x50) | VECS (0x78) | VCS2 (0xa0) |
 511         *      |-------------------------------------------------------------------
 512         *  VCS | RCS (0x08) | NOP (0x30) | BCS (0x58) | VECS (0x80) | VCS2 (0xa8) |
 513         *      |-------------------------------------------------------------------
 514         *  BCS | RCS (0x10) | VCS (0x38) | NOP (0x60) | VECS (0x88) | VCS2 (0xb0) |
 515         *      |-------------------------------------------------------------------
 516         * VECS | RCS (0x18) | VCS (0x40) | BCS (0x68) |  NOP (0x90) | VCS2 (0xb8) |
 517         *      |-------------------------------------------------------------------
 518         * VCS2 | RCS (0x20) | VCS (0x48) | BCS (0x70) | VECS (0x98) |  NOP (0xc0) |
 519         *      |-------------------------------------------------------------------
 520         *
 521         * Generalization:
 522         *  g(x, y) := (y->id * NUM_RINGS * seqno_size) + (seqno_size * x->id)
 523         *  ie. transpose of f(x, y)
 524         */
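             /*
              * Worked example for the tables above, using the ring ids the
              * table assumes (RCS=0, VCS=1, BCS=2, VECS=3, VCS2=4), NUM_RINGS
              * of 5 and a seqno_size of 8:
              *
              *        f(RCS, VCS2) = (0 * 5 * 8) + (8 * 4) = 0x20
              *
              * i.e. the slot RCS writes when signalling VCS2, which is also
              * the slot VCS2 polls when syncing from RCS: g(VCS2, RCS) = 0x20.
              */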
 525        struct {
 526#define GEN6_SEMAPHORE_LAST     VECS_HW
 527#define GEN6_NUM_SEMAPHORES     (GEN6_SEMAPHORE_LAST + 1)
 528#define GEN6_SEMAPHORES_MASK    GENMASK(GEN6_SEMAPHORE_LAST, 0)
 529                struct {
 530                        /* our mbox written by others */
 531                        u32             wait[GEN6_NUM_SEMAPHORES];
 532                        /* mboxes this ring signals to */
 533                        i915_reg_t      signal[GEN6_NUM_SEMAPHORES];
 534                } mbox;
 535
 536                /* AKA wait() */
 537                int     (*sync_to)(struct i915_request *rq,
 538                                   struct i915_request *signal);
 539                u32     *(*signal)(struct i915_request *rq, u32 *cs);
 540        } semaphore;
 541
 542        struct intel_engine_execlists execlists;
 543
 544        /* Contexts are pinned whilst they are active on the GPU. The last
 545         * context executed remains active whilst the GPU is idle - the
 546         * switch away and write to the context object only occurs on the
 547         * next execution.  Contexts are only unpinned on retirement of the
 548         * following request ensuring that we can always write to the object
 549         * on the context switch even after idling. Across suspend, we switch
 550         * to the kernel context and trash it as the save may not happen
 551         * before the hardware is powered down.
 552         */
 553        struct i915_gem_context *last_retired_context;
 554
 555        /* We track the current MI_SET_CONTEXT in order to eliminate
  556         * redundant context switches. This presumes that requests are not
  557         * reordered! Or, when they are, the tracking is updated along with
 558         * the emission of individual requests into the legacy command
 559         * stream (ring).
 560         */
 561        struct i915_gem_context *legacy_active_context;
 562        struct i915_hw_ppgtt *legacy_active_ppgtt;
 563
 564        /* status_notifier: list of callbacks for context-switch changes */
 565        struct atomic_notifier_head context_status_notifier;
 566
 567        struct intel_engine_hangcheck hangcheck;
 568
 569#define I915_ENGINE_NEEDS_CMD_PARSER BIT(0)
 570#define I915_ENGINE_SUPPORTS_STATS   BIT(1)
 571#define I915_ENGINE_HAS_PREEMPTION   BIT(2)
 572        unsigned int flags;
 573
 574        /*
 575         * Table of commands the command parser needs to know about
 576         * for this engine.
 577         */
 578        DECLARE_HASHTABLE(cmd_hash, I915_CMD_HASH_ORDER);
 579
 580        /*
 581         * Table of registers allowed in commands that read/write registers.
 582         */
 583        const struct drm_i915_reg_table *reg_tables;
 584        int reg_table_count;
 585
 586        /*
 587         * Returns the bitmask for the length field of the specified command.
 588         * Return 0 for an unrecognized/invalid command.
 589         *
 590         * If the command parser finds an entry for a command in the engine's
 591         * cmd_tables, it gets the command's length based on the table entry.
 592         * If not, it calls this function to determine the per-engine length
 593         * field encoding for the command (i.e. different opcode ranges use
 594         * certain bits to encode the command length in the header).
 595         */
 596        u32 (*get_cmd_length_mask)(u32 cmd_header);
 597
 598        struct {
 599                /**
 600                 * @lock: Lock protecting the below fields.
 601                 */
 602                seqlock_t lock;
 603                /**
 604                 * @enabled: Reference count indicating number of listeners.
 605                 */
 606                unsigned int enabled;
 607                /**
 608                 * @active: Number of contexts currently scheduled in.
 609                 */
 610                unsigned int active;
 611                /**
 612                 * @enabled_at: Timestamp when busy stats were enabled.
 613                 */
 614                ktime_t enabled_at;
 615                /**
 616                 * @start: Timestamp of the last idle to active transition.
 617                 *
 618                 * Idle is defined as active == 0, active is active > 0.
 619                 */
 620                ktime_t start;
 621                /**
 622                 * @total: Total time this engine was busy.
 623                 *
 624                 * Accumulated time not counting the most recent block in cases
  625                 * where the engine is currently busy (active > 0).
 626                 */
 627                ktime_t total;
 628        } stats;
 629};
 630
 631static inline bool
 632intel_engine_needs_cmd_parser(const struct intel_engine_cs *engine)
 633{
 634        return engine->flags & I915_ENGINE_NEEDS_CMD_PARSER;
 635}
 636
 637static inline bool
 638intel_engine_supports_stats(const struct intel_engine_cs *engine)
 639{
 640        return engine->flags & I915_ENGINE_SUPPORTS_STATS;
 641}
 642
 643static inline bool
 644intel_engine_has_preemption(const struct intel_engine_cs *engine)
 645{
 646        return engine->flags & I915_ENGINE_HAS_PREEMPTION;
 647}
 648
 649static inline bool __execlists_need_preempt(int prio, int last)
 650{
 651        return prio > max(0, last);
 652}
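     /*
      * For example, assuming I915_PRIORITY_NORMAL is 0: a queued priority of
      * 1 preempts a running priority of 0, equal priorities never preempt,
      * and the max(0, last) clamp stops priority-0 work from preempting a
      * request running at negative (below normal) priority.
      */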
 653
 654static inline void
 655execlists_set_active(struct intel_engine_execlists *execlists,
 656                     unsigned int bit)
 657{
 658        __set_bit(bit, (unsigned long *)&execlists->active);
 659}
 660
 661static inline bool
 662execlists_set_active_once(struct intel_engine_execlists *execlists,
 663                          unsigned int bit)
 664{
 665        return !__test_and_set_bit(bit, (unsigned long *)&execlists->active);
 666}
 667
 668static inline void
 669execlists_clear_active(struct intel_engine_execlists *execlists,
 670                       unsigned int bit)
 671{
 672        __clear_bit(bit, (unsigned long *)&execlists->active);
 673}
 674
 675static inline bool
 676execlists_is_active(const struct intel_engine_execlists *execlists,
 677                    unsigned int bit)
 678{
 679        return test_bit(bit, (unsigned long *)&execlists->active);
 680}
 681
 682void execlists_user_begin(struct intel_engine_execlists *execlists,
 683                          const struct execlist_port *port);
 684void execlists_user_end(struct intel_engine_execlists *execlists);
 685
 686void
 687execlists_cancel_port_requests(struct intel_engine_execlists * const execlists);
 688
 689void
 690execlists_unwind_incomplete_requests(struct intel_engine_execlists *execlists);
 691
 692static inline unsigned int
 693execlists_num_ports(const struct intel_engine_execlists * const execlists)
 694{
 695        return execlists->port_mask + 1;
 696}
 697
 698static inline struct execlist_port *
 699execlists_port_complete(struct intel_engine_execlists * const execlists,
 700                        struct execlist_port * const port)
 701{
 702        const unsigned int m = execlists->port_mask;
 703
 704        GEM_BUG_ON(port_index(port, execlists) != 0);
 705        GEM_BUG_ON(!execlists_is_active(execlists, EXECLISTS_ACTIVE_USER));
 706
 707        memmove(port, port + 1, m * sizeof(struct execlist_port));
 708        memset(port + m, 0, sizeof(struct execlist_port));
 709
 710        return port;
 711}
 712
 713static inline unsigned int
 714intel_engine_flag(const struct intel_engine_cs *engine)
 715{
 716        return BIT(engine->id);
 717}
 718
 719static inline u32
 720intel_read_status_page(const struct intel_engine_cs *engine, int reg)
 721{
 722        /* Ensure that the compiler doesn't optimize away the load. */
 723        return READ_ONCE(engine->status_page.page_addr[reg]);
 724}
 725
 726static inline void
 727intel_write_status_page(struct intel_engine_cs *engine, int reg, u32 value)
 728{
 729        /* Writing into the status page should be done sparingly. Since
  730         * we do so when we are uncertain of the device state, we take a bit
  731         * of extra paranoia to try to ensure that the HWS takes the value
 732         * we give and that it doesn't end up trapped inside the CPU!
 733         */
 734        if (static_cpu_has(X86_FEATURE_CLFLUSH)) {
 735                mb();
 736                clflush(&engine->status_page.page_addr[reg]);
 737                engine->status_page.page_addr[reg] = value;
 738                clflush(&engine->status_page.page_addr[reg]);
 739                mb();
 740        } else {
 741                WRITE_ONCE(engine->status_page.page_addr[reg], value);
 742        }
 743}
 744
 745/*
 746 * Reads a dword out of the status page, which is written to from the command
 747 * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
 748 * MI_STORE_DATA_IMM.
 749 *
 750 * The following dwords have a reserved meaning:
 751 * 0x00: ISR copy, updated when an ISR bit not set in the HWSTAM changes.
 752 * 0x04: ring 0 head pointer
 753 * 0x05: ring 1 head pointer (915-class)
 754 * 0x06: ring 2 head pointer (915-class)
 755 * 0x10-0x1b: Context status DWords (GM45)
 756 * 0x1f: Last written status offset. (GM45)
 757 * 0x20-0x2f: Reserved (Gen6+)
 758 *
 759 * The area from dword 0x30 to 0x3ff is available for driver usage.
 760 */
 761#define I915_GEM_HWS_INDEX              0x30
 762#define I915_GEM_HWS_INDEX_ADDR (I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
 763#define I915_GEM_HWS_PREEMPT_INDEX      0x32
 764#define I915_GEM_HWS_PREEMPT_ADDR (I915_GEM_HWS_PREEMPT_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
 765#define I915_GEM_HWS_SCRATCH_INDEX      0x40
 766#define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
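     /*
      * For example, I915_GEM_HWS_INDEX_ADDR is the dword index 0x30 scaled by
      * MI_STORE_DWORD_INDEX_SHIFT into a byte offset (0xc0) within the status
      * page; intel_hws_seqno_address() below adds the page's ggtt_offset to
      * turn that into the address written by the breadcrumb.
      */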
 767
 768#define I915_HWS_CSB_BUF0_INDEX         0x10
 769#define I915_HWS_CSB_WRITE_INDEX        0x1f
 770#define CNL_HWS_CSB_WRITE_INDEX         0x2f
 771
 772struct intel_ring *
 773intel_engine_create_ring(struct intel_engine_cs *engine,
 774                         struct i915_timeline *timeline,
 775                         int size);
 776int intel_ring_pin(struct intel_ring *ring,
 777                   struct drm_i915_private *i915,
 778                   unsigned int offset_bias);
 779void intel_ring_reset(struct intel_ring *ring, u32 tail);
 780unsigned int intel_ring_update_space(struct intel_ring *ring);
 781void intel_ring_unpin(struct intel_ring *ring);
 782void intel_ring_free(struct intel_ring *ring);
 783
 784void intel_engine_stop(struct intel_engine_cs *engine);
 785void intel_engine_cleanup(struct intel_engine_cs *engine);
 786
 787void intel_legacy_submission_resume(struct drm_i915_private *dev_priv);
 788
 789int __must_check intel_ring_cacheline_align(struct i915_request *rq);
 790
 791int intel_ring_wait_for_space(struct intel_ring *ring, unsigned int bytes);
 792u32 __must_check *intel_ring_begin(struct i915_request *rq, unsigned int n);
 793
 794static inline void intel_ring_advance(struct i915_request *rq, u32 *cs)
 795{
 796        /* Dummy function.
 797         *
 798         * This serves as a placeholder in the code so that the reader
 799         * can compare against the preceding intel_ring_begin() and
 800         * check that the number of dwords emitted matches the space
 801         * reserved for the command packet (i.e. the value passed to
 802         * intel_ring_begin()).
 803         */
 804        GEM_BUG_ON((rq->ring->vaddr + rq->ring->emit) != cs);
 805}
 806
 807static inline u32 intel_ring_wrap(const struct intel_ring *ring, u32 pos)
 808{
 809        return pos & (ring->size - 1);
 810}
 811
 812static inline u32 intel_ring_offset(const struct i915_request *rq, void *addr)
 813{
 814        /* Don't write ring->size (equivalent to 0) as that hangs some GPUs. */
 815        u32 offset = addr - rq->ring->vaddr;
 816        GEM_BUG_ON(offset > rq->ring->size);
 817        return intel_ring_wrap(rq->ring, offset);
 818}
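     /*
      * For example, with a 4096-byte ring intel_ring_wrap(ring, 4096) is 0:
      * pos & (ring->size - 1) relies on the ring size being a power of two,
      * and is how intel_ring_offset() above folds an offset of ring->size
      * back to 0 rather than emitting it.
      */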
 819
 820static inline void
 821assert_ring_tail_valid(const struct intel_ring *ring, unsigned int tail)
 822{
 823        /* We could combine these into a single tail operation, but keeping
  824         * them as separate tests will help identify the cause should one
 825         * ever fire.
 826         */
 827        GEM_BUG_ON(!IS_ALIGNED(tail, 8));
 828        GEM_BUG_ON(tail >= ring->size);
 829
 830        /*
 831         * "Ring Buffer Use"
 832         *      Gen2 BSpec "1. Programming Environment" / 1.4.4.6
 833         *      Gen3 BSpec "1c Memory Interface Functions" / 2.3.4.5
 834         *      Gen4+ BSpec "1c Memory Interface and Command Stream" / 5.3.4.5
 835         * "If the Ring Buffer Head Pointer and the Tail Pointer are on the
 836         * same cacheline, the Head Pointer must not be greater than the Tail
 837         * Pointer."
 838         *
  839         * We use ring->head as the last known location of the actual RING_HEAD;
  840         * the hardware may have advanced beyond it, but in the worst case it is
  841         * still equal to ring->head, and so we should never program RING_TAIL to
  842         * advance into the same cacheline as ring->head.
 843         */
 844#define cacheline(a) round_down(a, CACHELINE_BYTES)
 845        GEM_BUG_ON(cacheline(tail) == cacheline(ring->head) &&
 846                   tail < ring->head);
 847#undef cacheline
 848}
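     /*
      * Worked example of the rule above, assuming CACHELINE_BYTES of 64:
      * programming a tail of 0x1008 while ring->head is 0x1030 puts both in
      * the cacheline at 0x1000 with tail < head, and so trips the assert;
      * a tail of 0x1040 lands in the next cacheline and is fine.
      */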
 849
 850static inline unsigned int
 851intel_ring_set_tail(struct intel_ring *ring, unsigned int tail)
 852{
  853        /* Whilst writes to the tail are strictly ordered, there is no
 854         * serialisation between readers and the writers. The tail may be
 855         * read by i915_request_retire() just as it is being updated
 856         * by execlists, as although the breadcrumb is complete, the context
 857         * switch hasn't been seen.
 858         */
 859        assert_ring_tail_valid(ring, tail);
 860        ring->tail = tail;
 861        return tail;
 862}
 863
 864void intel_engine_init_global_seqno(struct intel_engine_cs *engine, u32 seqno);
 865
 866void intel_engine_setup_common(struct intel_engine_cs *engine);
 867int intel_engine_init_common(struct intel_engine_cs *engine);
 868int intel_engine_create_scratch(struct intel_engine_cs *engine, int size);
 869void intel_engine_cleanup_common(struct intel_engine_cs *engine);
 870
 871int intel_init_render_ring_buffer(struct intel_engine_cs *engine);
 872int intel_init_bsd_ring_buffer(struct intel_engine_cs *engine);
 873int intel_init_blt_ring_buffer(struct intel_engine_cs *engine);
 874int intel_init_vebox_ring_buffer(struct intel_engine_cs *engine);
 875
 876u64 intel_engine_get_active_head(const struct intel_engine_cs *engine);
 877u64 intel_engine_get_last_batch_head(const struct intel_engine_cs *engine);
 878
 879static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine)
 880{
 881        return intel_read_status_page(engine, I915_GEM_HWS_INDEX);
 882}
 883
 884static inline u32 intel_engine_last_submit(struct intel_engine_cs *engine)
 885{
 886        /* We are only peeking at the tail of the submit queue (and not the
 887         * queue itself) in order to gain a hint as to the current active
 888         * state of the engine. Callers are not expected to be taking
 889         * engine->timeline->lock, nor are they expected to be concerned
 890         * wtih serialising this hint with anything, so document it as
 891         * a hint and nothing more.
 892         */
 893        return READ_ONCE(engine->timeline.seqno);
 894}
 895
 896void intel_engine_get_instdone(struct intel_engine_cs *engine,
 897                               struct intel_instdone *instdone);
 898
 899/*
 900 * Arbitrary size for largest possible 'add request' sequence. The code paths
 901 * are complex and variable. Empirical measurement shows that the worst case
 902 * is BDW at 192 bytes (6 + 6 + 36 dwords), then ILK at 136 bytes. However,
 903 * we need to allocate double the largest single packet within that emission
 904 * to account for tail wraparound (so 6 + 6 + 72 dwords for BDW).
 905 */
 906#define MIN_SPACE_FOR_ADD_REQUEST 336
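     /*
      * i.e. for the BDW worst case above: 6 + 6 + 72 = 84 dwords, and
      * 84 * sizeof(u32) = 336 bytes.
      */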
 907
 908static inline u32 intel_hws_seqno_address(struct intel_engine_cs *engine)
 909{
 910        return engine->status_page.ggtt_offset + I915_GEM_HWS_INDEX_ADDR;
 911}
 912
 913static inline u32 intel_hws_preempt_done_address(struct intel_engine_cs *engine)
 914{
 915        return engine->status_page.ggtt_offset + I915_GEM_HWS_PREEMPT_ADDR;
 916}
 917
 918/* intel_breadcrumbs.c -- user interrupt bottom-half for waiters */
 919int intel_engine_init_breadcrumbs(struct intel_engine_cs *engine);
 920
 921static inline void intel_wait_init(struct intel_wait *wait,
 922                                   struct i915_request *rq)
 923{
 924        wait->tsk = current;
 925        wait->request = rq;
 926}
 927
 928static inline void intel_wait_init_for_seqno(struct intel_wait *wait, u32 seqno)
 929{
 930        wait->tsk = current;
 931        wait->seqno = seqno;
 932}
 933
 934static inline bool intel_wait_has_seqno(const struct intel_wait *wait)
 935{
 936        return wait->seqno;
 937}
 938
 939static inline bool
 940intel_wait_update_seqno(struct intel_wait *wait, u32 seqno)
 941{
 942        wait->seqno = seqno;
 943        return intel_wait_has_seqno(wait);
 944}
 945
 946static inline bool
 947intel_wait_update_request(struct intel_wait *wait,
 948                          const struct i915_request *rq)
 949{
 950        return intel_wait_update_seqno(wait, i915_request_global_seqno(rq));
 951}
 952
 953static inline bool
 954intel_wait_check_seqno(const struct intel_wait *wait, u32 seqno)
 955{
 956        return wait->seqno == seqno;
 957}
 958
 959static inline bool
 960intel_wait_check_request(const struct intel_wait *wait,
 961                         const struct i915_request *rq)
 962{
 963        return intel_wait_check_seqno(wait, i915_request_global_seqno(rq));
 964}
 965
 966static inline bool intel_wait_complete(const struct intel_wait *wait)
 967{
 968        return RB_EMPTY_NODE(&wait->node);
 969}
 970
 971bool intel_engine_add_wait(struct intel_engine_cs *engine,
 972                           struct intel_wait *wait);
 973void intel_engine_remove_wait(struct intel_engine_cs *engine,
 974                              struct intel_wait *wait);
 975bool intel_engine_enable_signaling(struct i915_request *request, bool wakeup);
 976void intel_engine_cancel_signaling(struct i915_request *request);
 977
 978static inline bool intel_engine_has_waiter(const struct intel_engine_cs *engine)
 979{
 980        return READ_ONCE(engine->breadcrumbs.irq_wait);
 981}
 982
 983unsigned int intel_engine_wakeup(struct intel_engine_cs *engine);
 984#define ENGINE_WAKEUP_WAITER BIT(0)
 985#define ENGINE_WAKEUP_ASLEEP BIT(1)
 986
 987void intel_engine_pin_breadcrumbs_irq(struct intel_engine_cs *engine);
 988void intel_engine_unpin_breadcrumbs_irq(struct intel_engine_cs *engine);
 989
 990void __intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine);
 991void intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine);
 992
 993void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine);
 994void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine);
 995
 996static inline u32 *gen8_emit_pipe_control(u32 *batch, u32 flags, u32 offset)
 997{
 998        memset(batch, 0, 6 * sizeof(u32));
 999
1000        batch[0] = GFX_OP_PIPE_CONTROL(6);
1001        batch[1] = flags;
1002        batch[2] = offset;
1003
1004        return batch + 6;
1005}
1006
1007static inline u32 *
1008gen8_emit_ggtt_write_rcs(u32 *cs, u32 value, u32 gtt_offset)
1009{
 1010        /* We're using a qword write; the offset should be aligned to 8 bytes. */
1011        GEM_BUG_ON(!IS_ALIGNED(gtt_offset, 8));
1012
 1013        /* w/a: for post-sync ops following a GPGPU operation we
1014         * need a prior CS_STALL, which is emitted by the flush
1015         * following the batch.
1016         */
1017        *cs++ = GFX_OP_PIPE_CONTROL(6);
1018        *cs++ = PIPE_CONTROL_GLOBAL_GTT_IVB | PIPE_CONTROL_CS_STALL |
1019                PIPE_CONTROL_QW_WRITE;
1020        *cs++ = gtt_offset;
1021        *cs++ = 0;
1022        *cs++ = value;
1023        /* We're thrashing one dword of HWS. */
1024        *cs++ = 0;
1025
1026        return cs;
1027}
1028
1029static inline u32 *
1030gen8_emit_ggtt_write(u32 *cs, u32 value, u32 gtt_offset)
1031{
1032        /* w/a: bit 5 needs to be zero for MI_FLUSH_DW address. */
1033        GEM_BUG_ON(gtt_offset & (1 << 5));
1034        /* Offset should be aligned to 8 bytes for both (QW/DW) write types */
1035        GEM_BUG_ON(!IS_ALIGNED(gtt_offset, 8));
1036
1037        *cs++ = (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW;
1038        *cs++ = gtt_offset | MI_FLUSH_DW_USE_GTT;
1039        *cs++ = 0;
1040        *cs++ = value;
1041
1042        return cs;
1043}
1044
1045bool intel_engine_is_idle(struct intel_engine_cs *engine);
1046bool intel_engines_are_idle(struct drm_i915_private *dev_priv);
1047
1048bool intel_engine_has_kernel_context(const struct intel_engine_cs *engine);
1049
1050void intel_engines_park(struct drm_i915_private *i915);
1051void intel_engines_unpark(struct drm_i915_private *i915);
1052
1053void intel_engines_reset_default_submission(struct drm_i915_private *i915);
1054unsigned int intel_engines_has_context_isolation(struct drm_i915_private *i915);
1055
1056bool intel_engine_can_store_dword(struct intel_engine_cs *engine);
1057
1058__printf(3, 4)
1059void intel_engine_dump(struct intel_engine_cs *engine,
1060                       struct drm_printer *m,
1061                       const char *header, ...);
1062
1063struct intel_engine_cs *
1064intel_engine_lookup_user(struct drm_i915_private *i915, u8 class, u8 instance);
1065
1066static inline void intel_engine_context_in(struct intel_engine_cs *engine)
1067{
1068        unsigned long flags;
1069
1070        if (READ_ONCE(engine->stats.enabled) == 0)
1071                return;
1072
1073        write_seqlock_irqsave(&engine->stats.lock, flags);
1074
1075        if (engine->stats.enabled > 0) {
1076                if (engine->stats.active++ == 0)
1077                        engine->stats.start = ktime_get();
1078                GEM_BUG_ON(engine->stats.active == 0);
1079        }
1080
1081        write_sequnlock_irqrestore(&engine->stats.lock, flags);
1082}
1083
1084static inline void intel_engine_context_out(struct intel_engine_cs *engine)
1085{
1086        unsigned long flags;
1087
1088        if (READ_ONCE(engine->stats.enabled) == 0)
1089                return;
1090
1091        write_seqlock_irqsave(&engine->stats.lock, flags);
1092
1093        if (engine->stats.enabled > 0) {
1094                ktime_t last;
1095
1096                if (engine->stats.active && --engine->stats.active == 0) {
1097                        /*
 1098                         * Decrement the active context count and, in case the GPU
 1099                         * is now idle, add the elapsed time to the running total.
1100                         */
1101                        last = ktime_sub(ktime_get(), engine->stats.start);
1102
1103                        engine->stats.total = ktime_add(engine->stats.total,
1104                                                        last);
1105                } else if (engine->stats.active == 0) {
1106                        /*
1107                         * After turning on engine stats, context out might be
1108                         * the first event in which case we account from the
1109                         * time stats gathering was turned on.
1110                         */
1111                        last = ktime_sub(ktime_get(), engine->stats.enabled_at);
1112
1113                        engine->stats.total = ktime_add(engine->stats.total,
1114                                                        last);
1115                }
1116        }
1117
1118        write_sequnlock_irqrestore(&engine->stats.lock, flags);
1119}
1120
1121int intel_enable_engine_stats(struct intel_engine_cs *engine);
1122void intel_disable_engine_stats(struct intel_engine_cs *engine);
1123
1124ktime_t intel_engine_get_busy_time(struct intel_engine_cs *engine);
1125
1126#endif /* _INTEL_RINGBUFFER_H_ */
1127