linux/drivers/gpu/drm/i915/intel_ringbuffer.h
#ifndef _INTEL_RINGBUFFER_H_
#define _INTEL_RINGBUFFER_H_

#include <linux/hashtable.h>
#include "i915_gem_batch_pool.h"
#include "i915_gem_request.h"
#include "i915_gem_timeline.h"

#define I915_CMD_HASH_ORDER 9

/* Early gen2 devices have a cacheline of just 32 bytes, using 64 is overkill,
 * but keeps the logic simple. Indeed, the whole purpose of this macro is just
 * to give some indication as to some of the magic values used in the various
 * workarounds!
 */
#define CACHELINE_BYTES 64
#define CACHELINE_DWORDS (CACHELINE_BYTES / sizeof(uint32_t))

/*
 * Gen2 BSpec "1. Programming Environment" / 1.4.4.6 "Ring Buffer Use"
 * Gen3 BSpec "vol1c Memory Interface Functions" / 2.3.4.5 "Ring Buffer Use"
 * Gen4+ BSpec "vol1c Memory Interface and Command Stream" / 5.3.4.5 "Ring Buffer Use"
 *
 * "If the Ring Buffer Head Pointer and the Tail Pointer are on the same
 * cacheline, the Head Pointer must not be greater than the Tail
 * Pointer."
 */
#define I915_RING_FREE_SPACE 64

struct intel_hw_status_page {
        struct i915_vma *vma;
        u32 *page_addr;
        u32 ggtt_offset;
};

#define I915_READ_TAIL(engine) I915_READ(RING_TAIL((engine)->mmio_base))
#define I915_WRITE_TAIL(engine, val) I915_WRITE(RING_TAIL((engine)->mmio_base), val)

#define I915_READ_START(engine) I915_READ(RING_START((engine)->mmio_base))
#define I915_WRITE_START(engine, val) I915_WRITE(RING_START((engine)->mmio_base), val)

#define I915_READ_HEAD(engine)  I915_READ(RING_HEAD((engine)->mmio_base))
#define I915_WRITE_HEAD(engine, val) I915_WRITE(RING_HEAD((engine)->mmio_base), val)

#define I915_READ_CTL(engine) I915_READ(RING_CTL((engine)->mmio_base))
#define I915_WRITE_CTL(engine, val) I915_WRITE(RING_CTL((engine)->mmio_base), val)

#define I915_READ_IMR(engine) I915_READ(RING_IMR((engine)->mmio_base))
#define I915_WRITE_IMR(engine, val) I915_WRITE(RING_IMR((engine)->mmio_base), val)

#define I915_READ_MODE(engine) I915_READ(RING_MI_MODE((engine)->mmio_base))
#define I915_WRITE_MODE(engine, val) I915_WRITE(RING_MI_MODE((engine)->mmio_base), val)

/* seqno size is actually only a uint32, but since we plan to use MI_FLUSH_DW to
 * do the writes, and that must have qword-aligned offsets, simply pretend it's
 * 8 bytes.
 */
#define gen8_semaphore_seqno_size sizeof(uint64_t)
#define GEN8_SEMAPHORE_OFFSET(__from, __to)                          \
        (((__from) * I915_NUM_ENGINES  + (__to)) * gen8_semaphore_seqno_size)
#define GEN8_SIGNAL_OFFSET(__ring, to)                       \
        (dev_priv->semaphore->node.start + \
         GEN8_SEMAPHORE_OFFSET((__ring)->id, (to)))
#define GEN8_WAIT_OFFSET(__ring, from)                       \
        (dev_priv->semaphore->node.start + \
         GEN8_SEMAPHORE_OFFSET(from, (__ring)->id))
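
/*
 * GEN8_SEMAPHORE_OFFSET() implements the f(x, y) generalisation from the
 * gen8 signal/wait table documented further down in this file: each engine
 * owns I915_NUM_ENGINES consecutive qword slots within the semaphore object.
 * Worked example (assuming I915_NUM_ENGINES == 5): the slot engine id 1 uses
 * to signal engine id 3 sits at (1 * 5 + 3) * 8 = 0x40 from the start of the
 * semaphore object.
 */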

enum intel_engine_hangcheck_action {
        ENGINE_IDLE = 0,
        ENGINE_WAIT,
        ENGINE_ACTIVE_SEQNO,
        ENGINE_ACTIVE_HEAD,
        ENGINE_ACTIVE_SUBUNITS,
        ENGINE_WAIT_KICK,
        ENGINE_DEAD,
};

static inline const char *
hangcheck_action_to_str(const enum intel_engine_hangcheck_action a)
{
        switch (a) {
        case ENGINE_IDLE:
                return "idle";
        case ENGINE_WAIT:
                return "wait";
        case ENGINE_ACTIVE_SEQNO:
                return "active seqno";
        case ENGINE_ACTIVE_HEAD:
                return "active head";
        case ENGINE_ACTIVE_SUBUNITS:
                return "active subunits";
        case ENGINE_WAIT_KICK:
                return "wait kick";
        case ENGINE_DEAD:
                return "dead";
        }

        return "unknown";
}

#define I915_MAX_SLICES 3
#define I915_MAX_SUBSLICES 3

#define instdone_slice_mask(dev_priv__) \
        (INTEL_GEN(dev_priv__) == 7 ? \
         1 : INTEL_INFO(dev_priv__)->sseu.slice_mask)

#define instdone_subslice_mask(dev_priv__) \
        (INTEL_GEN(dev_priv__) == 7 ? \
         1 : INTEL_INFO(dev_priv__)->sseu.subslice_mask)

#define for_each_instdone_slice_subslice(dev_priv__, slice__, subslice__) \
        for ((slice__) = 0, (subslice__) = 0; \
             (slice__) < I915_MAX_SLICES; \
             (subslice__) = ((subslice__) + 1) < I915_MAX_SUBSLICES ? (subslice__) + 1 : 0, \
               (slice__) += ((subslice__) == 0)) \
                for_each_if((BIT(slice__) & instdone_slice_mask(dev_priv__)) && \
                            (BIT(subslice__) & instdone_subslice_mask(dev_priv__)))

struct intel_instdone {
        u32 instdone;
        /* The following exist only in the RCS engine */
        u32 slice_common;
        u32 sampler[I915_MAX_SLICES][I915_MAX_SUBSLICES];
        u32 row[I915_MAX_SLICES][I915_MAX_SUBSLICES];
};
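
/*
 * Minimal usage sketch for the instdone iterator above; illustrative only
 * (hence #if 0). It assumes the struct has been populated, e.g. by
 * intel_engine_get_instdone() declared later in this file.
 */
#if 0
static void example_dump_instdone(struct drm_i915_private *dev_priv,
                                  const struct intel_instdone *instdone)
{
        int slice, subslice;

        for_each_instdone_slice_subslice(dev_priv, slice, subslice)
                pr_debug("sampler[%d][%d]=%08x row[%d][%d]=%08x\n",
                         slice, subslice, instdone->sampler[slice][subslice],
                         slice, subslice, instdone->row[slice][subslice]);
}
#endif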

struct intel_engine_hangcheck {
        u64 acthd;
        u32 seqno;
        enum intel_engine_hangcheck_action action;
        unsigned long action_timestamp;
        int deadlock;
        struct intel_instdone instdone;
        bool stalled;
};

struct intel_ring {
        struct i915_vma *vma;
        void *vaddr;

        struct intel_engine_cs *engine;

        struct list_head request_list;

        u32 head;
        u32 tail;
        int space;
        int size;
        int effective_size;

        /** We track the position of the requests in the ring buffer, and
         * when each is retired we increment last_retired_head as the GPU
         * must have finished processing the request and so we know we
         * can advance the ringbuffer up to that position.
         *
         * last_retired_head is set to -1 after the value is consumed so
         * we can detect new retirements.
         */
        u32 last_retired_head;
};

struct i915_gem_context;
struct drm_i915_reg_table;

/*
 * we use a single page to load ctx workarounds so all of these
 * values are expressed in terms of dwords
 *
 * struct i915_wa_ctx_bb:
 *  offset: specifies the batch starting position, which is also helpful in
 *    case we want to have multiple batches at different offsets based on
 *    some criteria. It is not a requirement at the moment but provides
 *    an option for future use.
 *  size: size of the batch in DWORDS
 */
struct i915_ctx_workarounds {
        struct i915_wa_ctx_bb {
                u32 offset;
                u32 size;
        } indirect_ctx, per_ctx;
        struct i915_vma *vma;
};

struct drm_i915_gem_request;
struct intel_render_state;

struct intel_engine_cs {
        struct drm_i915_private *i915;
        const char      *name;
        enum intel_engine_id {
                RCS = 0,
                BCS,
                VCS,
                VCS2,   /* Keep instances of the same type engine together. */
                VECS
        } id;
#define _VCS(n) (VCS + (n))
        unsigned int exec_id;
        enum intel_engine_hw_id {
                RCS_HW = 0,
                VCS_HW,
                BCS_HW,
                VECS_HW,
                VCS2_HW
        } hw_id;
        enum intel_engine_hw_id guc_id; /* XXX same as hw_id? */
        u32             mmio_base;
        unsigned int irq_shift;
        struct intel_ring *buffer;
        struct intel_timeline *timeline;

        struct intel_render_state *render_state;

        /* Rather than have every client wait upon all user interrupts,
         * with the herd waking after every interrupt and each doing the
         * heavyweight seqno dance, we delegate the task (of being the
         * bottom-half of the user interrupt) to the first client. After
         * every interrupt, we wake up one client, who does the heavyweight
         * coherent seqno read and either goes back to sleep (if incomplete),
         * or wakes up all the completed clients in parallel, before then
         * transferring the bottom-half status to the next client in the queue.
         *
         * Compared to walking the entire list of waiters in a single dedicated
         * bottom-half, we reduce the latency of the first waiter by avoiding
         * a context switch, but incur additional coherent seqno reads when
         * following the chain of request breadcrumbs. Since it is most likely
         * that we have a single client waiting on each seqno, then reducing
         * the overhead of waking that client is much preferred.
         */
        struct intel_breadcrumbs {
                struct task_struct __rcu *irq_seqno_bh; /* bh for interrupts */
                bool irq_posted;

                spinlock_t lock; /* protects the lists of requests; irqsafe */
                struct rb_root waiters; /* sorted by retirement, priority */
                struct rb_root signals; /* sorted by retirement */
                struct intel_wait *first_wait; /* oldest waiter by retirement */
                struct task_struct *signaler; /* used for fence signalling */
                struct drm_i915_gem_request *first_signal;
                struct timer_list fake_irq; /* used after a missed interrupt */
                struct timer_list hangcheck; /* detect missed interrupts */

                unsigned long timeout;

                bool irq_enabled : 1;
                bool rpm_wakelock : 1;
        } breadcrumbs;

        /*
         * A pool of objects to use as shadow copies of client batch buffers
         * when the command parser is enabled. Prevents the client from
         * modifying the batch contents after software parsing.
         */
        struct i915_gem_batch_pool batch_pool;

        struct intel_hw_status_page status_page;
        struct i915_ctx_workarounds wa_ctx;
        struct i915_vma *scratch;

        u32             irq_keep_mask; /* always keep these interrupts */
        u32             irq_enable_mask; /* bitmask to enable ring interrupt */
        void            (*irq_enable)(struct intel_engine_cs *engine);
        void            (*irq_disable)(struct intel_engine_cs *engine);

        int             (*init_hw)(struct intel_engine_cs *engine);
        void            (*reset_hw)(struct intel_engine_cs *engine,
                                    struct drm_i915_gem_request *req);

        int             (*context_pin)(struct intel_engine_cs *engine,
                                       struct i915_gem_context *ctx);
        void            (*context_unpin)(struct intel_engine_cs *engine,
                                         struct i915_gem_context *ctx);
        int             (*request_alloc)(struct drm_i915_gem_request *req);
        int             (*init_context)(struct drm_i915_gem_request *req);

        int             (*emit_flush)(struct drm_i915_gem_request *request,
                                      u32 mode);
#define EMIT_INVALIDATE BIT(0)
#define EMIT_FLUSH      BIT(1)
#define EMIT_BARRIER    (EMIT_INVALIDATE | EMIT_FLUSH)
        int             (*emit_bb_start)(struct drm_i915_gem_request *req,
                                         u64 offset, u32 length,
                                         unsigned int dispatch_flags);
#define I915_DISPATCH_SECURE BIT(0)
#define I915_DISPATCH_PINNED BIT(1)
#define I915_DISPATCH_RS     BIT(2)
        void            (*emit_breadcrumb)(struct drm_i915_gem_request *req,
                                           u32 *out);
        int             emit_breadcrumb_sz;

        /* Pass the request to the hardware queue (e.g. directly into
         * the legacy ringbuffer or to the end of an execlist).
         *
         * This is called from an atomic context with irqs disabled; must
         * be irq safe.
         */
        void            (*submit_request)(struct drm_i915_gem_request *req);

        /* Call when the priority on a request has changed and it and its
         * dependencies may need rescheduling. Note the request itself may
         * not be ready to run!
         *
         * Called under the struct_mutex.
         */
        void            (*schedule)(struct drm_i915_gem_request *request,
                                    int priority);

        /* Some chipsets are not quite as coherent as advertised and need
         * an expensive kick to force a true read of the up-to-date seqno.
         * However, the up-to-date seqno is not always required and the last
         * seen value is good enough. Note that the seqno will always be
         * monotonic, even if not coherent.
         */
        void            (*irq_seqno_barrier)(struct intel_engine_cs *engine);
        void            (*cleanup)(struct intel_engine_cs *engine);

        /* GEN8 signal/wait table - never trust comments!
         *        signal to     signal to    signal to   signal to      signal to
         *          RCS            VCS          BCS        VECS          VCS2
         *      --------------------------------------------------------------------
         *  RCS | NOP (0x00) | VCS (0x08) | BCS (0x10) | VECS (0x18) | VCS2 (0x20) |
         *      |-------------------------------------------------------------------
         *  VCS | RCS (0x28) | NOP (0x30) | BCS (0x38) | VECS (0x40) | VCS2 (0x48) |
         *      |-------------------------------------------------------------------
         *  BCS | RCS (0x50) | VCS (0x58) | NOP (0x60) | VECS (0x68) | VCS2 (0x70) |
         *      |-------------------------------------------------------------------
         * VECS | RCS (0x78) | VCS (0x80) | BCS (0x88) |  NOP (0x90) | VCS2 (0x98) |
         *      |-------------------------------------------------------------------
         * VCS2 | RCS (0xa0) | VCS (0xa8) | BCS (0xb0) | VECS (0xb8) | NOP  (0xc0) |
         *      |-------------------------------------------------------------------
         *
         * Generalization:
         *  f(x, y) := (x->id * NUM_RINGS * seqno_size) + (seqno_size * y->id)
         *  ie. transpose of g(x, y)
         *
         *       sync from      sync from    sync from    sync from     sync from
         *          RCS            VCS          BCS        VECS          VCS2
         *      --------------------------------------------------------------------
         *  RCS | NOP (0x00) | VCS (0x28) | BCS (0x50) | VECS (0x78) | VCS2 (0xa0) |
         *      |-------------------------------------------------------------------
         *  VCS | RCS (0x08) | NOP (0x30) | BCS (0x58) | VECS (0x80) | VCS2 (0xa8) |
         *      |-------------------------------------------------------------------
         *  BCS | RCS (0x10) | VCS (0x38) | NOP (0x60) | VECS (0x88) | VCS2 (0xb0) |
         *      |-------------------------------------------------------------------
         * VECS | RCS (0x18) | VCS (0x40) | BCS (0x68) |  NOP (0x90) | VCS2 (0xb8) |
         *      |-------------------------------------------------------------------
         * VCS2 | RCS (0x20) | VCS (0x48) | BCS (0x70) | VECS (0x98) |  NOP (0xc0) |
         *      |-------------------------------------------------------------------
         *
         * Generalization:
         *  g(x, y) := (y->id * NUM_RINGS * seqno_size) + (seqno_size * x->id)
         *  ie. transpose of f(x, y)
         */
        struct {
                union {
#define GEN6_SEMAPHORE_LAST     VECS_HW
#define GEN6_NUM_SEMAPHORES     (GEN6_SEMAPHORE_LAST + 1)
#define GEN6_SEMAPHORES_MASK    GENMASK(GEN6_SEMAPHORE_LAST, 0)
                        struct {
                                /* our mbox written by others */
                                u32             wait[GEN6_NUM_SEMAPHORES];
                                /* mboxes this ring signals to */
                                i915_reg_t      signal[GEN6_NUM_SEMAPHORES];
                        } mbox;
                        u64             signal_ggtt[I915_NUM_ENGINES];
                };

                /* AKA wait() */
                int     (*sync_to)(struct drm_i915_gem_request *req,
                                   struct drm_i915_gem_request *signal);
                u32     *(*signal)(struct drm_i915_gem_request *req, u32 *out);
        } semaphore;

        /* Execlists */
        struct tasklet_struct irq_tasklet;
        struct execlist_port {
                struct drm_i915_gem_request *request;
                unsigned int count;
        } execlist_port[2];
        struct rb_root execlist_queue;
        struct rb_node *execlist_first;
        unsigned int fw_domains;
        bool disable_lite_restore_wa;
        bool preempt_wa;
        u32 ctx_desc_template;

        /* Contexts are pinned whilst they are active on the GPU. The last
         * context executed remains active whilst the GPU is idle - the
         * switch away and write to the context object only occur on the
         * next execution.  Contexts are only unpinned on retirement of the
         * following request, ensuring that we can always write to the object
         * on the context switch even after idling. Across suspend, we switch
         * to the kernel context and trash it as the save may not happen
         * before the hardware is powered down.
         */
        struct i915_gem_context *last_retired_context;

        /* We track the current MI_SET_CONTEXT in order to eliminate
         * redundant context switches. This presumes that requests are not
         * reordered! Or, when they are, that the tracking is updated along
         * with the emission of individual requests into the legacy command
         * stream (ring).
         */
        struct i915_gem_context *legacy_active_context;

        /* status_notifier: list of callbacks for context-switch changes */
        struct atomic_notifier_head context_status_notifier;

        struct intel_engine_hangcheck hangcheck;

        bool needs_cmd_parser;

        /*
         * Table of commands the command parser needs to know about
         * for this engine.
         */
        DECLARE_HASHTABLE(cmd_hash, I915_CMD_HASH_ORDER);

        /*
         * Table of registers allowed in commands that read/write registers.
         */
        const struct drm_i915_reg_table *reg_tables;
        int reg_table_count;

        /*
         * Returns the bitmask for the length field of the specified command.
         * Return 0 for an unrecognized/invalid command.
         *
         * If the command parser finds an entry for a command in the engine's
         * cmd_tables, it gets the command's length based on the table entry.
         * If not, it calls this function to determine the per-engine length
         * field encoding for the command (i.e. different opcode ranges use
         * certain bits to encode the command length in the header).
         */
        u32 (*get_cmd_length_mask)(u32 cmd_header);
};

static inline unsigned
intel_engine_flag(const struct intel_engine_cs *engine)
{
        return 1 << engine->id;
}

static inline void
intel_flush_status_page(struct intel_engine_cs *engine, int reg)
{
        mb();
        clflush(&engine->status_page.page_addr[reg]);
        mb();
}

static inline u32
intel_read_status_page(struct intel_engine_cs *engine, int reg)
{
        /* Ensure that the compiler doesn't optimize away the load. */
        return READ_ONCE(engine->status_page.page_addr[reg]);
}

static inline void
intel_write_status_page(struct intel_engine_cs *engine,
                        int reg, u32 value)
{
        engine->status_page.page_addr[reg] = value;
}

/*
 * Reads a dword out of the status page, which is written to from the command
 * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
 * MI_STORE_DATA_IMM.
 *
 * The following dwords have a reserved meaning:
 * 0x00: ISR copy, updated when an ISR bit not set in the HWSTAM changes.
 * 0x04: ring 0 head pointer
 * 0x05: ring 1 head pointer (915-class)
 * 0x06: ring 2 head pointer (915-class)
 * 0x10-0x1b: Context status DWords (GM45)
 * 0x1f: Last written status offset. (GM45)
 * 0x20-0x2f: Reserved (Gen6+)
 *
 * The area from dword 0x30 to 0x3ff is available for driver usage.
 */
#define I915_GEM_HWS_INDEX              0x30
#define I915_GEM_HWS_INDEX_ADDR (I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
#define I915_GEM_HWS_SCRATCH_INDEX      0x40
#define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
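
/*
 * Note: the _ADDR variants above shift the dword index into the byte-offset
 * form used with MI_STORE_DWORD_INDEX (and by intel_hws_seqno_address()
 * below), whereas the status-page accessors take the plain dword index,
 * e.g. (illustrative only):
 *
 *      u32 seqno = intel_read_status_page(engine, I915_GEM_HWS_INDEX);
 */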

struct intel_ring *
intel_engine_create_ring(struct intel_engine_cs *engine, int size);
int intel_ring_pin(struct intel_ring *ring, unsigned int offset_bias);
void intel_ring_unpin(struct intel_ring *ring);
void intel_ring_free(struct intel_ring *ring);

void intel_engine_stop(struct intel_engine_cs *engine);
void intel_engine_cleanup(struct intel_engine_cs *engine);

void intel_legacy_submission_resume(struct drm_i915_private *dev_priv);

int __must_check intel_ring_begin(struct drm_i915_gem_request *req, int n);
int __must_check intel_ring_cacheline_align(struct drm_i915_gem_request *req);

static inline void intel_ring_emit(struct intel_ring *ring, u32 data)
{
        *(uint32_t *)(ring->vaddr + ring->tail) = data;
        ring->tail += 4;
}

static inline void intel_ring_emit_reg(struct intel_ring *ring, i915_reg_t reg)
{
        intel_ring_emit(ring, i915_mmio_reg_offset(reg));
}

static inline void intel_ring_advance(struct intel_ring *ring)
{
        /* Dummy function.
         *
         * This serves as a placeholder in the code so that the reader
         * can compare against the preceding intel_ring_begin() and
         * check that the number of dwords emitted matches the space
         * reserved for the command packet (i.e. the value passed to
         * intel_ring_begin()).
         */
}
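
/*
 * Typical begin/emit/advance pattern, shown purely as an illustrative sketch
 * (hence #if 0). It assumes req->ring points at the engine's legacy ring and
 * that MI_NOOP is available from i915_reg.h, as in the .c users of this
 * header.
 */
#if 0
static int example_emit_two_noops(struct drm_i915_gem_request *req)
{
        struct intel_ring *ring = req->ring;
        int ret;

        ret = intel_ring_begin(req, 2); /* reserve space for 2 dwords */
        if (ret)
                return ret;

        intel_ring_emit(ring, MI_NOOP);
        intel_ring_emit(ring, MI_NOOP);
        intel_ring_advance(ring);       /* bookkeeping aid only, see above */

        return 0;
}
#endif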

static inline u32
intel_ring_wrap(const struct intel_ring *ring, u32 pos)
{
        return pos & (ring->size - 1);
}

static inline u32 intel_ring_offset(struct intel_ring *ring, void *addr)
{
        /* Don't write ring->size (equivalent to 0) as that hangs some GPUs. */
        u32 offset = addr - ring->vaddr;
        return intel_ring_wrap(ring, offset);
}
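
/*
 * intel_ring_wrap() relies on ring->size being a power of two so that the
 * mask wraps positions cleanly, e.g. for a 4096-byte ring,
 * intel_ring_wrap(ring, 4100) == 4. (Illustrative numbers only.)
 */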

int __intel_ring_space(int head, int tail, int size);
void intel_ring_update_space(struct intel_ring *ring);

void intel_engine_init_global_seqno(struct intel_engine_cs *engine, u32 seqno);

void intel_engine_setup_common(struct intel_engine_cs *engine);
int intel_engine_init_common(struct intel_engine_cs *engine);
int intel_engine_create_scratch(struct intel_engine_cs *engine, int size);
void intel_engine_cleanup_common(struct intel_engine_cs *engine);

int intel_init_render_ring_buffer(struct intel_engine_cs *engine);
int intel_init_bsd_ring_buffer(struct intel_engine_cs *engine);
int intel_init_bsd2_ring_buffer(struct intel_engine_cs *engine);
int intel_init_blt_ring_buffer(struct intel_engine_cs *engine);
int intel_init_vebox_ring_buffer(struct intel_engine_cs *engine);

u64 intel_engine_get_active_head(struct intel_engine_cs *engine);
u64 intel_engine_get_last_batch_head(struct intel_engine_cs *engine);

static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine)
{
        return intel_read_status_page(engine, I915_GEM_HWS_INDEX);
}

static inline u32 intel_engine_last_submit(struct intel_engine_cs *engine)
{
        /* We are only peeking at the tail of the submit queue (and not the
         * queue itself) in order to gain a hint as to the current active
         * state of the engine. Callers are not expected to be taking
         * engine->timeline->lock, nor are they expected to be concerned
         * with serialising this hint with anything, so document it as
         * a hint and nothing more.
         */
        return READ_ONCE(engine->timeline->last_submitted_seqno);
}

int init_workarounds_ring(struct intel_engine_cs *engine);

void intel_engine_get_instdone(struct intel_engine_cs *engine,
                               struct intel_instdone *instdone);

/*
 * Arbitrary size for largest possible 'add request' sequence. The code paths
 * are complex and variable. Empirical measurement shows that the worst case
 * is BDW at 192 bytes (6 + 6 + 36 dwords), then ILK at 136 bytes. However,
 * we need to allocate double the largest single packet within that emission
 * to account for tail wraparound (so 6 + 6 + 72 dwords for BDW, i.e.
 * 84 dwords * 4 bytes = 336 bytes).
 */
#define MIN_SPACE_FOR_ADD_REQUEST 336

static inline u32 intel_hws_seqno_address(struct intel_engine_cs *engine)
{
        return engine->status_page.ggtt_offset + I915_GEM_HWS_INDEX_ADDR;
}

/* intel_breadcrumbs.c -- user interrupt bottom-half for waiters */
int intel_engine_init_breadcrumbs(struct intel_engine_cs *engine);

static inline void intel_wait_init(struct intel_wait *wait, u32 seqno)
{
        wait->tsk = current;
        wait->seqno = seqno;
}

static inline bool intel_wait_complete(const struct intel_wait *wait)
{
        return RB_EMPTY_NODE(&wait->node);
}

bool intel_engine_add_wait(struct intel_engine_cs *engine,
                           struct intel_wait *wait);
void intel_engine_remove_wait(struct intel_engine_cs *engine,
                              struct intel_wait *wait);
void intel_engine_enable_signaling(struct drm_i915_gem_request *request);
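
/*
 * Greatly simplified waiter-side sketch, illustrative only (hence #if 0).
 * The real user is the request-wait path in i915_gem_request.c, which also
 * handles interruptible sleeps, timeouts and handing the irq bottom-half
 * over between waiters as described in the struct intel_breadcrumbs comment
 * above. i915_seqno_passed() is assumed to come from the included
 * i915_gem_request.h, and the scheduler calls from linux/sched.h.
 */
#if 0
static void example_wait_for_seqno(struct intel_engine_cs *engine, u32 seqno)
{
        struct intel_wait wait;

        intel_wait_init(&wait, seqno);
        intel_engine_add_wait(engine, &wait);

        for (;;) {
                set_current_state(TASK_UNINTERRUPTIBLE);
                if (i915_seqno_passed(intel_engine_get_seqno(engine), seqno))
                        break;
                schedule();
        }
        __set_current_state(TASK_RUNNING);

        intel_engine_remove_wait(engine, &wait);
}
#endif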

static inline bool intel_engine_has_waiter(const struct intel_engine_cs *engine)
{
        return rcu_access_pointer(engine->breadcrumbs.irq_seqno_bh);
}

static inline bool intel_engine_wakeup(const struct intel_engine_cs *engine)
{
        bool wakeup = false;

        /* Note that for this not to dangerously chase a dangling pointer,
         * we must hold the rcu_read_lock here.
         *
         * Also note that tsk is likely to be in !TASK_RUNNING state so an
         * early test for tsk->state != TASK_RUNNING before wake_up_process()
         * is unlikely to be beneficial.
         */
        if (intel_engine_has_waiter(engine)) {
                struct task_struct *tsk;

                rcu_read_lock();
                tsk = rcu_dereference(engine->breadcrumbs.irq_seqno_bh);
                if (tsk)
                        wakeup = wake_up_process(tsk);
                rcu_read_unlock();
        }

        return wakeup;
}

void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine);
void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine);
unsigned int intel_breadcrumbs_busy(struct drm_i915_private *i915);

#endif /* _INTEL_RINGBUFFER_H_ */