linux/drivers/gpu/drm/i915/i915_request.h
/*
 * Copyright © 2008-2018 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#ifndef I915_REQUEST_H
#define I915_REQUEST_H

#include <linux/dma-fence.h>
#include <linux/hrtimer.h>
#include <linux/irq_work.h>
#include <linux/llist.h>
#include <linux/lockdep.h>

#include "gem/i915_gem_context_types.h"
#include "gt/intel_context_types.h"
#include "gt/intel_engine_types.h"
#include "gt/intel_timeline_types.h"

#include "i915_gem.h"
#include "i915_scheduler.h"
#include "i915_selftest.h"
#include "i915_sw_fence.h"

#include <uapi/drm/i915_drm.h>

struct drm_file;
struct drm_i915_gem_object;
struct drm_printer;
struct i915_request;

struct i915_capture_list {
	struct i915_capture_list *next;
	struct i915_vma *vma;
};

#define RQ_TRACE(rq, fmt, ...) do {					\
	const struct i915_request *rq__ = (rq);				\
	ENGINE_TRACE(rq__->engine, "fence %llx:%lld, current %d " fmt,	\
		     rq__->fence.context, rq__->fence.seqno,		\
		     hwsp_seqno(rq__), ##__VA_ARGS__);			\
} while (0)
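
/*
 * Illustrative use (a sketch, not taken from this header): RQ_TRACE
 * prefixes the engine trace with the request's fence id and the breadcrumb
 * currently visible in the HWSP, so a bare message suffices, e.g.
 *
 *	RQ_TRACE(rq, "submitted\n");
 */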

enum {
	/*
	 * I915_FENCE_FLAG_ACTIVE - this request is currently submitted to HW.
	 *
	 * Set by __i915_request_submit() on handing over to HW, and cleared
	 * by __i915_request_unsubmit() if we preempt this request.
	 *
	 * Finally cleared for consistency on retiring the request, when
	 * we know the HW is no longer running this request.
	 *
	 * See i915_request_is_active()
	 */
	I915_FENCE_FLAG_ACTIVE = DMA_FENCE_FLAG_USER_BITS,

	/*
	 * I915_FENCE_FLAG_PQUEUE - this request is ready for execution
	 *
	 * Using the scheduler, when a request is ready for execution it is put
	 * into the priority queue, and removed from that queue when transferred
	 * to the HW runlists. We want to track its membership within the
	 * priority queue so that we can easily check before rescheduling.
	 *
	 * See i915_request_in_priority_queue()
	 */
	I915_FENCE_FLAG_PQUEUE,

	/*
	 * I915_FENCE_FLAG_HOLD - this request is currently on hold
	 *
	 * This request has been suspended, pending an ongoing investigation.
	 */
	I915_FENCE_FLAG_HOLD,

	/*
	 * I915_FENCE_FLAG_INITIAL_BREADCRUMB - this request has the initial
	 * breadcrumb that marks the end of semaphore waits and start of the
	 * user payload.
	 */
	I915_FENCE_FLAG_INITIAL_BREADCRUMB,

	/*
	 * I915_FENCE_FLAG_SIGNAL - this request is currently on signal_list
	 *
	 * Internal bookkeeping used by the breadcrumb code to track when
	 * a request is on the various signal_list.
	 */
	I915_FENCE_FLAG_SIGNAL,

	/*
	 * I915_FENCE_FLAG_NOPREEMPT - this request should not be preempted
	 *
	 * The execution of some requests should not be interrupted. This is
	 * a sensitive operation as it makes the request super important,
	 * blocking other higher priority work. Abuse of this flag will
	 * lead to quality of service issues.
	 */
	I915_FENCE_FLAG_NOPREEMPT,

	/*
	 * I915_FENCE_FLAG_SENTINEL - this request should be last in the queue
	 *
	 * A high priority sentinel request may be submitted to clear the
	 * submission queue. As it will be the only request in-flight, upon
	 * execution all other active requests will have been preempted and
	 * unsubmitted. This preemptive pulse is used to re-evaluate the
	 * in-flight requests, particularly in cases where an active context
	 * is banned and those active requests need to be cancelled.
	 */
	I915_FENCE_FLAG_SENTINEL,

	/*
	 * I915_FENCE_FLAG_BOOST - upclock the gpu for this request
	 *
	 * Some requests are more important than others! In particular, a
	 * request that the user is waiting on is typically required for
	 * interactive latency, which we want to minimise by upclocking
	 * the GPU. Here we track such boost requests on a per-request basis.
	 */
	I915_FENCE_FLAG_BOOST,
};

/**
 * Request queue structure.
 *
 * The request queue allows us to note sequence numbers that have been emitted
 * and may be associated with active buffers to be retired.
 *
 * By keeping this list, we can avoid having to do questionable sequence
 * number comparisons on buffer last_read|write_seqno. It also allows an
 * emission time to be associated with the request for tracking how far ahead
 * of the GPU the submission is.
 *
 * When modifying this structure be very aware that we perform a lockless
 * RCU lookup of it that may race against reallocation of the struct
 * from the slab freelist. We intentionally do not zero the structure on
 * allocation so that the lookup can use the dangling pointers (and is
 * cognisant that those pointers may be wrong). Instead, everything that
 * needs to be initialised must be done so explicitly.
 *
 * The requests are reference counted.
 */
struct i915_request {
	struct dma_fence fence;
	spinlock_t lock;

	/**
	 * Context and ring buffer related to this request
	 * Contexts are refcounted, so when this request is associated with a
	 * context, we must increment the context's refcount, to guarantee that
	 * it persists while any request is linked to it. Requests themselves
	 * are also refcounted, so the request will only be freed when the last
	 * reference to it is dismissed, and the code in
	 * i915_request_free() will then decrement the refcount on the
	 * context.
	 */
	struct intel_engine_cs *engine;
	struct intel_context *context;
	struct intel_ring *ring;
	struct intel_timeline __rcu *timeline;

	struct list_head signal_link;
	struct llist_node signal_node;

	/*
	 * The RCU epoch when this request was allocated. Used to judiciously
	 * apply backpressure on future allocations to ensure that under
	 * mempressure there are sufficient RCU ticks for us to reclaim our
	 * RCU protected slabs.
	 */
	unsigned long rcustate;

	/*
	 * We pin the timeline->mutex while constructing the request to
	 * ensure that no caller accidentally drops it during construction.
	 * The timeline->mutex must be held to ensure that only this caller
	 * can use the ring and manipulate the associated timeline during
	 * construction.
	 */
	struct pin_cookie cookie;

	/*
	 * Fences for the various phases in the request's lifetime.
	 *
	 * The submit fence is used to await upon all of the request's
	 * dependencies. When it is signaled, the request is ready to run.
	 * It is used by the driver to then queue the request for execution.
	 */
	struct i915_sw_fence submit;
	union {
		wait_queue_entry_t submitq;
		struct i915_sw_dma_fence_cb dmaq;
		struct i915_request_duration_cb {
			struct dma_fence_cb cb;
			ktime_t emitted;
		} duration;
	};
	struct llist_head execute_cb;
	struct i915_sw_fence semaphore;

	/*
	 * A list of everyone we wait upon, and everyone who waits upon us.
	 * Even though we will not be submitted to the hardware before the
	 * submit fence is signaled (it waits for all external events as well
	 * as our own requests), the scheduler still needs to know the
	 * dependency tree for the lifetime of the request (from execbuf
	 * to retirement), i.e. bidirectional dependency information for the
	 * request not tied to individual fences.
	 */
	struct i915_sched_node sched;
	struct i915_dependency dep;
	intel_engine_mask_t execution_mask;

	/*
	 * A convenience pointer to the current breadcrumb value stored in
	 * the HW status page (or our timeline's local equivalent). The full
	 * path would be rq->hw_context->ring->timeline->hwsp_seqno.
	 */
	const u32 *hwsp_seqno;

	/** Position in the ring of the start of the request */
	u32 head;

	/** Position in the ring of the start of the user packets */
	u32 infix;

	/**
	 * Position in the ring of the start of the postfix.
	 * This is required to calculate the maximum available ring space
	 * without overwriting the postfix.
	 */
	u32 postfix;

	/** Position in the ring of the end of the whole request */
	u32 tail;

	/** Position in the ring of the end of any workarounds after the tail */
	u32 wa_tail;

	/** Preallocated space in the ring for emitting the request */
	u32 reserved_space;

	/** Batch buffer related to this request if any (used for
	 * error state dump only).
	 */
	struct i915_vma *batch;
	/**
	 * Additional buffers requested by userspace to be captured upon
	 * a GPU hang. The vma/obj on this list are protected by their
	 * active reference - all objects on this list must also be
	 * on the active_list (of their final request).
	 */
	struct i915_capture_list *capture_list;

	/** Time at which this request was emitted, in jiffies. */
	unsigned long emitted_jiffies;

	/** timeline->request entry for this request */
	struct list_head link;

	/** Watchdog support fields. */
	struct i915_request_watchdog {
		struct llist_node link;
		struct hrtimer timer;
	} watchdog;

	I915_SELFTEST_DECLARE(struct {
		struct list_head link;
		unsigned long delay;
	} mock;)
};

#define I915_FENCE_GFP (GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN)

extern const struct dma_fence_ops i915_fence_ops;

static inline bool dma_fence_is_i915(const struct dma_fence *fence)
{
	return fence->ops == &i915_fence_ops;
}

struct kmem_cache *i915_request_slab_cache(void);

struct i915_request * __must_check
__i915_request_create(struct intel_context *ce, gfp_t gfp);
struct i915_request * __must_check
i915_request_create(struct intel_context *ce);
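
/*
 * A minimal usage sketch (assumptions: @ce is a pinned intel_context and
 * the caller may sleep; the actual command emission is elided):
 *
 *	struct i915_request *rq;
 *
 *	rq = i915_request_create(ce);
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);
 *
 *	... emit commands into the ring via rq->ring ...
 *
 *	i915_request_add(rq);
 */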

void __i915_request_skip(struct i915_request *rq);
bool i915_request_set_error_once(struct i915_request *rq, int error);
struct i915_request *i915_request_mark_eio(struct i915_request *rq);

struct i915_request *__i915_request_commit(struct i915_request *request);
void __i915_request_queue(struct i915_request *rq,
			  const struct i915_sched_attr *attr);
void __i915_request_queue_bh(struct i915_request *rq);

bool i915_request_retire(struct i915_request *rq);
void i915_request_retire_upto(struct i915_request *rq);

static inline struct i915_request *
to_request(struct dma_fence *fence)
{
	/* We assume that NULL fence/request are interoperable */
	BUILD_BUG_ON(offsetof(struct i915_request, fence) != 0);
	GEM_BUG_ON(fence && !dma_fence_is_i915(fence));
	return container_of(fence, struct i915_request, fence);
}

static inline struct i915_request *
i915_request_get(struct i915_request *rq)
{
	return to_request(dma_fence_get(&rq->fence));
}

static inline struct i915_request *
i915_request_get_rcu(struct i915_request *rq)
{
	return to_request(dma_fence_get_rcu(&rq->fence));
}

static inline void
i915_request_put(struct i915_request *rq)
{
	dma_fence_put(&rq->fence);
}
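
/*
 * Reference-counting sketch (illustrative): a request may be retired and
 * freed once its fence is signaled, so take a reference before sleeping on
 * it beyond the caller's own lifetime guarantee, e.g.
 *
 *	rq = i915_request_get(rq);
 *	i915_request_wait(rq, I915_WAIT_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
 *	i915_request_put(rq);
 */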

int i915_request_await_object(struct i915_request *to,
			      struct drm_i915_gem_object *obj,
			      bool write);
int i915_request_await_dma_fence(struct i915_request *rq,
				 struct dma_fence *fence);
int i915_request_await_execution(struct i915_request *rq,
				 struct dma_fence *fence,
				 void (*hook)(struct i915_request *rq,
					      struct dma_fence *signal));

void i915_request_add(struct i915_request *rq);

bool __i915_request_submit(struct i915_request *request);
void i915_request_submit(struct i915_request *request);

void __i915_request_unsubmit(struct i915_request *request);
void i915_request_unsubmit(struct i915_request *request);

void i915_request_cancel(struct i915_request *rq, int error);

long i915_request_wait(struct i915_request *rq,
		       unsigned int flags,
		       long timeout)
	__attribute__((nonnull(1)));
#define I915_WAIT_INTERRUPTIBLE	BIT(0)
#define I915_WAIT_PRIORITY	BIT(1) /* small priority bump for the request */
#define I915_WAIT_ALL		BIT(2) /* used by i915_gem_object_wait() */
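
/*
 * Wait sketch (illustrative): @timeout is in jiffies; the return value is
 * the remaining time on completion, or a negative error code (assuming the
 * usual i915 convention of -ETIME for an expired wait), e.g.
 *
 *	long ret = i915_request_wait(rq, I915_WAIT_INTERRUPTIBLE,
 *				     msecs_to_jiffies(100));
 *	if (ret < 0)
 *		return ret;
 */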

void i915_request_show(struct drm_printer *m,
		       const struct i915_request *rq,
		       const char *prefix,
		       int indent);

static inline bool i915_request_signaled(const struct i915_request *rq)
{
	/* The request may live longer than its HWSP, so check flags first! */
	return test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags);
}

static inline bool i915_request_is_active(const struct i915_request *rq)
{
	return test_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags);
}

static inline bool i915_request_in_priority_queue(const struct i915_request *rq)
{
	return test_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
}

static inline bool
i915_request_has_initial_breadcrumb(const struct i915_request *rq)
{
	return test_bit(I915_FENCE_FLAG_INITIAL_BREADCRUMB, &rq->fence.flags);
}

/**
 * Returns true if seq1 is later than seq2.
 */
static inline bool i915_seqno_passed(u32 seq1, u32 seq2)
{
	return (s32)(seq1 - seq2) >= 0;
}
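
/*
 * The signed comparison of the unsigned difference handles seqno
 * wraparound; e.g. i915_seqno_passed(1, 0xfffffffe) is true since
 * (s32)(1 - 0xfffffffe) == 3, whereas a naive 1 >= 0xfffffffe is false.
 */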

static inline u32 __hwsp_seqno(const struct i915_request *rq)
{
	const u32 *hwsp = READ_ONCE(rq->hwsp_seqno);

	return READ_ONCE(*hwsp);
}

/**
 * hwsp_seqno - the current breadcrumb value in the HW status page
 * @rq: the request, to chase the relevant HW status page
 *
 * The emphasis in naming here is that hwsp_seqno() is not a property of the
 * request, but an indication of the current HW state (associated with this
 * request). Its value will change as the GPU executes more requests.
 *
 * Returns the current breadcrumb value in the associated HW status page (or
 * the local timeline's equivalent) for this request. The request itself
 * has the associated breadcrumb value of rq->fence.seqno; when the HW
 * status page has that breadcrumb or later, this request is complete.
 */
static inline u32 hwsp_seqno(const struct i915_request *rq)
{
	u32 seqno;

	rcu_read_lock(); /* the HWSP may be freed at runtime */
	seqno = __hwsp_seqno(rq);
	rcu_read_unlock();

	return seqno;
}

static inline bool __i915_request_has_started(const struct i915_request *rq)
{
	return i915_seqno_passed(__hwsp_seqno(rq), rq->fence.seqno - 1);
}

/**
 * i915_request_started - check if the request has begun being executed
 * @rq: the request
 *
 * If the timeline is not using initial breadcrumbs, a request is
 * considered started if the previous request on its timeline (i.e.
 * context) has been signaled.
 *
 * If the timeline is using semaphores, it will also be emitting an
 * "initial breadcrumb" after the semaphores are complete and just before
 * it begins executing the user payload. A request can therefore be active
 * on the HW and not yet started as it is still busywaiting on its
 * dependencies (via HW semaphores).
 *
 * If the request has started, its dependencies will have been signaled
 * (either by fences or by semaphores) and it will have begun processing
 * the user payload.
 *
 * However, even if a request has started, it may have been preempted and
 * so no longer active, or it may have already completed.
 *
 * See also i915_request_is_active().
 *
 * Returns true if the request has begun executing the user payload, or
 * has completed.
 */
static inline bool i915_request_started(const struct i915_request *rq)
{
	bool result;

	if (i915_request_signaled(rq))
		return true;

	result = true;
	rcu_read_lock(); /* the HWSP may be freed at runtime */
	if (likely(!i915_request_signaled(rq)))
		/* Remember: started but may have since been preempted! */
		result = __i915_request_has_started(rq);
	rcu_read_unlock();

	return result;
}
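
/*
 * A sketch of the distinction between the checks (illustrative only):
 *
 *	if (i915_request_completed(rq))
 *		...	(the user payload has finished)
 *	else if (i915_request_started(rq))
 *		...	(payload begun, but may since have been preempted)
 */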

/**
 * i915_request_is_running - check if the request may actually be executing
 * @rq: the request
 *
 * Returns true if the request is currently submitted to hardware and has
 * passed its start point (i.e. the context is set up and is not busywaiting).
 * Note that it may no longer be running by the time the function returns!
 */
static inline bool i915_request_is_running(const struct i915_request *rq)
{
	bool result;

	if (!i915_request_is_active(rq))
		return false;

	rcu_read_lock();
	result = __i915_request_has_started(rq) && i915_request_is_active(rq);
	rcu_read_unlock();

	return result;
}

/**
 * i915_request_is_ready - check if the request is ready for execution
 * @rq: the request
 *
 * Upon construction, the request is instructed to wait upon various
 * signals before it is ready to be executed by the HW. That is, we do
 * not want to start execution and read data before it is written. In practice,
 * this is controlled with a mixture of interrupts and semaphores. Once
 * the submit fence is completed, the backend scheduler will place the
 * request into its queue and from there submit it for execution. So we
 * can detect when a request is eligible for execution (and is under control
 * of the scheduler) by querying where it is in any of the scheduler's lists.
 *
 * Returns true if the request is ready for execution (it may be inflight),
 * false otherwise.
 */
static inline bool i915_request_is_ready(const struct i915_request *rq)
{
	return !list_empty(&rq->sched.link);
}

static inline bool __i915_request_is_complete(const struct i915_request *rq)
{
	return i915_seqno_passed(__hwsp_seqno(rq), rq->fence.seqno);
}

static inline bool i915_request_completed(const struct i915_request *rq)
{
	bool result;

	if (i915_request_signaled(rq))
		return true;

	result = true;
	rcu_read_lock(); /* the HWSP may be freed at runtime */
	if (likely(!i915_request_signaled(rq)))
		result = __i915_request_is_complete(rq);
	rcu_read_unlock();

	return result;
}

static inline void i915_request_mark_complete(struct i915_request *rq)
{
	WRITE_ONCE(rq->hwsp_seqno, /* decouple from HWSP */
		   (u32 *)&rq->fence.seqno);
}

static inline bool i915_request_has_waitboost(const struct i915_request *rq)
{
	return test_bit(I915_FENCE_FLAG_BOOST, &rq->fence.flags);
}

static inline bool i915_request_has_nopreempt(const struct i915_request *rq)
{
	/* Preemption should only be disabled very rarely */
	return unlikely(test_bit(I915_FENCE_FLAG_NOPREEMPT, &rq->fence.flags));
}

static inline bool i915_request_has_sentinel(const struct i915_request *rq)
{
	return unlikely(test_bit(I915_FENCE_FLAG_SENTINEL, &rq->fence.flags));
}

static inline bool i915_request_on_hold(const struct i915_request *rq)
{
	return unlikely(test_bit(I915_FENCE_FLAG_HOLD, &rq->fence.flags));
}

static inline void i915_request_set_hold(struct i915_request *rq)
{
	set_bit(I915_FENCE_FLAG_HOLD, &rq->fence.flags);
}

static inline void i915_request_clear_hold(struct i915_request *rq)
{
	clear_bit(I915_FENCE_FLAG_HOLD, &rq->fence.flags);
}

static inline struct intel_timeline *
i915_request_timeline(const struct i915_request *rq)
{
	/* Valid only while the request is being constructed (or retired). */
	return rcu_dereference_protected(rq->timeline,
					 lockdep_is_held(&rcu_access_pointer(rq->timeline)->mutex));
}

static inline struct i915_gem_context *
i915_request_gem_context(const struct i915_request *rq)
{
	/* Valid only while the request is being constructed (or retired). */
	return rcu_dereference_protected(rq->context->gem_context, true);
}

static inline struct intel_timeline *
i915_request_active_timeline(const struct i915_request *rq)
{
	/*
	 * When in use during submission, we are protected by a guarantee that
	 * the context/timeline is pinned and must remain pinned until after
	 * this submission.
	 */
	return rcu_dereference_protected(rq->timeline,
					 lockdep_is_held(&rq->engine->active.lock));
}

static inline u32
i915_request_active_seqno(const struct i915_request *rq)
{
	u32 hwsp_phys_base =
		page_mask_bits(i915_request_active_timeline(rq)->hwsp_offset);
	u32 hwsp_relative_offset = offset_in_page(rq->hwsp_seqno);

	/*
	 * Because of wraparound, we cannot simply take tl->hwsp_offset,
	 * but instead use the fact that the offset within the page is
	 * the same for the vaddr as for hwsp_offset. Take the top bits
	 * from tl->hwsp_offset and combine them with the relative offset
	 * of rq->hwsp_seqno within its page.
	 *
	 * As rq->hwsp_seqno is rewritten when signaled, this only works
	 * while the request is not yet signaled, but at that point you
	 * no longer need the offset.
	 */

	return hwsp_phys_base + hwsp_relative_offset;
}

bool
i915_request_active_engine(struct i915_request *rq,
			   struct intel_engine_cs **active);

#endif /* I915_REQUEST_H */