linux/drivers/gpu/drm/i915/i915_request.h
/*
 * Copyright © 2008-2018 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#ifndef I915_REQUEST_H
#define I915_REQUEST_H

#include <linux/dma-fence.h>
#include <linux/irq_work.h>
#include <linux/lockdep.h>

#include "gem/i915_gem_context_types.h"
#include "gt/intel_context_types.h"
#include "gt/intel_engine_types.h"
#include "gt/intel_timeline_types.h"

#include "i915_gem.h"
#include "i915_scheduler.h"
#include "i915_selftest.h"
#include "i915_sw_fence.h"

#include <uapi/drm/i915_drm.h>

struct drm_file;
struct drm_i915_gem_object;
struct i915_request;

struct i915_capture_list {
        struct i915_capture_list *next;
        struct i915_vma *vma;
};

#define RQ_TRACE(rq, fmt, ...) do {                                     \
        const struct i915_request *rq__ = (rq);                         \
        ENGINE_TRACE(rq__->engine, "fence %llx:%lld, current %d " fmt,  \
                     rq__->fence.context, rq__->fence.seqno,            \
                     hwsp_seqno(rq__), ##__VA_ARGS__);                  \
} while (0)
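
/*
 * A minimal usage sketch for RQ_TRACE (illustrative only; it logs the
 * request's fence id and current HWSP breadcrumb alongside the caller's
 * message, and compiles away unless the driver's GEM debug tracing is
 * built in):
 *
 *      RQ_TRACE(rq, "submitted, completed? %d\n",
 *               i915_request_completed(rq));
 */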

enum {
        /*
         * I915_FENCE_FLAG_ACTIVE - this request is currently submitted to HW.
         *
         * Set by __i915_request_submit() on handing over to HW, and cleared
         * by __i915_request_unsubmit() if we preempt this request.
         *
         * Finally cleared for consistency on retiring the request, when
         * we know the HW is no longer running this request.
         *
         * See i915_request_is_active()
         */
        I915_FENCE_FLAG_ACTIVE = DMA_FENCE_FLAG_USER_BITS,

        /*
         * I915_FENCE_FLAG_PQUEUE - this request is ready for execution
         *
         * Using the scheduler, when a request is ready for execution it is put
         * into the priority queue, and removed from that queue when transferred
         * to the HW runlists. We want to track its membership within the
         * priority queue so that we can easily check before rescheduling.
         *
         * See i915_request_in_priority_queue()
         */
        I915_FENCE_FLAG_PQUEUE,

        /*
         * I915_FENCE_FLAG_HOLD - this request is currently on hold
         *
         * This request has been suspended, pending an ongoing investigation.
         */
        I915_FENCE_FLAG_HOLD,

        /*
         * I915_FENCE_FLAG_INITIAL_BREADCRUMB - this request has the initial
         * breadcrumb that marks the end of semaphore waits and start of the
         * user payload.
         */
        I915_FENCE_FLAG_INITIAL_BREADCRUMB,

        /*
         * I915_FENCE_FLAG_SIGNAL - this request is currently on signal_list
         *
         * Internal bookkeeping used by the breadcrumb code to track when
         * a request is on the various signal_list.
         */
        I915_FENCE_FLAG_SIGNAL,

        /*
         * I915_FENCE_FLAG_NOPREEMPT - this request should not be preempted
         *
         * The execution of some requests should not be interrupted. This is
         * a sensitive operation as it makes the request super important,
         * blocking other higher priority work. Abuse of this flag will
         * lead to quality of service issues.
         */
        I915_FENCE_FLAG_NOPREEMPT,

        /*
         * I915_FENCE_FLAG_SENTINEL - this request should be last in the queue
         *
         * A high priority sentinel request may be submitted to clear the
         * submission queue. As it will be the only request in-flight, upon
         * execution all other active requests will have been preempted and
         * unsubmitted. This preemptive pulse is used to re-evaluate the
         * in-flight requests, particularly in cases where an active context
         * is banned and those active requests need to be cancelled.
         */
        I915_FENCE_FLAG_SENTINEL,
        /*
         * I915_FENCE_FLAG_BOOST - upclock the gpu for this request
         *
         * Some requests are more important than others! In particular, a
         * request that the user is waiting on is typically required for
         * interactive latency, which we want to minimise by upclocking
         * the GPU. Here we track such boost requests on a per-request basis.
         */
        I915_FENCE_FLAG_BOOST,
};

/**
 * Request queue structure.
 *
 * The request queue allows us to note sequence numbers that have been emitted
 * and may be associated with active buffers to be retired.
 *
 * By keeping this list, we can avoid having to do questionable sequence
 * number comparisons on buffer last_read|write_seqno. It also allows an
 * emission time to be associated with the request for tracking how far ahead
 * of the GPU the submission is.
 *
 * When modifying this structure be very aware that we perform a lockless
 * RCU lookup of it that may race against reallocation of the struct
 * from the slab freelist. We intentionally do not zero the structure on
 * allocation so that the lookup can use the dangling pointers (and is
 * cognisant that those pointers may be wrong). Instead, everything that
 * needs to be initialised must be done so explicitly.
 *
 * The requests are reference counted.
 */
struct i915_request {
        struct dma_fence fence;
        spinlock_t lock;

        /**
         * Context and ring buffer related to this request
         * Contexts are refcounted, so when this request is associated with a
         * context, we must increment the context's refcount, to guarantee that
         * it persists while any request is linked to it. Requests themselves
         * are also refcounted, so the request will only be freed when the last
         * reference to it is dismissed, and the code in
         * i915_request_free() will then decrement the refcount on the
         * context.
         */
        struct intel_engine_cs *engine;
        struct intel_context *context;
        struct intel_ring *ring;
        struct intel_timeline __rcu *timeline;
        struct list_head signal_link;

        /*
         * The rcu epoch of when this request was allocated. Used to judiciously
         * apply backpressure on future allocations to ensure that under
         * mempressure there are sufficient RCU ticks for us to reclaim our
         * RCU protected slabs.
         */
        unsigned long rcustate;

        /*
         * We pin the timeline->mutex while constructing the request to
         * ensure that no caller accidentally drops it during construction.
         * The timeline->mutex must be held to ensure that only this caller
         * can use the ring and manipulate the associated timeline during
         * construction.
         */
        struct pin_cookie cookie;

        /*
         * Fences for the various phases in the request's lifetime.
         *
         * The submit fence is used to await upon all of the request's
         * dependencies. When it is signaled, the request is ready to run.
         * It is used by the driver to then queue the request for execution.
         */
        struct i915_sw_fence submit;
        union {
                wait_queue_entry_t submitq;
                struct i915_sw_dma_fence_cb dmaq;
                struct i915_request_duration_cb {
                        struct dma_fence_cb cb;
                        ktime_t emitted;
                } duration;
        };
        struct llist_head execute_cb;
        struct i915_sw_fence semaphore;

        /*
         * A list of everyone we wait upon, and everyone who waits upon us.
         * Even though we will not be submitted to the hardware before the
         * submit fence is signaled (it waits for all external events as well
         * as our own requests), the scheduler still needs to know the
         * dependency tree for the lifetime of the request (from execbuf
         * to retirement), i.e. bidirectional dependency information for the
         * request not tied to individual fences.
         */
        struct i915_sched_node sched;
        struct i915_dependency dep;
        intel_engine_mask_t execution_mask;

        /*
         * A convenience pointer to the current breadcrumb value stored in
         * the HW status page (or our timeline's local equivalent). The full
         * path would be rq->hw_context->ring->timeline->hwsp_seqno.
         */
        const u32 *hwsp_seqno;

        /*
         * If we need to access the timeline's seqno for this request in
         * another request, we need to keep a read reference to this associated
         * cacheline, so that we do not free and recycle it before the foreign
         * observers have completed. Hence, we keep a pointer to the cacheline
         * inside the timeline's HWSP vma, but it is only valid while this
         * request has not completed and guarded by the timeline mutex.
         */
        struct intel_timeline_cacheline __rcu *hwsp_cacheline;

        /** Position in the ring of the start of the request */
        u32 head;

        /** Position in the ring of the start of the user packets */
        u32 infix;

        /**
         * Position in the ring of the start of the postfix.
         * This is required to calculate the maximum available ring space
         * without overwriting the postfix.
         */
        u32 postfix;

        /** Position in the ring of the end of the whole request */
        u32 tail;

        /** Position in the ring of the end of any workarounds after the tail */
        u32 wa_tail;

        /** Preallocated space in the ring for emitting the request */
        u32 reserved_space;

        /**
         * Batch buffer related to this request if any (used for
         * error state dump only).
         */
        struct i915_vma *batch;

        /**
         * Additional buffers requested by userspace to be captured upon
         * a GPU hang. The vma/obj on this list are protected by their
         * active reference - all objects on this list must also be
         * on the active_list (of their final request).
         */
        struct i915_capture_list *capture_list;

        /** Time at which this request was emitted, in jiffies. */
        unsigned long emitted_jiffies;

        /** timeline->request entry for this request */
        struct list_head link;

        struct drm_i915_file_private *file_priv;
        /** file_priv list entry for this request */
        struct list_head client_link;

        I915_SELFTEST_DECLARE(struct {
                struct list_head link;
                unsigned long delay;
        } mock;)
};

#define I915_FENCE_GFP (GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN)

extern const struct dma_fence_ops i915_fence_ops;

static inline bool dma_fence_is_i915(const struct dma_fence *fence)
{
        return fence->ops == &i915_fence_ops;
}

struct kmem_cache *i915_request_slab_cache(void);

struct i915_request * __must_check
__i915_request_create(struct intel_context *ce, gfp_t gfp);
struct i915_request * __must_check
i915_request_create(struct intel_context *ce);

void i915_request_set_error_once(struct i915_request *rq, int error);
void __i915_request_skip(struct i915_request *rq);

struct i915_request *__i915_request_commit(struct i915_request *request);
void __i915_request_queue(struct i915_request *rq,
                          const struct i915_sched_attr *attr);

bool i915_request_retire(struct i915_request *rq);
void i915_request_retire_upto(struct i915_request *rq);

static inline struct i915_request *
to_request(struct dma_fence *fence)
{
        /* We assume that NULL fence/request are interoperable */
        BUILD_BUG_ON(offsetof(struct i915_request, fence) != 0);
        GEM_BUG_ON(fence && !dma_fence_is_i915(fence));
        return container_of(fence, struct i915_request, fence);
}
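
/*
 * A minimal sketch of downcasting a fence back to its request, e.g. from
 * within a dma_fence callback (illustrative only; my_fence_cb is a
 * hypothetical helper, not part of this header). dma_fence_is_i915() must
 * hold for the conversion to be valid:
 *
 *      static void my_fence_cb(struct dma_fence *fence,
 *                              struct dma_fence_cb *cb)
 *      {
 *              if (dma_fence_is_i915(fence)) {
 *                      struct i915_request *rq = to_request(fence);
 *
 *                      RQ_TRACE(rq, "fence signaled\n");
 *              }
 *      }
 */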

static inline struct i915_request *
i915_request_get(struct i915_request *rq)
{
        return to_request(dma_fence_get(&rq->fence));
}

static inline struct i915_request *
i915_request_get_rcu(struct i915_request *rq)
{
        return to_request(dma_fence_get_rcu(&rq->fence));
}

static inline void
i915_request_put(struct i915_request *rq)
{
        dma_fence_put(&rq->fence);
}

int i915_request_await_object(struct i915_request *to,
                              struct drm_i915_gem_object *obj,
                              bool write);
int i915_request_await_dma_fence(struct i915_request *rq,
                                 struct dma_fence *fence);
int i915_request_await_execution(struct i915_request *rq,
                                 struct dma_fence *fence,
                                 void (*hook)(struct i915_request *rq,
                                              struct dma_fence *signal));

void i915_request_add(struct i915_request *rq);
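
/*
 * A minimal construction sketch (illustrative only; assumes @ce is an
 * intel_context the caller may submit to and @in is an optional dma_fence
 * dependency): create a request, declare its dependency, then commit it
 * with i915_request_add(). Taking an extra reference before committing
 * lets the caller keep using the request afterwards:
 *
 *      rq = i915_request_create(ce);
 *      if (IS_ERR(rq))
 *              return PTR_ERR(rq);
 *
 *      if (in)
 *              err = i915_request_await_dma_fence(rq, in);
 *
 *      i915_request_get(rq);
 *      i915_request_add(rq);
 */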

bool __i915_request_submit(struct i915_request *request);
void i915_request_submit(struct i915_request *request);

void __i915_request_unsubmit(struct i915_request *request);
void i915_request_unsubmit(struct i915_request *request);

/* Note: part of the intel_breadcrumbs family */
bool i915_request_enable_breadcrumb(struct i915_request *request);
void i915_request_cancel_breadcrumb(struct i915_request *request);

long i915_request_wait(struct i915_request *rq,
                       unsigned int flags,
                       long timeout)
        __attribute__((nonnull(1)));
#define I915_WAIT_INTERRUPTIBLE BIT(0)
#define I915_WAIT_PRIORITY      BIT(1) /* small priority bump for the request */
#define I915_WAIT_ALL           BIT(2) /* used by i915_gem_object_wait() */
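
/*
 * A minimal wait sketch (illustrative only): block for up to one second,
 * allowing signals to interrupt the wait. i915_request_wait() returns the
 * remaining timeout in jiffies if the request completed, or a negative
 * error code otherwise (e.g. -ETIME on timeout):
 *
 *      timeout = i915_request_wait(rq, I915_WAIT_INTERRUPTIBLE, HZ);
 *      if (timeout < 0)
 *              err = timeout;
 */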

static inline bool i915_request_signaled(const struct i915_request *rq)
{
        /* The request may live longer than its HWSP, so check flags first! */
        return test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags);
}

static inline bool i915_request_is_active(const struct i915_request *rq)
{
        return test_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags);
}

static inline bool i915_request_in_priority_queue(const struct i915_request *rq)
{
        return test_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
}

static inline bool
i915_request_has_initial_breadcrumb(const struct i915_request *rq)
{
        return test_bit(I915_FENCE_FLAG_INITIAL_BREADCRUMB, &rq->fence.flags);
}

/**
 * Returns true if seq1 is later than or equal to seq2, using signed
 * arithmetic so the comparison remains correct across u32 wraparound.
 */
static inline bool i915_seqno_passed(u32 seq1, u32 seq2)
{
        return (s32)(seq1 - seq2) >= 0;
}
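
/*
 * A worked example of the wraparound behaviour (illustrative only): with
 * seq1 = 0x00000002 and seq2 = 0xfffffffe, the u32 difference is
 * 0x00000004, which as an s32 is +4 >= 0, so seq1 is correctly judged to
 * have passed seq2 even though it is numerically smaller. Two seqnos more
 * than 2^31 apart cannot be ordered reliably by this scheme.
 */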

static inline u32 __hwsp_seqno(const struct i915_request *rq)
{
        const u32 *hwsp = READ_ONCE(rq->hwsp_seqno);

        return READ_ONCE(*hwsp);
}

/**
 * hwsp_seqno - the current breadcrumb value in the HW status page
 * @rq: the request, to chase the relevant HW status page
 *
 * The emphasis in naming here is that hwsp_seqno() is not a property of the
 * request, but an indication of the current HW state (associated with this
 * request). Its value will change as the GPU executes more requests.
 *
 * Returns the current breadcrumb value in the associated HW status page (or
 * the local timeline's equivalent) for this request. The request itself
 * has the associated breadcrumb value of rq->fence.seqno; when the HW
 * status page reports that breadcrumb or later, this request is complete.
 */
static inline u32 hwsp_seqno(const struct i915_request *rq)
{
        u32 seqno;

        rcu_read_lock(); /* the HWSP may be freed at runtime */
        seqno = __hwsp_seqno(rq);
        rcu_read_unlock();

        return seqno;
}

static inline bool __i915_request_has_started(const struct i915_request *rq)
{
        /*
         * The breadcrumb written just before our payload (either the
         * previous request's final breadcrumb, or our own initial
         * breadcrumb) carries the value rq->fence.seqno - 1.
         */
        return i915_seqno_passed(hwsp_seqno(rq), rq->fence.seqno - 1);
}

/**
 * i915_request_started - check if the request has begun being executed
 * @rq: the request
 *
 * If the timeline is not using initial breadcrumbs, a request is
 * considered started if the previous request on its timeline (i.e.
 * context) has been signaled.
 *
 * If the timeline is using semaphores, it will also be emitting an
 * "initial breadcrumb" after the semaphores are complete and just before
 * it begins executing the user payload. A request can therefore be active
 * on the HW and not yet started as it is still busywaiting on its
 * dependencies (via HW semaphores).
 *
 * If the request has started, its dependencies will have been signaled
 * (either by fences or by semaphores) and it will have begun processing
 * the user payload.
 *
 * However, even if a request has started, it may have been preempted and
 * so no longer active, or it may have already completed.
 *
 * See also i915_request_is_active().
 *
 * Returns true if the request has begun executing the user payload, or
 * has completed.
 */
static inline bool i915_request_started(const struct i915_request *rq)
{
        if (i915_request_signaled(rq))
                return true;

        /* Remember: started but may have since been preempted! */
        return __i915_request_has_started(rq);
}

/**
 * i915_request_is_running - check if the request may actually be executing
 * @rq: the request
 *
 * Returns true if the request is currently submitted to hardware and has
 * passed its start point (i.e. the context is set up and not busywaiting).
 * Note that it may no longer be running by the time the function returns!
 */
static inline bool i915_request_is_running(const struct i915_request *rq)
{
        if (!i915_request_is_active(rq))
                return false;

        return __i915_request_has_started(rq);
}

/**
 * i915_request_is_ready - check if the request is ready for execution
 * @rq: the request
 *
 * Upon construction, the request is instructed to wait upon various
 * signals before it is ready to be executed by the HW. That is, we do
 * not want to start execution and read data before it is written. In practice,
 * this is controlled with a mixture of interrupts and semaphores. Once
 * the submit fence is completed, the backend scheduler will place the
 * request into its queue and from there submit it for execution. So we
 * can detect when a request is eligible for execution (and is under control
 * of the scheduler) by querying where it is in any of the scheduler's lists.
 *
 * Returns true if the request is ready for execution (it may be inflight),
 * false otherwise.
 */
static inline bool i915_request_is_ready(const struct i915_request *rq)
{
        return !list_empty(&rq->sched.link);
}

static inline bool i915_request_completed(const struct i915_request *rq)
{
        if (i915_request_signaled(rq))
                return true;

        return i915_seqno_passed(hwsp_seqno(rq), rq->fence.seqno);
}
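
/*
 * A minimal polling sketch (illustrative only): a request is complete once
 * its fence has been signaled, or once the HW status page has reached its
 * breadcrumb. Busy-polling is shown purely for clarity; real code should
 * prefer sleeping in i915_request_wait():
 *
 *      while (!i915_request_completed(rq))
 *              cpu_relax();
 */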

static inline void i915_request_mark_complete(struct i915_request *rq)
{
        WRITE_ONCE(rq->hwsp_seqno, /* decouple from HWSP */
                   (u32 *)&rq->fence.seqno);
}

static inline bool i915_request_has_waitboost(const struct i915_request *rq)
{
        return test_bit(I915_FENCE_FLAG_BOOST, &rq->fence.flags);
}

static inline bool i915_request_has_nopreempt(const struct i915_request *rq)
{
        /* Preemption should only be disabled very rarely */
        return unlikely(test_bit(I915_FENCE_FLAG_NOPREEMPT, &rq->fence.flags));
}

static inline bool i915_request_has_sentinel(const struct i915_request *rq)
{
        return unlikely(test_bit(I915_FENCE_FLAG_SENTINEL, &rq->fence.flags));
}

static inline bool i915_request_on_hold(const struct i915_request *rq)
{
        return unlikely(test_bit(I915_FENCE_FLAG_HOLD, &rq->fence.flags));
}

static inline void i915_request_set_hold(struct i915_request *rq)
{
        set_bit(I915_FENCE_FLAG_HOLD, &rq->fence.flags);
}

static inline void i915_request_clear_hold(struct i915_request *rq)
{
        clear_bit(I915_FENCE_FLAG_HOLD, &rq->fence.flags);
}

static inline struct intel_timeline *
i915_request_timeline(const struct i915_request *rq)
{
        /* Valid only while the request is being constructed (or retired). */
        return rcu_dereference_protected(rq->timeline,
                                         lockdep_is_held(&rcu_access_pointer(rq->timeline)->mutex));
}

static inline struct i915_gem_context *
i915_request_gem_context(const struct i915_request *rq)
{
        /* Valid only while the request is being constructed (or retired). */
        return rcu_dereference_protected(rq->context->gem_context, true);
}

static inline struct intel_timeline *
i915_request_active_timeline(const struct i915_request *rq)
{
        /*
         * When in use during submission, we are protected by a guarantee that
         * the context/timeline is pinned and must remain pinned until after
         * this submission.
         */
        return rcu_dereference_protected(rq->timeline,
                                         lockdep_is_held(&rq->engine->active.lock));
}

#endif /* I915_REQUEST_H */