linux/drivers/gpu/drm/i915/i915_gem_request.h
/*
 * Copyright © 2008-2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#ifndef I915_GEM_REQUEST_H
#define I915_GEM_REQUEST_H

#include <linux/dma-fence.h>

#include "i915_gem.h"
#include "i915_sw_fence.h"

struct drm_file;
struct drm_i915_gem_object;

struct intel_wait {
        struct rb_node node;
        struct task_struct *tsk;
        u32 seqno;
};

struct intel_signal_node {
        struct rb_node node;
        struct intel_wait wait;
};

struct i915_dependency {
        struct i915_priotree *signaler;
        struct list_head signal_link;
        struct list_head wait_link;
        struct list_head dfs_link;
        unsigned long flags;
#define I915_DEPENDENCY_ALLOC BIT(0)
};

/* Requests exist in a complex web of interdependencies. Each request
 * has to wait for some other request to complete before it is ready to be run
 * (e.g. we have to wait until the pixels have been rendered into a texture
 * before we can copy from it). We track the readiness of a request in terms
 * of fences, but we also need to keep the dependency tree for the lifetime
 * of the request (beyond the life of an individual fence). We use the tree
 * at various points to reorder the requests whilst keeping the requests
 * in order with respect to their various dependencies.
 */
struct i915_priotree {
        struct list_head signalers_list; /* those before us, we depend upon */
        struct list_head waiters_list; /* those after us, they depend upon us */
        struct rb_node node;
        int priority;
#define I915_PRIORITY_MAX 1024
#define I915_PRIORITY_MIN (-I915_PRIORITY_MAX)
};
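
/* An illustrative sketch (not itself part of the driver): if request B reads
 * a buffer written by request A, then B depends upon A. The link between the
 * two priotrees is a single struct i915_dependency threaded onto both lists:
 *
 *        A->priotree.waiters_list   ... contains dep->wait_link
 *        B->priotree.signalers_list ... contains dep->signal_link
 *        dep->signaler = &A->priotree
 *
 * Walking B's signalers_list visits everything B must wait for, and walking
 * A's waiters_list visits everything that waits upon A, which is the
 * bidirectional information needed when reordering requests.
 */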

/**
 * Request queue structure.
 *
 * The request queue allows us to note sequence numbers that have been emitted
 * and may be associated with active buffers to be retired.
 *
 * By keeping this list, we can avoid having to do questionable sequence
 * number comparisons on buffer last_read|write_seqno. It also allows an
 * emission time to be associated with the request for tracking how far ahead
 * of the GPU the submission is.
 *
 * When modifying this structure be very aware that we perform a lockless
 * RCU lookup of it that may race against reallocation of the struct
 * from the slab freelist. We intentionally do not zero the structure on
 * allocation so that the lookup can use the dangling pointers (and is
 * cognisant that those pointers may be wrong). Instead, everything that
 * needs to be initialised must be done so explicitly.
 *
 * The requests are reference counted.
 */
struct drm_i915_gem_request {
        struct dma_fence fence;
        spinlock_t lock;

        /** On which ring this request was generated */
        struct drm_i915_private *i915;

        /**
         * Context and ring buffer related to this request
         * Contexts are refcounted, so when this request is associated with a
         * context, we must increment the context's refcount, to guarantee that
         * it persists while any request is linked to it. Requests themselves
         * are also refcounted, so the request will only be freed when the last
         * reference to it is dismissed, and the code in
         * i915_gem_request_free() will then decrement the refcount on the
         * context.
         */
        struct i915_gem_context *ctx;
        struct intel_engine_cs *engine;
        struct intel_ring *ring;
        struct intel_timeline *timeline;
        struct intel_signal_node signaling;

        /* Fences for the various phases in the request's lifetime.
         *
         * The submit fence is used to await upon all of the request's
         * dependencies. When it is signaled, the request is ready to run.
         * It is used by the driver to then queue the request for execution.
         *
         * The execute fence is used to signal when the request has been
         * sent to hardware.
         *
         * It is illegal for the submit fence of one request to wait upon the
         * execute fence of an earlier request. It should be sufficient to
         * wait upon the submit fence of the earlier request.
         */
        struct i915_sw_fence submit;
        struct i915_sw_fence execute;
        wait_queue_t submitq;
        wait_queue_t execq;

        /* A list of everyone we wait upon, and everyone who waits upon us.
         * Even though we will not be submitted to the hardware before the
         * submit fence is signaled (it waits for all external events as well
         * as our own requests), the scheduler still needs to know the
         * dependency tree for the lifetime of the request (from execbuf
         * to retirement), i.e. bidirectional dependency information for the
         * request not tied to individual fences.
         */
        struct i915_priotree priotree;
        struct i915_dependency dep;

        u32 global_seqno;

        /** GEM sequence number associated with the previous request;
         * when the HWS breadcrumb is equal to this, the GPU is processing
         * this request.
         */
        u32 previous_seqno;

        /** Position in the ring of the start of the request */
        u32 head;

        /**
         * Position in the ring of the start of the postfix.
         * This is required to calculate the maximum available ring space
         * without overwriting the postfix.
         */
        u32 postfix;

        /** Position in the ring of the end of the whole request */
        u32 tail;

        /** Position in the ring of the end of any workarounds after the tail */
        u32 wa_tail;

        /** Preallocated space in the ring for emitting the request */
        u32 reserved_space;

        /** Batch buffer related to this request if any (used for
         * error state dump only).
         */
        struct i915_vma *batch;
        struct list_head active_list;

        /** Time at which this request was emitted, in jiffies. */
        unsigned long emitted_jiffies;

        /** engine->request_list entry for this request */
        struct list_head link;

        /** ring->request_list entry for this request */
        struct list_head ring_link;

        struct drm_i915_file_private *file_priv;
        /** file_priv list entry for this request */
        struct list_head client_list;
};
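
/* A rough sketch of a request's lifetime, summarising the comments on the
 * fields above (not a definitive flowchart):
 *
 *        execbuf: request allocated, dependencies recorded in the priotree
 *        submit fence signaled:  all dependencies met, ready to be queued
 *        execute fence signaled: the request has been sent to hardware
 *        HWS breadcrumb == previous_seqno: the GPU is processing this request
 *        HWS breadcrumb == global_seqno:   the request has completed
 *        retirement: active trackers are notified and the request is freed
 *        once the last reference is dropped
 */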

extern const struct dma_fence_ops i915_fence_ops;

static inline bool dma_fence_is_i915(const struct dma_fence *fence)
{
        return fence->ops == &i915_fence_ops;
}

struct drm_i915_gem_request * __must_check
i915_gem_request_alloc(struct intel_engine_cs *engine,
                       struct i915_gem_context *ctx);
int i915_gem_request_add_to_client(struct drm_i915_gem_request *req,
                                   struct drm_file *file);
void i915_gem_request_retire_upto(struct drm_i915_gem_request *req);

static inline struct drm_i915_gem_request *
to_request(struct dma_fence *fence)
{
        /* We assume that NULL fence/request are interoperable */
        BUILD_BUG_ON(offsetof(struct drm_i915_gem_request, fence) != 0);
        GEM_BUG_ON(fence && !dma_fence_is_i915(fence));
        return container_of(fence, struct drm_i915_gem_request, fence);
}

static inline struct drm_i915_gem_request *
i915_gem_request_get(struct drm_i915_gem_request *req)
{
        return to_request(dma_fence_get(&req->fence));
}

static inline struct drm_i915_gem_request *
i915_gem_request_get_rcu(struct drm_i915_gem_request *req)
{
        return to_request(dma_fence_get_rcu(&req->fence));
}

static inline void
i915_gem_request_put(struct drm_i915_gem_request *req)
{
        dma_fence_put(&req->fence);
}
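
/* An illustrative sketch of the refcounting helpers above (the caller and
 * locals here are hypothetical, not driver code). The request embeds its
 * dma_fence at offset 0 (see to_request()), so holding a fence reference
 * keeps the request alive:
 *
 *        struct drm_i915_gem_request *rq;
 *
 *        rq = i915_gem_request_get(request);  // dma_fence_get(&request->fence)
 *        mutex_unlock(&i915->drm.struct_mutex);
 *        ...                                  // rq stays valid without the lock
 *        i915_gem_request_put(rq);            // dma_fence_put(&rq->fence)
 */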

static inline void i915_gem_request_assign(struct drm_i915_gem_request **pdst,
                                           struct drm_i915_gem_request *src)
{
        if (src)
                i915_gem_request_get(src);

        if (*pdst)
                i915_gem_request_put(*pdst);

        *pdst = src;
}

int
i915_gem_request_await_object(struct drm_i915_gem_request *to,
                              struct drm_i915_gem_object *obj,
                              bool write);
int i915_gem_request_await_dma_fence(struct drm_i915_gem_request *req,
                                     struct dma_fence *fence);

void __i915_add_request(struct drm_i915_gem_request *req, bool flush_caches);
#define i915_add_request(req) \
        __i915_add_request(req, true)
#define i915_add_request_no_flush(req) \
        __i915_add_request(req, false)

void __i915_gem_request_submit(struct drm_i915_gem_request *request);
void i915_gem_request_submit(struct drm_i915_gem_request *request);

struct intel_rps_client;
#define NO_WAITBOOST ERR_PTR(-1)
#define IS_RPS_CLIENT(p) (!IS_ERR(p))
#define IS_RPS_USER(p) (!IS_ERR_OR_NULL(p))

long i915_wait_request(struct drm_i915_gem_request *req,
                       unsigned int flags,
                       long timeout)
        __attribute__((nonnull(1)));
#define I915_WAIT_INTERRUPTIBLE BIT(0)
#define I915_WAIT_LOCKED        BIT(1) /* struct_mutex held, handle GPU reset */
#define I915_WAIT_ALL           BIT(2) /* used by i915_gem_object_wait() */

static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine);

/**
 * Returns true if seq1 is later than seq2.
 */
static inline bool i915_seqno_passed(u32 seq1, u32 seq2)
{
        return (s32)(seq1 - seq2) >= 0;
}
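
/* A worked example of the wrapping comparison above (illustrative values):
 *
 *        i915_seqno_passed(5, 3)          == true,  (s32)(5 - 3) ==  2 >= 0
 *        i915_seqno_passed(3, 5)          == false, (s32)(3 - 5) == -2 <  0
 *        i915_seqno_passed(2, 0xfffffffd) == true,  (s32)(2 - 0xfffffffd) == 5
 *
 * The unsigned subtraction cast to s32 keeps the comparison correct across
 * seqno wraparound, provided the two values are within 2^31 of each other.
 */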

static inline bool
__i915_gem_request_started(const struct drm_i915_gem_request *req)
{
        GEM_BUG_ON(!req->global_seqno);
        return i915_seqno_passed(intel_engine_get_seqno(req->engine),
                                 req->previous_seqno);
}

static inline bool
i915_gem_request_started(const struct drm_i915_gem_request *req)
{
        if (!req->global_seqno)
                return false;

        return __i915_gem_request_started(req);
}

static inline bool
__i915_gem_request_completed(const struct drm_i915_gem_request *req)
{
        GEM_BUG_ON(!req->global_seqno);
        return i915_seqno_passed(intel_engine_get_seqno(req->engine),
                                 req->global_seqno);
}

static inline bool
i915_gem_request_completed(const struct drm_i915_gem_request *req)
{
        if (!req->global_seqno)
                return false;

        return __i915_gem_request_completed(req);
}

bool __i915_spin_request(const struct drm_i915_gem_request *request,
                         int state, unsigned long timeout_us);
static inline bool i915_spin_request(const struct drm_i915_gem_request *request,
                                     int state, unsigned long timeout_us)
{
        return (__i915_gem_request_started(request) &&
                __i915_spin_request(request, state, timeout_us));
}

/* We treat requests as fences. This is not to be confused with our
 * "fence registers" but pipeline synchronisation objects a la GL_ARB_sync.
 * We use the fences to synchronize access from the CPU with activity on the
 * GPU, for example, we should not rewrite an object's PTE whilst the GPU
 * is reading them. We also track fences at a higher level to provide
 * implicit synchronisation around GEM objects, e.g. set-domain will wait
 * for outstanding GPU rendering before marking the object ready for CPU
 * access, or a pageflip will wait until the GPU is complete before showing
 * the frame on the scanout.
 *
 * In order to use a fence, the object must track the fence it needs to
 * serialise with. For example, GEM objects want to track both read and
 * write access so that we can perform concurrent read operations between
 * the CPU and GPU engines, as well as waiting for all rendering to
 * complete, or waiting for the last GPU user of a "fence register". The
 * object then embeds a #i915_gem_active to track the most recent (in
 * retirement order) request relevant for the desired mode of access.
 * The #i915_gem_active is updated with i915_gem_active_set() to track the
 * most recent fence request; typically this is done as part of
 * i915_vma_move_to_active().
 *
 * When the #i915_gem_active completes (is retired), it will
 * signal its completion to the owner through a callback as well as mark
 * itself as idle (i915_gem_active.request == NULL). The owner
 * can then perform any action, such as delayed freeing of an active
 * resource including itself.
 */
struct i915_gem_active;

typedef void (*i915_gem_retire_fn)(struct i915_gem_active *,
                                   struct drm_i915_gem_request *);

struct i915_gem_active {
        struct drm_i915_gem_request __rcu *request;
        struct list_head link;
        i915_gem_retire_fn retire;
};

void i915_gem_retire_noop(struct i915_gem_active *,
                          struct drm_i915_gem_request *request);

/**
 * init_request_active - prepares the activity tracker for use
 * @active - the active tracker
 * @retire - a callback invoked when the tracker is retired (becomes idle),
 *           can be NULL
 *
 * init_request_active() prepares the embedded @active struct for use as
 * an activity tracker, that is for tracking the last known active request
 * associated with it. When the last request becomes idle (when it is retired
 * after completion), the optional callback @retire is invoked.
 */
static inline void
init_request_active(struct i915_gem_active *active,
                    i915_gem_retire_fn retire)
{
        INIT_LIST_HEAD(&active->link);
        active->retire = retire ?: i915_gem_retire_noop;
}

/**
 * i915_gem_active_set - updates the tracker to watch the current request
 * @active - the active tracker
 * @request - the request to watch
 *
 * i915_gem_active_set() watches the given @request for completion. Whilst
 * that @request is busy, the @active reports busy. When that @request is
 * retired, the @active tracker is updated to report idle.
 */
static inline void
i915_gem_active_set(struct i915_gem_active *active,
                    struct drm_i915_gem_request *request)
{
        list_move(&active->link, &request->active_list);
        rcu_assign_pointer(active->request, request);
}
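
/* An illustrative sketch of the tracking pattern described above; the
 * my_resource/my_retire names are hypothetical, not part of the driver:
 *
 *        struct my_resource {
 *                struct i915_gem_active active;
 *        };
 *
 *        static void my_retire(struct i915_gem_active *active,
 *                              struct drm_i915_gem_request *rq)
 *        {
 *                // the tracker is idle again; safe to recycle the resource
 *        }
 *
 *        init_request_active(&res->active, my_retire); // once, at creation
 *        i915_gem_active_set(&res->active, rq);        // under struct_mutex,
 *                                                      // e.g. move_to_active
 *
 * When rq is retired, my_retire() is invoked and res->active reads as idle.
 */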

/**
 * i915_gem_active_set_retire_fn - updates the retirement callback
 * @active - the active tracker
 * @fn - the routine called when the request is retired
 * @mutex - struct_mutex used to guard retirements
 *
 * i915_gem_active_set_retire_fn() updates the function pointer that
 * is called when the final request associated with the @active tracker
 * is retired.
 */
static inline void
i915_gem_active_set_retire_fn(struct i915_gem_active *active,
                              i915_gem_retire_fn fn,
                              struct mutex *mutex)
{
        lockdep_assert_held(mutex);
        active->retire = fn ?: i915_gem_retire_noop;
}

static inline struct drm_i915_gem_request *
__i915_gem_active_peek(const struct i915_gem_active *active)
{
        /* Inside the error capture (running with the driver in an unknown
         * state), we want to bend the rules slightly (a lot).
         *
         * Work is in progress to make it safer, in the meantime this keeps
         * the known issue from spamming the logs.
         */
        return rcu_dereference_protected(active->request, 1);
}

/**
 * i915_gem_active_raw - return the active request
 * @active - the active tracker
 *
 * i915_gem_active_raw() returns the current request being tracked, or NULL.
 * It does not obtain a reference on the request for the caller, so the caller
 * must hold struct_mutex.
 */
static inline struct drm_i915_gem_request *
i915_gem_active_raw(const struct i915_gem_active *active, struct mutex *mutex)
{
        return rcu_dereference_protected(active->request,
                                         lockdep_is_held(mutex));
}

/**
 * i915_gem_active_peek - report the active request being monitored
 * @active - the active tracker
 *
 * i915_gem_active_peek() returns the current request being tracked if
 * still active, or NULL. It does not obtain a reference on the request
 * for the caller, so the caller must hold struct_mutex.
 */
static inline struct drm_i915_gem_request *
i915_gem_active_peek(const struct i915_gem_active *active, struct mutex *mutex)
{
        struct drm_i915_gem_request *request;

        request = i915_gem_active_raw(active, mutex);
        if (!request || i915_gem_request_completed(request))
                return NULL;

        return request;
}

/**
 * i915_gem_active_get - return a reference to the active request
 * @active - the active tracker
 *
 * i915_gem_active_get() returns a reference to the active request, or NULL
 * if the active tracker is idle. The caller must hold struct_mutex.
 */
static inline struct drm_i915_gem_request *
i915_gem_active_get(const struct i915_gem_active *active, struct mutex *mutex)
{
        return i915_gem_request_get(i915_gem_active_peek(active, mutex));
}

/**
 * __i915_gem_active_get_rcu - return a reference to the active request
 * @active - the active tracker
 *
 * __i915_gem_active_get_rcu() returns a reference to the active request, or
 * NULL if the active tracker is idle. The caller must hold the RCU read lock,
 * but the returned pointer is safe to use outside of RCU.
 */
static inline struct drm_i915_gem_request *
__i915_gem_active_get_rcu(const struct i915_gem_active *active)
{
        /* Performing a lockless retrieval of the active request is super
         * tricky. SLAB_DESTROY_BY_RCU merely guarantees that the backing
         * slab of request objects will not be freed whilst we hold the
         * RCU read lock. It does not guarantee that the request itself
         * will not be freed and then *reused*. Viz,
         *
         * Thread A                     Thread B
         *
         * req = active.request
         *                              retire(req) -> free(req);
         *                              (req is now first on the slab freelist)
         *                              active.request = NULL
         *
         *                              req = new submission on a new object
         * ref(req)
         *
         * To prevent the request from being reused whilst the caller
         * uses it, we take a reference like normal. Whilst acquiring
         * the reference we check that it is not in a destroyed state
         * (refcnt == 0). That prevents the request being reallocated
         * whilst the caller holds on to it. To check that the request
         * was not reallocated as we acquired the reference we have to
         * check that our request remains the active request across
         * the lookup, in the same manner as a seqlock. The visibility
         * of the pointer versus the reference counting is controlled
         * by using RCU barriers (rcu_dereference and rcu_assign_pointer).
         *
         * In the middle of all that, we inspect whether the request is
         * complete. Retiring is lazy so the request may be completed long
         * before the active tracker is updated. Querying whether the
         * request is complete is far cheaper (as it involves no locked
         * instructions setting cachelines to exclusive) than acquiring
         * the reference, so we do it first. The RCU read lock ensures the
         * pointer dereference is valid, but does not ensure that the
         * seqno nor HWS is the right one! However, if the request was
         * reallocated, that means the active tracker's request was complete.
         * If the new request is also complete, then both are and we can
         * just report the active tracker is idle. If the new request is
         * incomplete, then we acquire a reference on it and check that
         * it remained the active request.
         *
         * It is then imperative that we do not zero the request on
         * reallocation, so that we can chase the dangling pointers!
         * See i915_gem_request_alloc().
         */
        do {
                struct drm_i915_gem_request *request;

                request = rcu_dereference(active->request);
                if (!request || i915_gem_request_completed(request))
                        return NULL;

                /* An especially silly compiler could decide to recompute the
                 * result of i915_gem_request_completed, more specifically
                 * re-emit the load for request->fence.seqno. A race would catch
                 * a later seqno value, which could flip the result from true to
                 * false. Which means part of the instructions below might not
                 * be executed, while later on instructions are executed. Due to
                 * barriers within the refcounting the inconsistency can't reach
                 * past the call to i915_gem_request_get_rcu, but not executing
                 * that while still executing i915_gem_request_put() creates
                 * havoc enough.  Prevent this with a compiler barrier.
                 */
                barrier();

                request = i915_gem_request_get_rcu(request);

                /* What stops the following rcu_access_pointer() from occurring
                 * before the above i915_gem_request_get_rcu()? If we were
                 * to read the value before pausing to get the reference to
                 * the request, we may not notice a change in the active
                 * tracker.
                 *
                 * The rcu_access_pointer() is a mere compiler barrier, which
                 * means both the CPU and compiler are free to perform the
                 * memory read without constraint. The compiler only has to
                 * ensure that any operations after the rcu_access_pointer()
                 * occur afterwards in program order. This means the read may
                 * be performed earlier by an out-of-order CPU, or adventurous
                 * compiler.
                 *
                 * The atomic operation at the heart of
                 * i915_gem_request_get_rcu(), see dma_fence_get_rcu(), is
                 * atomic_inc_not_zero() which is only a full memory barrier
                 * when successful. That is, if i915_gem_request_get_rcu()
                 * returns the request (and so with the reference counted
                 * incremented) then the following read for rcu_access_pointer()
                 * must occur after the atomic operation and so confirm
                 * that this request is the one currently being tracked.
                 *
                 * The corresponding write barrier is part of
                 * rcu_assign_pointer().
                 */
                if (!request || request == rcu_access_pointer(active->request))
                        return rcu_pointer_handoff(request);

                i915_gem_request_put(request);
        } while (1);
}

/**
 * i915_gem_active_get_unlocked - return a reference to the active request
 * @active - the active tracker
 *
 * i915_gem_active_get_unlocked() returns a reference to the active request,
 * or NULL if the active tracker is idle. The reference is obtained under RCU,
 * so no locking is required by the caller.
 *
 * The reference should be freed with i915_gem_request_put().
 */
static inline struct drm_i915_gem_request *
i915_gem_active_get_unlocked(const struct i915_gem_active *active)
{
        struct drm_i915_gem_request *request;

        rcu_read_lock();
        request = __i915_gem_active_get_rcu(active);
        rcu_read_unlock();

        return request;
}

/**
 * i915_gem_active_isset - report whether the active tracker is assigned
 * @active - the active tracker
 *
 * i915_gem_active_isset() returns true if the active tracker is currently
 * assigned to a request. Due to the lazy retiring, that request may be idle
 * and this may report stale information.
 */
static inline bool
i915_gem_active_isset(const struct i915_gem_active *active)
{
        return rcu_access_pointer(active->request);
}

/**
 * i915_gem_active_wait - waits until the request is completed
 * @active - the active request on which to wait
 * @flags - how to wait
 *
 * i915_gem_active_wait() waits until the request is completed before
 * returning, without requiring any locks to be held. Note that it does not
 * retire any requests before returning.
 *
 * This function relies on RCU in order to acquire the reference to the active
 * request without holding any locks. See __i915_gem_active_get_rcu() for the
 * gory details on how that is managed. Once the reference is acquired, we
 * can then wait upon the request, and afterwards release our reference,
 * free of any locking.
 *
 * This function wraps i915_wait_request(), see it for the full details on
 * the arguments.
 *
 * Returns 0 if successful, or a negative error code.
 */
static inline int
i915_gem_active_wait(const struct i915_gem_active *active, unsigned int flags)
{
        struct drm_i915_gem_request *request;
        long ret = 0;

        request = i915_gem_active_get_unlocked(active);
        if (request) {
                ret = i915_wait_request(request, flags, MAX_SCHEDULE_TIMEOUT);
                i915_gem_request_put(request);
        }

        return ret < 0 ? ret : 0;
}

/**
 * i915_gem_active_retire - waits until the request is retired
 * @active - the active request on which to wait
 *
 * i915_gem_active_retire() waits until the request is completed,
 * and then ensures that at least the retirement handler for this
 * @active tracker is called before returning. If the @active
 * tracker is idle, the function returns immediately.
 */
static inline int __must_check
i915_gem_active_retire(struct i915_gem_active *active,
                       struct mutex *mutex)
{
        struct drm_i915_gem_request *request;
        long ret;

        request = i915_gem_active_raw(active, mutex);
        if (!request)
                return 0;

        ret = i915_wait_request(request,
                                I915_WAIT_INTERRUPTIBLE | I915_WAIT_LOCKED,
                                MAX_SCHEDULE_TIMEOUT);
        if (ret < 0)
                return ret;

        list_del_init(&active->link);
        RCU_INIT_POINTER(active->request, NULL);

        active->retire(active, request);

        return 0;
}

#define for_each_active(mask, idx) \
        for (; mask ? idx = ffs(mask) - 1, 1 : 0; mask &= ~BIT(idx))
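
/* for_each_active() iterates @idx over each bit set in @mask, lowest bit
 * first, clearing each bit as it goes (so @mask is consumed). A sketch with
 * hypothetical names:
 *
 *        unsigned long mask = obj->active_mask; // hypothetical field
 *        int idx;
 *
 *        for_each_active(mask, idx)
 *                process_tracker(obj, idx);     // hypothetical helper
 */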

#endif /* I915_GEM_REQUEST_H */