linux/drivers/gpu/drm/i915/i915_request.c
/*
 * Copyright © 2008-2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/dma-fence-array.h>
#include <linux/dma-fence-chain.h>
#include <linux/irq_work.h>
#include <linux/prefetch.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/sched/signal.h>

#include "gem/i915_gem_context.h"
#include "gt/intel_breadcrumbs.h"
#include "gt/intel_context.h"
#include "gt/intel_engine.h"
#include "gt/intel_engine_heartbeat.h"
#include "gt/intel_gpu_commands.h"
#include "gt/intel_reset.h"
#include "gt/intel_ring.h"
#include "gt/intel_rps.h"

#include "i915_active.h"
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_pm.h"

struct execute_cb {
        struct irq_work work;
        struct i915_sw_fence *fence;
        struct i915_request *signal;
};

static struct kmem_cache *slab_requests;
static struct kmem_cache *slab_execute_cbs;

static const char *i915_fence_get_driver_name(struct dma_fence *fence)
{
        return dev_name(to_request(fence)->engine->i915->drm.dev);
}

static const char *i915_fence_get_timeline_name(struct dma_fence *fence)
{
        const struct i915_gem_context *ctx;

        /*
         * The timeline struct (as part of the ppgtt underneath a context)
         * may be freed when the request is no longer in use by the GPU.
         * We could extend the life of a context to beyond that of all
         * fences, possibly keeping the hw resource around indefinitely,
         * or we just give them a false name. Since
         * dma_fence_ops.get_timeline_name is a debug feature, the occasional
         * lie seems justifiable.
         */
        if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
                return "signaled";

        ctx = i915_request_gem_context(to_request(fence));
        if (!ctx)
                return "[" DRIVER_NAME "]";

        return ctx->name;
}

static bool i915_fence_signaled(struct dma_fence *fence)
{
        return i915_request_completed(to_request(fence));
}

static bool i915_fence_enable_signaling(struct dma_fence *fence)
{
        return i915_request_enable_breadcrumb(to_request(fence));
}

static signed long i915_fence_wait(struct dma_fence *fence,
                                   bool interruptible,
                                   signed long timeout)
{
        return i915_request_wait(to_request(fence),
                                 interruptible | I915_WAIT_PRIORITY,
                                 timeout);
}

struct kmem_cache *i915_request_slab_cache(void)
{
        return slab_requests;
}

static void i915_fence_release(struct dma_fence *fence)
{
        struct i915_request *rq = to_request(fence);

        GEM_BUG_ON(rq->guc_prio != GUC_PRIO_INIT &&
                   rq->guc_prio != GUC_PRIO_FINI);

        /*
         * The request is put onto an RCU freelist (i.e. the address
         * is immediately reused), so mark the fences as being freed now.
         * Otherwise the debugobjects for the fences are only marked as
         * freed when the slab cache itself is freed, and so we would get
         * caught trying to reuse dead objects.
         */
        i915_sw_fence_fini(&rq->submit);
        i915_sw_fence_fini(&rq->semaphore);

        /*
         * Keep one request on each engine for reserved use under memory
         * pressure; do not use with virtual engines, as this is really
         * only needed for kernel contexts.
         */
        if (!intel_engine_is_virtual(rq->engine) &&
            !cmpxchg(&rq->engine->request_pool, NULL, rq)) {
                intel_context_put(rq->context);
                return;
        }

        intel_context_put(rq->context);

        kmem_cache_free(slab_requests, rq);
}

const struct dma_fence_ops i915_fence_ops = {
        .get_driver_name = i915_fence_get_driver_name,
        .get_timeline_name = i915_fence_get_timeline_name,
        .enable_signaling = i915_fence_enable_signaling,
        .signaled = i915_fence_signaled,
        .wait = i915_fence_wait,
        .release = i915_fence_release,
};
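
/*
 * Illustrative sketch: these ops are bound to each request's embedded fence
 * in __i915_request_create() below, roughly
 *
 *      dma_fence_init(&rq->fence, &i915_fence_ops, &rq->lock,
 *                     tl->fence_context, seqno);
 *
 * after which generic dma_fence consumers (dma_resv, sync_file, ...) can
 * wait on and query the request without knowing anything about i915.
 */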

static void irq_execute_cb(struct irq_work *wrk)
{
        struct execute_cb *cb = container_of(wrk, typeof(*cb), work);

        i915_sw_fence_complete(cb->fence);
        kmem_cache_free(slab_execute_cbs, cb);
}

static __always_inline void
__notify_execute_cb(struct i915_request *rq, bool (*fn)(struct irq_work *wrk))
{
        struct execute_cb *cb, *cn;

        if (llist_empty(&rq->execute_cb))
                return;

        llist_for_each_entry_safe(cb, cn,
                                  llist_del_all(&rq->execute_cb),
                                  work.node.llist)
                fn(&cb->work);
}

static void __notify_execute_cb_irq(struct i915_request *rq)
{
        __notify_execute_cb(rq, irq_work_queue);
}

static bool irq_work_imm(struct irq_work *wrk)
{
        wrk->func(wrk);
        return false;
}

void i915_request_notify_execute_cb_imm(struct i915_request *rq)
{
        __notify_execute_cb(rq, irq_work_imm);
}

static void free_capture_list(struct i915_request *request)
{
        struct i915_capture_list *capture;

        capture = fetch_and_zero(&request->capture_list);
        while (capture) {
                struct i915_capture_list *next = capture->next;

                kfree(capture);
                capture = next;
        }
}

static void __i915_request_fill(struct i915_request *rq, u8 val)
{
        void *vaddr = rq->ring->vaddr;
        u32 head;

        head = rq->infix;
        if (rq->postfix < head) {
                memset(vaddr + head, val, rq->ring->size - head);
                head = 0;
        }
        memset(vaddr + head, val, rq->postfix - head);
}
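
/*
 * Worked example (illustrative numbers only): with a 4096 byte ring,
 * rq->infix == 3968 and rq->postfix == 128, the payload wrapped past the
 * end of the ring. The first memset() above fills [3968, 4096), head is
 * reset to 0, and the second memset() fills [0, 128).
 */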

/**
 * i915_request_active_engine
 * @rq: request to inspect
 * @active: pointer in which to return the active engine
 *
 * Stores the currently active engine in @active if the request is still
 * active and not yet completed.
 *
 * Returns true if the request was active, false otherwise.
 */
bool
i915_request_active_engine(struct i915_request *rq,
                           struct intel_engine_cs **active)
{
        struct intel_engine_cs *engine, *locked;
        bool ret = false;

        /*
         * Serialise with __i915_request_submit() so that it sees the
         * is-banned? state, or so we know the request is already inflight.
         *
         * Note that rq->engine is unstable, and so we double
         * check that we have acquired the lock on the final engine.
         */
        locked = READ_ONCE(rq->engine);
        spin_lock_irq(&locked->sched_engine->lock);
        while (unlikely(locked != (engine = READ_ONCE(rq->engine)))) {
                spin_unlock(&locked->sched_engine->lock);
                locked = engine;
                spin_lock(&locked->sched_engine->lock);
        }

        if (i915_request_is_active(rq)) {
                if (!__i915_request_is_complete(rq))
                        *active = locked;
                ret = true;
        }

        spin_unlock_irq(&locked->sched_engine->lock);

        return ret;
}
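
/*
 * Note: rq->engine may be rewritten by the scheduler while we sleep on the
 * lock, so the loop above locks its best guess and re-reads the pointer
 * until the two agree; the same chase-and-lock pattern is used elsewhere
 * in i915 wherever an unstable rq->engine must be locked.
 */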

static void __rq_init_watchdog(struct i915_request *rq)
{
        rq->watchdog.timer.function = NULL;
}

static enum hrtimer_restart __rq_watchdog_expired(struct hrtimer *hrtimer)
{
        struct i915_request *rq =
                container_of(hrtimer, struct i915_request, watchdog.timer);
        struct intel_gt *gt = rq->engine->gt;

        if (!i915_request_completed(rq)) {
                if (llist_add(&rq->watchdog.link, &gt->watchdog.list))
                        schedule_work(&gt->watchdog.work);
        } else {
                i915_request_put(rq);
        }

        return HRTIMER_NORESTART;
}

static void __rq_arm_watchdog(struct i915_request *rq)
{
        struct i915_request_watchdog *wdg = &rq->watchdog;
        struct intel_context *ce = rq->context;

        if (!ce->watchdog.timeout_us)
                return;

        i915_request_get(rq);

        hrtimer_init(&wdg->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        wdg->timer.function = __rq_watchdog_expired;
        hrtimer_start_range_ns(&wdg->timer,
                               ns_to_ktime(ce->watchdog.timeout_us *
                                           NSEC_PER_USEC),
                               NSEC_PER_MSEC,
                               HRTIMER_MODE_REL);
}

static void __rq_cancel_watchdog(struct i915_request *rq)
{
        struct i915_request_watchdog *wdg = &rq->watchdog;

        if (wdg->timer.function && hrtimer_try_to_cancel(&wdg->timer) > 0)
                i915_request_put(rq);
}
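
/*
 * Watchdog lifecycle, as implemented above and in submit_notify(): the
 * timer is armed via __rq_arm_watchdog() when the request is submitted,
 * taking an extra request reference. On expiry a still-running request is
 * queued onto gt->watchdog.list for the watchdog worker to cancel;
 * otherwise __rq_watchdog_expired() or __rq_cancel_watchdog() drops the
 * reference again.
 */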

bool i915_request_retire(struct i915_request *rq)
{
        if (!__i915_request_is_complete(rq))
                return false;

        RQ_TRACE(rq, "\n");

        GEM_BUG_ON(!i915_sw_fence_signaled(&rq->submit));
        trace_i915_request_retire(rq);
        i915_request_mark_complete(rq);

        __rq_cancel_watchdog(rq);

        /*
         * We know the GPU must have read the request to have
         * sent us the seqno + interrupt, so use the position
         * of the tail of the request to update the last known position
         * of the GPU head.
         *
         * Note this requires that we are always called in request
         * completion order.
         */
        GEM_BUG_ON(!list_is_first(&rq->link,
                                  &i915_request_timeline(rq)->requests));
        if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
                /* Poison before we release our space in the ring */
                __i915_request_fill(rq, POISON_FREE);
        rq->ring->head = rq->postfix;

        if (!i915_request_signaled(rq)) {
                spin_lock_irq(&rq->lock);
                dma_fence_signal_locked(&rq->fence);
                spin_unlock_irq(&rq->lock);
        }

        if (test_and_set_bit(I915_FENCE_FLAG_BOOST, &rq->fence.flags))
                atomic_dec(&rq->engine->gt->rps.num_waiters);

        /*
         * We only loosely track inflight requests across preemption,
         * and so we may find ourselves attempting to retire a _completed_
         * request that we have removed from the HW and put back on a run
         * queue.
         *
         * As we set I915_FENCE_FLAG_ACTIVE on the request, this should be
         * after removing the breadcrumb and signaling it, so that we do not
         * inadvertently attach the breadcrumb to a completed request.
         */
        rq->engine->remove_active_request(rq);
        GEM_BUG_ON(!llist_empty(&rq->execute_cb));

        __list_del_entry(&rq->link); /* poison neither prev/next (RCU walks) */

        intel_context_exit(rq->context);
        intel_context_unpin(rq->context);

        free_capture_list(rq);
        i915_sched_node_fini(&rq->sched);
        i915_request_put(rq);

        return true;
}

void i915_request_retire_upto(struct i915_request *rq)
{
        struct intel_timeline * const tl = i915_request_timeline(rq);
        struct i915_request *tmp;

        RQ_TRACE(rq, "\n");
        GEM_BUG_ON(!__i915_request_is_complete(rq));

        do {
                tmp = list_first_entry(&tl->requests, typeof(*tmp), link);
                GEM_BUG_ON(!i915_request_completed(tmp));
        } while (i915_request_retire(tmp) && tmp != rq);
}

static struct i915_request * const *
__engine_active(struct intel_engine_cs *engine)
{
        return READ_ONCE(engine->execlists.active);
}

static bool __request_in_flight(const struct i915_request *signal)
{
        struct i915_request * const *port, *rq;
        bool inflight = false;

        if (!i915_request_is_ready(signal))
                return false;

        /*
         * Even if we have unwound the request, it may still be on
         * the GPU (preempt-to-busy). If that request is inside an
         * unpreemptible critical section, it will not be removed. Some
         * GPU functions may even be stuck waiting for the paired request
         * (__await_execution) to be submitted and cannot be preempted
         * until the bond is executing.
         *
         * As we know that there are always preemption points between
         * requests, we know that only the currently executing request
         * may be still active even though we have cleared the flag.
         * However, we can't rely on our tracking of ELSP[0] to know
         * which request is currently active and so may be stuck, as
         * the tracking may be an event behind. Instead assume that
         * if the context is still inflight, then it is still active
         * even if the active flag has been cleared.
         *
         * To further complicate matters, if there is a pending promotion, the
         * HW may either perform a context switch to the second inflight
         * execlists, or it may switch to the pending set of execlists. In the
         * case of the latter, it may send the ACK and we process the event
         * copying the pending[] over top of inflight[], _overwriting_ our
         * *active. Since this implies the HW is arbitrating and not stuck in
         * *active, we do not worry about complete accuracy, but we do require
         * no read/write tearing of the pointer [the read of the pointer must
         * be valid, even as the array is being overwritten, for which we
         * require the writes to avoid tearing.]
         *
         * Note that the read of *execlists->active may race with the promotion
         * of execlists->pending[] to execlists->inflight[], overwriting
         * the value at *execlists->active. This is fine. The promotion implies
         * that we received an ACK from the HW, and so the context is not
         * stuck -- if we do not see ourselves in *active, the inflight status
         * is valid. If instead we see ourselves being copied into *active,
         * we are inflight and may signal the callback.
         */
        if (!intel_context_inflight(signal->context))
                return false;

        rcu_read_lock();
        for (port = __engine_active(signal->engine);
             (rq = READ_ONCE(*port)); /* may race with promotion of pending[] */
             port++) {
                if (rq->context == signal->context) {
                        inflight = i915_seqno_passed(rq->fence.seqno,
                                                     signal->fence.seqno);
                        break;
                }
        }
        rcu_read_unlock();

        return inflight;
}

static int
__await_execution(struct i915_request *rq,
                  struct i915_request *signal,
                  gfp_t gfp)
{
        struct execute_cb *cb;

        if (i915_request_is_active(signal))
                return 0;

        cb = kmem_cache_alloc(slab_execute_cbs, gfp);
        if (!cb)
                return -ENOMEM;

        cb->fence = &rq->submit;
        i915_sw_fence_await(cb->fence);
        init_irq_work(&cb->work, irq_execute_cb);

        /*
         * Register the callback first, then see if the signaler is already
         * active. This ensures that if we race with the
         * __notify_execute_cb from i915_request_submit() and we are not
         * included in that list, we get a second bite of the cherry and
         * execute it ourselves. After this point, a future
         * i915_request_submit() will notify us.
         *
         * In i915_request_retire() we set the ACTIVE bit on a completed
         * request (then flush the execute_cb). So by registering the
         * callback first, then checking the ACTIVE bit, we serialise with
         * the completed/retired request.
         */
        if (llist_add(&cb->work.node.llist, &signal->execute_cb)) {
                if (i915_request_is_active(signal) ||
                    __request_in_flight(signal))
                        i915_request_notify_execute_cb_imm(signal);
        }

        return 0;
}

static bool fatal_error(int error)
{
        switch (error) {
        case 0: /* not an error! */
        case -EAGAIN: /* innocent victim of a GT reset (__i915_request_reset) */
        case -ETIMEDOUT: /* waiting for Godot (timer_i915_sw_fence_wake) */
                return false;
        default:
                return true;
        }
}

void __i915_request_skip(struct i915_request *rq)
{
        GEM_BUG_ON(!fatal_error(rq->fence.error));

        if (rq->infix == rq->postfix)
                return;

        RQ_TRACE(rq, "error: %d\n", rq->fence.error);

        /*
         * As this request likely depends on state from the lost
         * context, clear out all the user operations leaving the
         * breadcrumb at the end (so we get the fence notifications).
         */
        __i915_request_fill(rq, 0);
        rq->infix = rq->postfix;
}

bool i915_request_set_error_once(struct i915_request *rq, int error)
{
        int old;

        GEM_BUG_ON(!IS_ERR_VALUE((long)error));

        if (i915_request_signaled(rq))
                return false;

        old = READ_ONCE(rq->fence.error);
        do {
                if (fatal_error(old))
                        return false;
        } while (!try_cmpxchg(&rq->fence.error, &old, error));

        return true;
}
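
/*
 * Usage sketch (illustrative): thanks to the try_cmpxchg() loop above, the
 * first fatal error recorded wins and later callers back off:
 *
 *      i915_request_set_error_once(rq, -EIO);    <- returns true, -EIO set
 *      i915_request_set_error_once(rq, -EFAULT); <- returns false, -EIO kept
 */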

struct i915_request *i915_request_mark_eio(struct i915_request *rq)
{
        if (__i915_request_is_complete(rq))
                return NULL;

        GEM_BUG_ON(i915_request_signaled(rq));

        /* As soon as the request is completed, it may be retired */
        rq = i915_request_get(rq);

        i915_request_set_error_once(rq, -EIO);
        i915_request_mark_complete(rq);

        return rq;
}

bool __i915_request_submit(struct i915_request *request)
{
        struct intel_engine_cs *engine = request->engine;
        bool result = false;

        RQ_TRACE(request, "\n");

        GEM_BUG_ON(!irqs_disabled());
        lockdep_assert_held(&engine->sched_engine->lock);

        /*
         * With the advent of preempt-to-busy, we frequently encounter
         * requests that we have unsubmitted from HW, but left running
         * until the next ack and so have completed in the meantime. On
         * resubmission of that completed request, we can skip
         * updating the payload, and execlists can even skip submitting
         * the request.
         *
         * We must remove the request from the caller's priority queue,
         * and the caller must only call us when the request is in their
         * priority queue, under the sched_engine->lock. This ensures that the
         * request has *not* yet been retired and we can safely move
         * the request into the engine->active.list where it will be
         * dropped upon retiring. (Otherwise, if we resubmitted a *retired*
         * request, this would be a horrible use-after-free.)
         */
        if (__i915_request_is_complete(request)) {
                list_del_init(&request->sched.link);
                goto active;
        }

        if (unlikely(intel_context_is_banned(request->context)))
                i915_request_set_error_once(request, -EIO);

        if (unlikely(fatal_error(request->fence.error)))
                __i915_request_skip(request);

        /*
         * Are we using semaphores when the gpu is already saturated?
         *
         * Using semaphores incurs a cost in having the GPU poll a
         * memory location, busywaiting for it to change. The continual
         * memory reads can have a noticeable impact on the rest of the
         * system with the extra bus traffic, stalling the cpu as it too
         * tries to access memory across the bus (perf stat -e bus-cycles).
         *
         * If we installed a semaphore on this request and we only submit
         * the request after the signaler completed, that indicates the
         * system is overloaded and using semaphores at this time only
         * increases the amount of work we are doing. If so, we disable
         * further use of semaphores until we are idle again, whence we
         * optimistically try again.
         */
        if (request->sched.semaphores &&
            i915_sw_fence_signaled(&request->semaphore))
                engine->saturated |= request->sched.semaphores;

        engine->emit_fini_breadcrumb(request,
                                     request->ring->vaddr + request->postfix);

        trace_i915_request_execute(request);
        if (engine->bump_serial)
                engine->bump_serial(engine);
        else
                engine->serial++;

        result = true;

        GEM_BUG_ON(test_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags));
        engine->add_active_request(request);
active:
        clear_bit(I915_FENCE_FLAG_PQUEUE, &request->fence.flags);
        set_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags);

        /*
         * XXX Rollback bonded-execution on __i915_request_unsubmit()?
         *
         * In the future, perhaps when we have an active time-slicing scheduler,
         * it will be interesting to unsubmit parallel execution and remove
         * busywaits from the GPU until their master is restarted. This is
         * quite hairy, we have to carefully rollback the fence and do a
         * preempt-to-idle cycle on the target engine, all the while the
         * master execute_cb may refire.
         */
        __notify_execute_cb_irq(request);

        /* We may be recursing from the signal callback of another i915 fence */
        if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags))
                i915_request_enable_breadcrumb(request);

        return result;
}

void i915_request_submit(struct i915_request *request)
{
        struct intel_engine_cs *engine = request->engine;
        unsigned long flags;

        /* Will be called from irq-context when using foreign fences. */
        spin_lock_irqsave(&engine->sched_engine->lock, flags);

        __i915_request_submit(request);

        spin_unlock_irqrestore(&engine->sched_engine->lock, flags);
}

void __i915_request_unsubmit(struct i915_request *request)
{
        struct intel_engine_cs *engine = request->engine;

        /*
         * Only unwind in reverse order, required so that the per-context list
         * is kept in seqno/ring order.
         */
        RQ_TRACE(request, "\n");

        GEM_BUG_ON(!irqs_disabled());
        lockdep_assert_held(&engine->sched_engine->lock);

        /*
         * Before we remove this breadcrumb from the signal list, we have
         * to ensure that a concurrent dma_fence_enable_signaling() does not
         * attach itself. We first mark the request as no longer active and
         * make sure that is visible to other cores, and then remove the
         * breadcrumb if attached.
         */
        GEM_BUG_ON(!test_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags));
        clear_bit_unlock(I915_FENCE_FLAG_ACTIVE, &request->fence.flags);
        if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags))
                i915_request_cancel_breadcrumb(request);

        /* We've already spun, don't charge on resubmitting. */
        if (request->sched.semaphores && __i915_request_has_started(request))
                request->sched.semaphores = 0;

        /*
         * We don't need to wake_up any waiters on request->execute; they
         * will get woken by any other event or by us re-adding this request
         * to the engine timeline (__i915_request_submit()). The waiters
         * should be quite adept at finding that the request now has a
         * different global_seqno from the one they went to sleep on.
         */
}

void i915_request_unsubmit(struct i915_request *request)
{
        struct intel_engine_cs *engine = request->engine;
        unsigned long flags;

        /* Will be called from irq-context when using foreign fences. */
        spin_lock_irqsave(&engine->sched_engine->lock, flags);

        __i915_request_unsubmit(request);

        spin_unlock_irqrestore(&engine->sched_engine->lock, flags);
}

void i915_request_cancel(struct i915_request *rq, int error)
{
        if (!i915_request_set_error_once(rq, error))
                return;

        set_bit(I915_FENCE_FLAG_SENTINEL, &rq->fence.flags);

        intel_context_cancel_request(rq->context, rq);
}

static int __i915_sw_fence_call
submit_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
{
        struct i915_request *request =
                container_of(fence, typeof(*request), submit);

        switch (state) {
        case FENCE_COMPLETE:
                trace_i915_request_submit(request);

                if (unlikely(fence->error))
                        i915_request_set_error_once(request, fence->error);
                else
                        __rq_arm_watchdog(request);

                /*
                 * We need to serialize use of the submit_request() callback
                 * with its hotplugging performed during an emergency
                 * i915_gem_set_wedged().  We use the RCU mechanism to mark the
                 * critical section in order to force i915_gem_set_wedged() to
                 * wait until the submit_request() is completed before
                 * proceeding.
                 */
                rcu_read_lock();
                request->engine->submit_request(request);
                rcu_read_unlock();
                break;

        case FENCE_FREE:
                i915_request_put(request);
                break;
        }

        return NOTIFY_DONE;
}

static int __i915_sw_fence_call
semaphore_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
{
        struct i915_request *rq = container_of(fence, typeof(*rq), semaphore);

        switch (state) {
        case FENCE_COMPLETE:
                break;

        case FENCE_FREE:
                i915_request_put(rq);
                break;
        }

        return NOTIFY_DONE;
}

static void retire_requests(struct intel_timeline *tl)
{
        struct i915_request *rq, *rn;

        list_for_each_entry_safe(rq, rn, &tl->requests, link)
                if (!i915_request_retire(rq))
                        break;
}

static noinline struct i915_request *
request_alloc_slow(struct intel_timeline *tl,
                   struct i915_request **rsvd,
                   gfp_t gfp)
{
        struct i915_request *rq;

        /* If we cannot wait, dip into our reserves */
        if (!gfpflags_allow_blocking(gfp)) {
                rq = xchg(rsvd, NULL);
                if (!rq) /* Use the normal failure path for one final WARN */
                        goto out;

                return rq;
        }

        if (list_empty(&tl->requests))
                goto out;

        /* Move our oldest request to the slab-cache (if not in use!) */
        rq = list_first_entry(&tl->requests, typeof(*rq), link);
        i915_request_retire(rq);

        rq = kmem_cache_alloc(slab_requests,
                              gfp | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
        if (rq)
                return rq;

        /* Ratelimit ourselves to prevent oom from malicious clients */
        rq = list_last_entry(&tl->requests, typeof(*rq), link);
        cond_synchronize_rcu(rq->rcustate);

        /* Retire our old requests in the hope that we free some */
        retire_requests(tl);

out:
        return kmem_cache_alloc(slab_requests, gfp);
}

static void __i915_request_ctor(void *arg)
{
        struct i915_request *rq = arg;

        spin_lock_init(&rq->lock);
        i915_sched_node_init(&rq->sched);
        i915_sw_fence_init(&rq->submit, submit_notify);
        i915_sw_fence_init(&rq->semaphore, semaphore_notify);

        rq->capture_list = NULL;

        init_llist_head(&rq->execute_cb);
}
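
/*
 * Note: the slab constructor above runs only when fresh backing pages are
 * allocated for the cache, not on every kmem_cache_alloc(). Because requests
 * are recycled through an RCU freelist (see the comment in
 * __i915_request_create() below), reused requests re-arm their fences with
 * i915_sw_fence_reinit() instead of relying on the ctor running again.
 */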

struct i915_request *
__i915_request_create(struct intel_context *ce, gfp_t gfp)
{
        struct intel_timeline *tl = ce->timeline;
        struct i915_request *rq;
        u32 seqno;
        int ret;

        might_alloc(gfp);

        /* Check that the caller provided an already pinned context */
        __intel_context_pin(ce);

        /*
         * Beware: Dragons be flying overhead.
         *
         * We use RCU to look up requests in flight. The lookups may
         * race with the request being allocated from the slab freelist.
         * That is, the request we are writing to here may be in the process
         * of being read by __i915_active_request_get_rcu(). As such,
         * we have to be very careful when overwriting the contents. During
         * the RCU lookup, we chase the request->engine pointer,
         * read the request->global_seqno and increment the reference count.
         *
         * The reference count is incremented atomically. If it is zero,
         * the lookup knows the request is unallocated and complete. Otherwise,
         * it is either still in use, or has been reallocated and reset
         * with dma_fence_init(). This increment is safe for release as we
         * check that the request we have a reference to matches the active
         * request.
         *
         * Before we increment the refcount, we chase the request->engine
         * pointer. We must not call kmem_cache_zalloc() or else we set
         * that pointer to NULL and cause a crash during the lookup. If
         * we see the request is completed (based on the value of the
         * old engine and seqno), the lookup is complete and reports NULL.
         * If we decide the request is not completed (new engine or seqno),
         * then we grab a reference and double check that it is still the
         * active request - which it won't be, and we restart the lookup.
         *
         * Do not use kmem_cache_zalloc() here!
         */
        rq = kmem_cache_alloc(slab_requests,
                              gfp | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
        if (unlikely(!rq)) {
                rq = request_alloc_slow(tl, &ce->engine->request_pool, gfp);
                if (!rq) {
                        ret = -ENOMEM;
                        goto err_unreserve;
                }
        }

        /*
         * Hold a reference to the intel_context over the life of an
         * i915_request. Without this an i915_request can exist after the
         * context has been destroyed (e.g. request retired, context closed,
         * but user space holds a reference to the request from an out fence).
         * In the case of GuC submission + virtual engine, the engine that the
         * request references is also destroyed, which can trigger a bad
         * pointer deref in fence ops (e.g. i915_fence_get_driver_name). We
         * could likely change these functions to avoid touching the engine,
         * but let's just be safe and hold the intel_context reference. In
         * execlists mode the request always eventually points to a physical
         * engine, so this isn't an issue.
         */
        rq->context = intel_context_get(ce);
        rq->engine = ce->engine;
        rq->ring = ce->ring;
        rq->execution_mask = ce->engine->mask;

        ret = intel_timeline_get_seqno(tl, rq, &seqno);
        if (ret)
                goto err_free;

        dma_fence_init(&rq->fence, &i915_fence_ops, &rq->lock,
                       tl->fence_context, seqno);

        RCU_INIT_POINTER(rq->timeline, tl);
        rq->hwsp_seqno = tl->hwsp_seqno;
        GEM_BUG_ON(__i915_request_is_complete(rq));

        rq->rcustate = get_state_synchronize_rcu(); /* acts as smp_mb() */

        rq->guc_prio = GUC_PRIO_INIT;

        /* We bump the ref for the fence chain */
        i915_sw_fence_reinit(&i915_request_get(rq)->submit);
        i915_sw_fence_reinit(&i915_request_get(rq)->semaphore);

        i915_sched_node_reinit(&rq->sched);

        /* No zalloc, everything must be cleared after use */
        rq->batch = NULL;
        __rq_init_watchdog(rq);
        GEM_BUG_ON(rq->capture_list);
        GEM_BUG_ON(!llist_empty(&rq->execute_cb));

        /*
         * Reserve space in the ring buffer for all the commands required to
         * eventually emit this request. This is to guarantee that the
         * i915_request_add() call can't fail. Note that the reserve may need
         * to be redone if the request is not actually submitted straight
         * away, e.g. because a GPU scheduler has deferred it.
         *
         * Note that due to how we add reserved_space to intel_ring_begin()
         * we need to double our request to ensure that if we need to wrap
         * around inside i915_request_add() there is sufficient space at
         * the beginning of the ring as well.
         */
        rq->reserved_space =
                2 * rq->engine->emit_fini_breadcrumb_dw * sizeof(u32);

        /*
         * Record the position of the start of the request so that
         * should we detect the updated seqno part-way through the
         * GPU processing the request, we never over-estimate the
         * position of the head.
         */
        rq->head = rq->ring->emit;

        ret = rq->engine->request_alloc(rq);
        if (ret)
                goto err_unwind;

        rq->infix = rq->ring->emit; /* end of header; start of user payload */

        intel_context_mark_active(ce);
        list_add_tail_rcu(&rq->link, &tl->requests);

        return rq;

err_unwind:
        ce->ring->emit = rq->head;

        /* Make sure we didn't add ourselves to external state before freeing */
        GEM_BUG_ON(!list_empty(&rq->sched.signalers_list));
        GEM_BUG_ON(!list_empty(&rq->sched.waiters_list));

err_free:
        intel_context_put(ce);
        kmem_cache_free(slab_requests, rq);
err_unreserve:
        intel_context_unpin(ce);
        return ERR_PTR(ret);
}

struct i915_request *
i915_request_create(struct intel_context *ce)
{
        struct i915_request *rq;
        struct intel_timeline *tl;

        tl = intel_context_timeline_lock(ce);
        if (IS_ERR(tl))
                return ERR_CAST(tl);

        /* Move our oldest request to the slab-cache (if not in use!) */
        rq = list_first_entry(&tl->requests, typeof(*rq), link);
        if (!list_is_last(&rq->link, &tl->requests))
                i915_request_retire(rq);

        intel_context_enter(ce);
        rq = __i915_request_create(ce, GFP_KERNEL);
        intel_context_exit(ce); /* active reference transferred to request */
        if (IS_ERR(rq))
                goto err_unlock;

        /* Check that we do not interrupt ourselves with a new request */
        rq->cookie = lockdep_pin_lock(&tl->mutex);

        return rq;

err_unlock:
        intel_context_timeline_unlock(tl);
        return rq;
}
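
/*
 * Usage sketch (illustrative, error handling abbreviated): a typical
 * kernel-internal caller on a pinned context looks something like
 *
 *      rq = i915_request_create(ce);
 *      if (IS_ERR(rq))
 *              return PTR_ERR(rq);
 *
 *      ... emit commands, e.g. via intel_ring_begin() ...
 *
 *      i915_request_add(rq);
 *
 * with i915_request_add() releasing the timeline mutex taken here.
 */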

static int
i915_request_await_start(struct i915_request *rq, struct i915_request *signal)
{
        struct dma_fence *fence;
        int err;

        if (i915_request_timeline(rq) == rcu_access_pointer(signal->timeline))
                return 0;

        if (i915_request_started(signal))
                return 0;

        /*
         * The caller holds a reference on @signal, but we do not serialise
         * against it being retired and removed from the lists.
         *
         * We do not hold a reference to the request before @signal, and
         * so must be very careful to ensure that it is not _recycled_ as
         * we follow the link backwards.
         */
        fence = NULL;
        rcu_read_lock();
        do {
                struct list_head *pos = READ_ONCE(signal->link.prev);
                struct i915_request *prev;

                /* Confirm signal has not been retired, the link is valid */
                if (unlikely(__i915_request_has_started(signal)))
                        break;

                /* Is signal the earliest request on its timeline? */
                if (pos == &rcu_dereference(signal->timeline)->requests)
                        break;

                /*
                 * Peek at the request before us in the timeline. That
                 * request will only be valid before it is retired, so
                 * after acquiring a reference to it, confirm that it is
                 * still part of the signaler's timeline.
                 */
                prev = list_entry(pos, typeof(*prev), link);
                if (!i915_request_get_rcu(prev))
                        break;

                /* After the strong barrier, confirm prev is still attached */
                if (unlikely(READ_ONCE(prev->link.next) != &signal->link)) {
                        i915_request_put(prev);
                        break;
                }

                fence = &prev->fence;
        } while (0);
        rcu_read_unlock();
        if (!fence)
                return 0;

        err = 0;
        if (!intel_timeline_sync_is_later(i915_request_timeline(rq), fence))
                err = i915_sw_fence_await_dma_fence(&rq->submit,
                                                    fence, 0,
                                                    I915_FENCE_GFP);
        dma_fence_put(fence);

        return err;
}

static intel_engine_mask_t
already_busywaiting(struct i915_request *rq)
{
        /*
         * Polling a semaphore causes bus traffic, delaying other users of
         * both the GPU and CPU. We want to limit the impact on others,
         * while taking advantage of early submission to reduce GPU
         * latency. Therefore we restrict ourselves to not using more
         * than one semaphore from each source, and not using a semaphore
         * if we have detected the engine is saturated (i.e. would not be
         * submitted early and cause bus traffic reading an already passed
         * semaphore).
         *
         * See the are-we-too-late? check in __i915_request_submit().
         */
        return rq->sched.semaphores | READ_ONCE(rq->engine->saturated);
}
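
/*
 * Note: a non-zero result means we are already busywaiting on a semaphore
 * from that engine (or the engine has been flagged as saturated), in which
 * case emit_semaphore_wait() below falls back to a software await instead
 * of emitting another busywait.
 */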

static int
__emit_semaphore_wait(struct i915_request *to,
                      struct i915_request *from,
                      u32 seqno)
{
        const int has_token = GRAPHICS_VER(to->engine->i915) >= 12;
        u32 hwsp_offset;
        int len, err;
        u32 *cs;

        GEM_BUG_ON(GRAPHICS_VER(to->engine->i915) < 8);
        GEM_BUG_ON(i915_request_has_initial_breadcrumb(to));

        /* We need to pin the signaler's HWSP until we are finished reading. */
        err = intel_timeline_read_hwsp(from, to, &hwsp_offset);
        if (err)
                return err;

        len = 4;
        if (has_token)
                len += 2;

        cs = intel_ring_begin(to, len);
        if (IS_ERR(cs))
                return PTR_ERR(cs);

        /*
         * Using greater-than-or-equal here means we have to worry
         * about seqno wraparound. To side step that issue, we swap
         * the timeline HWSP upon wrapping, so that everyone listening
         * for the old (pre-wrap) values does not see much smaller
         * (post-wrap) values than they were expecting (and so wait
         * forever).
         */
        *cs++ = (MI_SEMAPHORE_WAIT |
                 MI_SEMAPHORE_GLOBAL_GTT |
                 MI_SEMAPHORE_POLL |
                 MI_SEMAPHORE_SAD_GTE_SDD) +
                has_token;
        *cs++ = seqno;
        *cs++ = hwsp_offset;
        *cs++ = 0;
        if (has_token) {
                *cs++ = 0;
                *cs++ = MI_NOOP;
        }

        intel_ring_advance(to, cs);
        return 0;
}
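
/*
 * The resulting command stream (sketch): four dwords, or six with the
 * GRAPHICS_VER >= 12 token variant,
 *
 *      MI_SEMAPHORE_WAIT | GLOBAL_GTT | POLL | SAD_GTE_SDD (+ token)
 *      <seqno to compare against>
 *      <hwsp_offset>
 *      0                     (upper address dword)
 *      [0, MI_NOOP]          (token variant only)
 *
 * leaving the GPU polling the HWSP until it reads a value >= seqno.
 */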

static int
emit_semaphore_wait(struct i915_request *to,
                    struct i915_request *from,
                    gfp_t gfp)
{
        const intel_engine_mask_t mask = READ_ONCE(from->engine)->mask;
        struct i915_sw_fence *wait = &to->submit;

        if (!intel_context_use_semaphores(to->context))
                goto await_fence;

        if (i915_request_has_initial_breadcrumb(to))
                goto await_fence;

        /*
         * If this or its dependents are waiting on an external fence
         * that may fail catastrophically, then we want to avoid using
         * semaphores as they bypass the fence signaling metadata, and we
         * lose the fence->error propagation.
         */
        if (from->sched.flags & I915_SCHED_HAS_EXTERNAL_CHAIN)
                goto await_fence;

        /* Just emit the first semaphore we see as request space is limited. */
        if (already_busywaiting(to) & mask)
                goto await_fence;

        if (i915_request_await_start(to, from) < 0)
                goto await_fence;

        /* Only submit our spinner after the signaler is running! */
        if (__await_execution(to, from, gfp))
                goto await_fence;

        if (__emit_semaphore_wait(to, from, from->fence.seqno))
                goto await_fence;

        to->sched.semaphores |= mask;
        wait = &to->semaphore;

await_fence:
        return i915_sw_fence_await_dma_fence(wait,
                                             &from->fence, 0,
                                             I915_FENCE_GFP);
}

static bool intel_timeline_sync_has_start(struct intel_timeline *tl,
                                          struct dma_fence *fence)
{
        return __intel_timeline_sync_is_later(tl,
                                              fence->context,
                                              fence->seqno - 1);
}

static int intel_timeline_sync_set_start(struct intel_timeline *tl,
                                         const struct dma_fence *fence)
{
        return __intel_timeline_sync_set(tl, fence->context, fence->seqno - 1);
}
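
/*
 * Note: recording fence->seqno - 1 in the two helpers above is how we
 * remember that we have already ordered ourselves against the *start* of
 * that fence; if seqno - 1 is marked as later-or-equal on the timeline,
 * the previous request on the signaler's context, and hence the start of
 * this one, has been awaited before.
 */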

static int
__i915_request_await_execution(struct i915_request *to,
                               struct i915_request *from)
{
        int err;

        GEM_BUG_ON(intel_context_is_barrier(from->context));

        /* Submit both requests at the same time */
        err = __await_execution(to, from, I915_FENCE_GFP);
        if (err)
                return err;

        /* Squash repeated dependencies on the same timelines */
        if (intel_timeline_sync_has_start(i915_request_timeline(to),
                                          &from->fence))
                return 0;

        /*
         * Wait until the start of this request.
         *
         * The execution cb fires when we submit the request to HW. But in
         * many cases this may be long before the request itself is ready to
         * run (consider that we submit 2 requests for the same context, where
         * the request of interest is behind an indefinite spinner). So we hook
         * up to both to reduce our queues and keep the execution lag minimised
         * in the worst case, though we hope that the await_start is elided.
         */
        err = i915_request_await_start(to, from);
        if (err < 0)
                return err;

        /*
         * Ensure both start together [after all semaphores in signal]
         *
         * Now that we are queued to the HW at roughly the same time (thanks
         * to the execute cb) and are ready to run at roughly the same time
         * (thanks to the await start), our signaler may still be indefinitely
         * delayed by waiting on a semaphore from a remote engine. If our
         * signaler depends on a semaphore, so indirectly do we, and we do not
         * want to start our payload until our signaler also starts theirs.
         * So we wait.
         *
         * However, there is also a second condition for which we need to wait
         * for the precise start of the signaler. Consider that the signaler
         * was submitted in a chain of requests following another context
         * (with just an ordinary intra-engine fence dependency between the
         * two). In this case the signaler is queued to HW, but not for
         * immediate execution, and so we must wait until it reaches the
         * active slot.
         */
        if (intel_engine_has_semaphores(to->engine) &&
            !i915_request_has_initial_breadcrumb(to)) {
                err = __emit_semaphore_wait(to, from, from->fence.seqno - 1);
                if (err < 0)
                        return err;
        }

        /* Couple the dependency tree for PI on this exposed to->fence */
        if (to->engine->sched_engine->schedule) {
                err = i915_sched_node_add_dependency(&to->sched,
                                                     &from->sched,
                                                     I915_DEPENDENCY_WEAK);
                if (err < 0)
                        return err;
        }

        return intel_timeline_sync_set_start(i915_request_timeline(to),
                                             &from->fence);
}

static void mark_external(struct i915_request *rq)
{
        /*
         * The downside of using semaphores is that we lose metadata passing
         * along the signaling chain. This is particularly nasty when we
         * need to pass along a fatal error such as EFAULT or EDEADLK. For
         * fatal errors we want to scrub the request before it is executed,
         * which means that we cannot preload the request onto HW and have
         * it wait upon a semaphore.
         */
        rq->sched.flags |= I915_SCHED_HAS_EXTERNAL_CHAIN;
}
1291
1292static int
1293__i915_request_await_external(struct i915_request *rq, struct dma_fence *fence)
1294{
1295        mark_external(rq);
1296        return i915_sw_fence_await_dma_fence(&rq->submit, fence,
1297                                             i915_fence_context_timeout(rq->engine->i915,
1298                                                                        fence->context),
1299                                             I915_FENCE_GFP);
1300}
1301
1302static int
1303i915_request_await_external(struct i915_request *rq, struct dma_fence *fence)
1304{
1305        struct dma_fence *iter;
1306        int err = 0;
1307
1308        if (!to_dma_fence_chain(fence))
1309                return __i915_request_await_external(rq, fence);
1310
1311        dma_fence_chain_for_each(iter, fence) {
1312                struct dma_fence_chain *chain = to_dma_fence_chain(iter);
1313
1314                if (!dma_fence_is_i915(chain->fence)) {
1315                        err = __i915_request_await_external(rq, iter);
1316                        break;
1317                }
1318
1319                err = i915_request_await_dma_fence(rq, chain->fence);
1320                if (err < 0)
1321                        break;
1322        }
1323
1324        dma_fence_put(iter);
1325        return err;
1326}
1327
1328int
1329i915_request_await_execution(struct i915_request *rq,
1330                             struct dma_fence *fence)
1331{
1332        struct dma_fence **child = &fence;
1333        unsigned int nchild = 1;
1334        int ret;
1335
1336        if (dma_fence_is_array(fence)) {
1337                struct dma_fence_array *array = to_dma_fence_array(fence);
1338
1339                /* XXX Error for signal-on-any fence arrays */
1340
1341                child = array->fences;
1342                nchild = array->num_fences;
1343                GEM_BUG_ON(!nchild);
1344        }
1345
1346        do {
1347                fence = *child++;
1348                if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
1349                        continue;
1350
1351                if (fence->context == rq->fence.context)
1352                        continue;
1353
1354                /*
1355                 * We don't squash repeated fence dependencies here as we
1356                 * want to run our callback in all cases.
1357                 */
1358
1359                if (dma_fence_is_i915(fence))
1360                        ret = __i915_request_await_execution(rq,
1361                                                             to_request(fence));
1362                else
1363                        ret = i915_request_await_external(rq, fence);
1364                if (ret < 0)
1365                        return ret;
1366        } while (--nchild);
1367
1368        return 0;
1369}
1370
1371static int
1372await_request_submit(struct i915_request *to, struct i915_request *from)
1373{
1374        /*
1375         * If we are waiting on a virtual engine, then it may be
1376         * constrained to execute on a single engine *prior* to submission.
1377         * When it is submitted, it will be first submitted to the virtual
1378         * engine and then passed to the physical engine. We cannot allow
1379         * the waiter to be submitted immediately to the physical engine
1380         * as it may then bypass the virtual request.
1381         */
1382        if (to->engine == READ_ONCE(from->engine))
1383                return i915_sw_fence_await_sw_fence_gfp(&to->submit,
1384                                                        &from->submit,
1385                                                        I915_FENCE_GFP);
1386        else
1387                return __i915_request_await_execution(to, from);
1388}
1389
1390static int
1391i915_request_await_request(struct i915_request *to, struct i915_request *from)
1392{
1393        int ret;
1394
1395        GEM_BUG_ON(to == from);
1396        GEM_BUG_ON(to->timeline == from->timeline);
1397
1398        if (i915_request_completed(from)) {
1399                i915_sw_fence_set_error_once(&to->submit, from->fence.error);
1400                return 0;
1401        }
1402
1403        if (to->engine->sched_engine->schedule) {
1404                ret = i915_sched_node_add_dependency(&to->sched,
1405                                                     &from->sched,
1406                                                     I915_DEPENDENCY_EXTERNAL);
1407                if (ret < 0)
1408                        return ret;
1409        }
1410
1411        if (!intel_engine_uses_guc(to->engine) &&
1412            is_power_of_2(to->execution_mask | READ_ONCE(from->execution_mask)))
1413                ret = await_request_submit(to, from);
1414        else
1415                ret = emit_semaphore_wait(to, from, I915_FENCE_GFP);
1416        if (ret < 0)
1417                return ret;
1418
1419        return 0;
1420}
1421
1422int
1423i915_request_await_dma_fence(struct i915_request *rq, struct dma_fence *fence)
1424{
1425        struct dma_fence **child = &fence;
1426        unsigned int nchild = 1;
1427        int ret;
1428
1429        /*
1430         * Note that if the fence-array was created in signal-on-any mode,
1431         * we should *not* decompose it into its individual fences. However,
1432         * we don't currently store which mode the fence-array is operating
1433         * in. Fortunately, the only user of signal-on-any is private to
1434         * amdgpu and we should not see any incoming fence-array from
1435         * sync-file being in signal-on-any mode.
1436         */
1437        if (dma_fence_is_array(fence)) {
1438                struct dma_fence_array *array = to_dma_fence_array(fence);
1439
1440                child = array->fences;
1441                nchild = array->num_fences;
1442                GEM_BUG_ON(!nchild);
1443        }
1444
1445        do {
1446                fence = *child++;
1447                if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
1448                        continue;
1449
1450                /*
1451                 * Requests on the same timeline are explicitly ordered, along
1452                 * with their dependencies, by i915_request_add() which ensures
1453                 * that requests are submitted in-order through each ring.
1454                 */
1455                if (fence->context == rq->fence.context)
1456                        continue;
1457
1458                /* Squash repeated waits to the same timelines */
1459                if (fence->context &&
1460                    intel_timeline_sync_is_later(i915_request_timeline(rq),
1461                                                 fence))
1462                        continue;
1463
1464                if (dma_fence_is_i915(fence))
1465                        ret = i915_request_await_request(rq, to_request(fence));
1466                else
1467                        ret = i915_request_await_external(rq, fence);
1468                if (ret < 0)
1469                        return ret;
1470
1471                /* Record the latest fence used against each timeline */
1472                if (fence->context)
1473                        intel_timeline_sync_set(i915_request_timeline(rq),
1474                                                fence);
1475        } while (--nchild);
1476
1477        return 0;
1478}
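/*
 * Illustrative sketch (hypothetical caller, not part of this file):
 * ordering a request behind a fence imported from a sync_file fd, as
 * an execbuf-like path might do. rq and in_fence_fd are assumed to be
 * provided by the surrounding submission code.
 *
 *	struct dma_fence *in_fence;
 *	int err;
 *
 *	in_fence = sync_file_get_fence(in_fence_fd);
 *	if (!in_fence)
 *		return -EINVAL;
 *
 *	err = i915_request_await_dma_fence(rq, in_fence);
 *	dma_fence_put(in_fence);
 *	if (err < 0)
 *		return err;
 */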
1479
1480/**
1481 * i915_request_await_object - set this request to (async) wait upon a bo
1482 * @to: request we are wishing to use
1483 * @obj: object which may be in use on another ring.
1484 * @write: whether the wait is on behalf of a writer
1485 *
1486 * This code is meant to abstract object synchronization with the GPU.
1487 * Conceptually we serialise writes between engines inside the GPU.
1488 * We only allow one engine to write into a buffer at any time, but
1489 * multiple readers. To ensure each has a coherent view of memory, we must:
1490 *
1491 * - If there is an outstanding write request to the object, the new
1492 *   request must wait for it to complete (either CPU or in hw, requests
1493 *   on the same ring will be naturally ordered).
1494 *
1495 * - If we are a write request (@write is set), the new
1496 *   request must wait for outstanding read requests to complete.
1497 *
1498 * Returns 0 if successful, else propagates up the lower layer error.
1499 */
1500int
1501i915_request_await_object(struct i915_request *to,
1502                          struct drm_i915_gem_object *obj,
1503                          bool write)
1504{
1505        struct dma_fence *excl;
1506        int ret = 0;
1507
1508        if (write) {
1509                struct dma_fence **shared;
1510                unsigned int count, i;
1511
1512                ret = dma_resv_get_fences(obj->base.resv, &excl, &count,
1513                                          &shared);
1514                if (ret)
1515                        return ret;
1516
1517                for (i = 0; i < count; i++) {
1518                        ret = i915_request_await_dma_fence(to, shared[i]);
1519                        if (ret)
1520                                break;
1521
1522                        dma_fence_put(shared[i]);
1523                }
1524
1525                for (; i < count; i++)
1526                        dma_fence_put(shared[i]);
1527                kfree(shared);
1528        } else {
1529                excl = dma_resv_get_excl_unlocked(obj->base.resv);
1530        }
1531
1532        if (excl) {
1533                if (ret == 0)
1534                        ret = i915_request_await_dma_fence(to, excl);
1535
1536                dma_fence_put(excl);
1537        }
1538
1539        return ret;
1540}
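/*
 * Illustrative sketch (hypothetical caller): awaiting an object before
 * writing to it from a new request, then marking the vma active so the
 * reservation object tracks this request in turn. rq, obj and vma are
 * assumed from the surrounding code.
 *
 *	err = i915_request_await_object(rq, obj, true);
 *	if (err == 0)
 *		err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
 */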
1541
1542static struct i915_request *
1543__i915_request_add_to_timeline(struct i915_request *rq)
1544{
1545        struct intel_timeline *timeline = i915_request_timeline(rq);
1546        struct i915_request *prev;
1547
1548        /*
1549         * Dependency tracking and request ordering along the timeline
1550         * is special cased so that we can eliminate redundant ordering
1551         * operations while building the request (we know that the timeline
1552         * itself is ordered, and here we guarantee it).
1553         *
1554         * As we know we will need to emit tracking along the timeline,
1555         * we embed the hooks into our request struct -- at the cost of
1556         * having to provide specialised no-allocation interfaces (which
1557         * will be beneficial elsewhere).
1558         *
1559         * A second benefit to open-coding i915_request_await_request is
1560         * that we can apply a slight variant of the rules specialised
1561         * for timelines that jump between engines (such as virtual engines).
1562         * If we consider the case of a virtual engine, we must emit a dma-fence
1563         * to prevent scheduling of the second request until the first is
1564         * complete (to maximise our greedy late load balancing) and this
1565         * precludes optimising to use semaphore serialisation of a single
1566         * timeline across engines.
1567         */
1568        prev = to_request(__i915_active_fence_set(&timeline->last_request,
1569                                                  &rq->fence));
1570        if (prev && !__i915_request_is_complete(prev)) {
1571                bool uses_guc = intel_engine_uses_guc(rq->engine);
1572
1573                /*
1574                 * The requests are supposed to be kept in order. However,
1575                 * we need to be wary in case the timeline->last_request
1576                 * is used as a barrier for external modification to this
1577                 * context.
1578                 */
1579                GEM_BUG_ON(prev->context == rq->context &&
1580                           i915_seqno_passed(prev->fence.seqno,
1581                                             rq->fence.seqno));
1582
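                /*
                 * Choose how to order this request after @prev on the same
                 * timeline (a descriptive note, not from the original
                 * source): if both requests can only be submitted to the
                 * same single physical engine (execlists), or, under GuC
                 * submission, belong to the same context, then ordering
                 * their submit fences is sufficient. Otherwise we must wait
                 * upon the completion fence of the previous request.
                 */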
1583                if ((!uses_guc &&
1584                     is_power_of_2(READ_ONCE(prev->engine)->mask | rq->engine->mask)) ||
1585                    (uses_guc && prev->context == rq->context))
1586                        i915_sw_fence_await_sw_fence(&rq->submit,
1587                                                     &prev->submit,
1588                                                     &rq->submitq);
1589                else
1590                        __i915_sw_fence_await_dma_fence(&rq->submit,
1591                                                        &prev->fence,
1592                                                        &rq->dmaq);
1593                if (rq->engine->sched_engine->schedule)
1594                        __i915_sched_node_add_dependency(&rq->sched,
1595                                                         &prev->sched,
1596                                                         &rq->dep,
1597                                                         0);
1598        }
1599
1600        /*
1601         * Make sure that no request gazumped us - if it was allocated after
1602         * our i915_request_alloc() and called __i915_request_add() before
1603         * us, the timeline will hold its seqno which is later than ours.
1604         */
1605        GEM_BUG_ON(timeline->seqno != rq->fence.seqno);
1606
1607        return prev;
1608}
1609
1610/*
1611 * NB: This function is not allowed to fail. Doing so would mean that the
1612 * request is not being tracked for completion but the work itself is
1613 * going to happen on the hardware. This would be a Bad Thing(tm).
1614 */
1615struct i915_request *__i915_request_commit(struct i915_request *rq)
1616{
1617        struct intel_engine_cs *engine = rq->engine;
1618        struct intel_ring *ring = rq->ring;
1619        u32 *cs;
1620
1621        RQ_TRACE(rq, "\n");
1622
1623        /*
1624         * To ensure that this call will not fail, space for its emissions
1625         * should already have been reserved in the ring buffer. Let the ring
1626         * know that it is time to use that space up.
1627         */
1628        GEM_BUG_ON(rq->reserved_space > ring->space);
1629        rq->reserved_space = 0;
1630        rq->emitted_jiffies = jiffies;
1631
1632        /*
1633         * Record the position of the start of the breadcrumb so that
1634         * should we detect the updated seqno part-way through the
1635         * GPU processing the request, we never over-estimate the
1636         * position of the ring's HEAD.
1637         */
1638        cs = intel_ring_begin(rq, engine->emit_fini_breadcrumb_dw);
1639        GEM_BUG_ON(IS_ERR(cs));
1640        rq->postfix = intel_ring_offset(rq, cs);
1641
1642        return __i915_request_add_to_timeline(rq);
1643}
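/*
 * Note on the reservation consumed above (descriptive, the exact
 * expression lives in the request construction code earlier in this
 * file): the ring space for the final breadcrumb is set aside when the
 * request is created, sized from engine->emit_fini_breadcrumb_dw, so
 * that intel_ring_begin() here can never fail. An illustrative sketch
 * of that pairing:
 *
 *	rq->reserved_space =
 *		2 * rq->engine->emit_fini_breadcrumb_dw * sizeof(u32);
 */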
1644
1645void __i915_request_queue_bh(struct i915_request *rq)
1646{
1647        i915_sw_fence_commit(&rq->semaphore);
1648        i915_sw_fence_commit(&rq->submit);
1649}
1650
1651void __i915_request_queue(struct i915_request *rq,
1652                          const struct i915_sched_attr *attr)
1653{
1654        /*
1655         * Let the backend know a new request has arrived that may need
1656         * to adjust the existing execution schedule due to a high priority
1657         * request - i.e. we may want to preempt the current request in order
1658         * to run a high priority dependency chain *before* we can execute this
1659         * request.
1660         *
1661         * This is called before the request is ready to run so that we can
1662         * decide whether to preempt the entire chain and have it ready to
1663         * run at the earliest possible convenience.
1664         */
1665        if (attr && rq->engine->sched_engine->schedule)
1666                rq->engine->sched_engine->schedule(rq, attr);
1667
1668        local_bh_disable();
1669        __i915_request_queue_bh(rq);
1670        local_bh_enable(); /* kick tasklets */
1671}
1672
1673void i915_request_add(struct i915_request *rq)
1674{
1675        struct intel_timeline * const tl = i915_request_timeline(rq);
1676        struct i915_sched_attr attr = {};
1677        struct i915_gem_context *ctx;
1678
1679        lockdep_assert_held(&tl->mutex);
1680        lockdep_unpin_lock(&tl->mutex, rq->cookie);
1681
1682        trace_i915_request_add(rq);
1683        __i915_request_commit(rq);
1684
1685        /* XXX placeholder for selftests */
1686        rcu_read_lock();
1687        ctx = rcu_dereference(rq->context->gem_context);
1688        if (ctx)
1689                attr = ctx->sched;
1690        rcu_read_unlock();
1691
1692        __i915_request_queue(rq, &attr);
1693
1694        mutex_unlock(&tl->mutex);
1695}
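/*
 * Illustrative sketch of the request lifecycle as seen by a caller:
 * create a request on a pinned context, emit the payload into its
 * ring, then unconditionally add it to the timeline for submission.
 * emit_commands() is a hypothetical stand-in for the caller's payload.
 *
 *	rq = i915_request_create(ce);
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);
 *
 *	err = emit_commands(rq);
 *	if (err)
 *		i915_request_set_error_once(rq, err);
 *	i915_request_add(rq);
 */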
1696
1697static unsigned long local_clock_ns(unsigned int *cpu)
1698{
1699        unsigned long t;
1700
1701        /*
1702         * Return the raw value of local_clock(), which ticks in
1703         * nanoseconds; the result and all subsequent calculations are
1704         * defined in those same nanosecond units. The busywait timeout
1705         * we compare against is likewise specified in nanoseconds.
1706         *
1707         * Note that local_clock() is only defined wrt the current CPU;
1708         * the comparisons are no longer valid if we switch CPUs. Instead of
1709         * blocking preemption for the entire busywait, we can detect the CPU
1710         * switch and use that as an indicator of system load and a reason to
1711         * stop busywaiting, see busywait_stop().
1712         */
1713        *cpu = get_cpu();
1714        t = local_clock();
1715        put_cpu();
1716
1717        return t;
1718}
1719
1720static bool busywait_stop(unsigned long timeout, unsigned int cpu)
1721{
1722        unsigned int this_cpu;
1723
1724        if (time_after(local_clock_ns(&this_cpu), timeout))
1725                return true;
1726
1727        return this_cpu != cpu;
1728}
1729
1730static bool __i915_spin_request(struct i915_request * const rq, int state)
1731{
1732        unsigned long timeout_ns;
1733        unsigned int cpu;
1734
1735        /*
1736         * Only wait for the request if we know it is likely to complete.
1737         *
1738         * We don't track the timestamps around requests, nor the average
1739         * request length, so we do not have a good indicator that this
1740         * request will complete within the timeout. What we do know is the
1741         * order in which requests are executed by the context and so we can
1742         * tell if the request has been started. If the request is not even
1743         * running yet, it is a fair assumption that it will not complete
1744         * within our relatively short timeout.
1745         */
1746        if (!i915_request_is_running(rq))
1747                return false;
1748
1749        /*
1750         * When waiting for high frequency requests, e.g. during synchronous
1751         * rendering split between the CPU and GPU, the finite amount of time
1752         * required to set up the irq and wait upon it limits the response
1753         * rate. By busywaiting on the request completion for a short while we
1754         * can service the high frequency waits as quickly as possible. However,
1755         * if it is a slow request, we want to sleep as quickly as possible.
1756         * The tradeoff between waiting and sleeping is roughly the time it
1757         * takes to sleep on a request, on the order of a microsecond.
1758         */
1759
1760        timeout_ns = READ_ONCE(rq->engine->props.max_busywait_duration_ns);
1761        timeout_ns += local_clock_ns(&cpu);
1762        do {
1763                if (dma_fence_is_signaled(&rq->fence))
1764                        return true;
1765
1766                if (signal_pending_state(state, current))
1767                        break;
1768
1769                if (busywait_stop(timeout_ns, cpu))
1770                        break;
1771
1772                cpu_relax();
1773        } while (!need_resched());
1774
1775        return false;
1776}
1777
1778struct request_wait {
1779        struct dma_fence_cb cb;
1780        struct task_struct *tsk;
1781};
1782
1783static void request_wait_wake(struct dma_fence *fence, struct dma_fence_cb *cb)
1784{
1785        struct request_wait *wait = container_of(cb, typeof(*wait), cb);
1786
1787        wake_up_process(fetch_and_zero(&wait->tsk));
1788}
1789
1790/**
1791 * i915_request_wait - wait until execution of request has finished
1792 * @rq: the request to wait upon
1793 * @flags: how to wait
1794 * @timeout: how long to wait in jiffies
1795 *
1796 * i915_request_wait() waits for the request to be completed, for a
1797 * maximum of @timeout jiffies (with MAX_SCHEDULE_TIMEOUT implying an
1798 * unbounded wait).
1799 *
1800 * Returns the remaining time (in jiffies) if the request completed, which may
1801 * be zero, or -ETIME if the request is unfinished after the timeout expires.
1802 * May return -EINTR if called with I915_WAIT_INTERRUPTIBLE and a signal is
1803 * pending before the request completes.
1804 */
1805long i915_request_wait(struct i915_request *rq,
1806                       unsigned int flags,
1807                       long timeout)
1808{
1809        const int state = flags & I915_WAIT_INTERRUPTIBLE ?
1810                TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
1811        struct request_wait wait;
1812
1813        might_sleep();
1814        GEM_BUG_ON(timeout < 0);
1815
1816        if (dma_fence_is_signaled(&rq->fence))
1817                return timeout;
1818
1819        if (!timeout)
1820                return -ETIME;
1821
1822        trace_i915_request_wait_begin(rq, flags);
1823
1824        /*
1825         * We must never wait on the GPU while holding a lock as we
1826         * may need to perform a GPU reset. So while we don't need to
1827         * serialise wait/reset with an explicit lock, we do want
1828         * lockdep to detect potential dependency cycles.
1829         */
1830        mutex_acquire(&rq->engine->gt->reset.mutex.dep_map, 0, 0, _THIS_IP_);
1831
1832        /*
1833         * Optimistic spin before touching IRQs.
1834         *
1835         * We may use a rather large value here to offset the penalty of
1836         * switching away from the active task. Frequently, the client will
1837         * wait upon an old swapbuffer to throttle itself to remain within a
1838         * frame of the gpu. If the client is running in lockstep with the gpu,
1839         * then it should not be waiting long at all, and a sleep now will incur
1840         * extra scheduler latency in producing the next frame. To try to
1841         * avoid adding the cost of enabling/disabling the interrupt to the
1842         * short wait, we first spin to see if the request would have completed
1843         * in the time taken to set up the interrupt.
1844         *
1845         * We need up to 5us to enable the irq, and up to 20us to hide the
1846         * scheduler latency of a context switch, ignoring the secondary
1847         * impacts from a context switch such as cache eviction.
1848         *
1849         * The scheme used for low-latency IO is called "hybrid interrupt
1850         * polling". The suggestion there is to sleep until just before you
1851         * expect to be woken by the device interrupt and then poll for its
1852         * completion. That requires having a good predictor for the request
1853         * duration, which we currently lack.
1854         */
1855        if (IS_ACTIVE(CONFIG_DRM_I915_MAX_REQUEST_BUSYWAIT) &&
1856            __i915_spin_request(rq, state))
1857                goto out;
1858
1859        /*
1860         * This client is about to stall waiting for the GPU. In many cases
1861         * this is undesirable and limits the throughput of the system, as
1862         * many clients cannot continue processing user input/output whilst
1863         * blocked. RPS autotuning may take tens of milliseconds to respond
1864         * to the GPU load and thus incurs additional latency for the client.
1865         * We can circumvent that by promoting the GPU frequency to maximum
1866         * before we sleep. This makes the GPU throttle up much more quickly
1867         * (good for benchmarks and user experience, e.g. window animations),
1868         * but at a cost of spending more power processing the workload
1869         * (bad for battery).
1870         */
1871        if (flags & I915_WAIT_PRIORITY && !i915_request_started(rq))
1872                intel_rps_boost(rq);
1873
1874        wait.tsk = current;
1875        if (dma_fence_add_callback(&rq->fence, &wait.cb, request_wait_wake))
1876                goto out;
1877
1878        /*
1879         * Flush the submission tasklet, but only if it may help this request.
1880         *
1881         * We sometimes experience some latency between the HW interrupts and
1882         * tasklet execution (mostly due to ksoftirqd latency, but it can also
1883         * be due to lazy CS events), so let's run the tasklet manually if there
1884         * is a chance it may submit this request. If the request is not ready
1885         * to run, as it is waiting for other fences to be signaled, flushing
1886         * the tasklet is busy work without any advantage for this client.
1887         *
1888         * If the HW is being lazy, this is the last chance before we go to
1889         * sleep to catch any pending events. We will check periodically in
1890         * the heartbeat to flush the submission tasklets as a last resort
1891         * for unhappy HW.
1892         */
1893        if (i915_request_is_ready(rq))
1894                __intel_engine_flush_submission(rq->engine, false);
1895
1896        for (;;) {
1897                set_current_state(state);
1898
1899                if (dma_fence_is_signaled(&rq->fence))
1900                        break;
1901
1902                if (signal_pending_state(state, current)) {
1903                        timeout = -ERESTARTSYS;
1904                        break;
1905                }
1906
1907                if (!timeout) {
1908                        timeout = -ETIME;
1909                        break;
1910                }
1911
1912                timeout = io_schedule_timeout(timeout);
1913        }
1914        __set_current_state(TASK_RUNNING);
1915
1916        if (READ_ONCE(wait.tsk))
1917                dma_fence_remove_callback(&rq->fence, &wait.cb);
1918        GEM_BUG_ON(!list_empty(&wait.cb.node));
1919
1920out:
1921        mutex_release(&rq->engine->gt->reset.mutex.dep_map, _THIS_IP_);
1922        trace_i915_request_wait_end(rq);
1923        return timeout;
1924}
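/*
 * Illustrative sketch (hypothetical caller): a bounded, interruptible
 * wait, converting a millisecond budget into jiffies. A negative
 * return carries the error (e.g. -ETIME on timeout); zero or positive
 * is the time remaining upon completion.
 *
 *	long ret;
 *
 *	ret = i915_request_wait(rq, I915_WAIT_INTERRUPTIBLE,
 *				msecs_to_jiffies(100));
 *	if (ret < 0)
 *		return ret;
 */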
1925
1926static int print_sched_attr(const struct i915_sched_attr *attr,
1927                            char *buf, int x, int len)
1928{
1929        if (attr->priority == I915_PRIORITY_INVALID)
1930                return x;
1931
1932        x += snprintf(buf + x, len - x,
1933                      " prio=%d", attr->priority);
1934
1935        return x;
1936}
1937
1938static char queue_status(const struct i915_request *rq)
1939{
1940        if (i915_request_is_active(rq))
1941                return 'E';
1942
1943        if (i915_request_is_ready(rq))
1944                return intel_engine_is_virtual(rq->engine) ? 'V' : 'R';
1945
1946        return 'U';
1947}
1948
1949static const char *run_status(const struct i915_request *rq)
1950{
1951        if (__i915_request_is_complete(rq))
1952                return "!";
1953
1954        if (__i915_request_has_started(rq))
1955                return "*";
1956
1957        if (!i915_sw_fence_signaled(&rq->semaphore))
1958                return "&";
1959
1960        return "";
1961}
1962
1963static const char *fence_status(const struct i915_request *rq)
1964{
1965        if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags))
1966                return "+";
1967
1968        if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &rq->fence.flags))
1969                return "-";
1970
1971        return "";
1972}
1973
1974void i915_request_show(struct drm_printer *m,
1975                       const struct i915_request *rq,
1976                       const char *prefix,
1977                       int indent)
1978{
1979        const char *name = rq->fence.ops->get_timeline_name((struct dma_fence *)&rq->fence);
1980        char buf[80] = "";
1981        int x = 0;
1982
1983        /*
1984         * The prefix is used to show the queue status, for which we use
1985         * the following flags:
1986         *
1987         *  U [Unready]
1988         *    - initial status upon being submitted by the user
1989         *
1990         *    - the request is not ready for execution as it is waiting
1991         *      for external fences
1992         *
1993         *  R [Ready]
1994         *    - all fences the request was waiting on have been signaled,
1995         *      and the request is now ready for execution and will be
1996         *      in a backend queue
1997         *
1998         *    - a ready request may still need to wait on semaphores
1999         *      [internal fences]
2000         *
2001         *  V [Ready/virtual]
2002         *    - same as ready, but queued over multiple backends
2003         *
2004         *  E [Executing]
2005         *    - the request has been transferred from the backend queue and
2006         *      submitted for execution on HW
2007         *
2008         *    - a completed request may still be regarded as executing; its
2009         *      status may not be updated until it is retired and removed
2010         *      from the lists
2011         */
2012
2013        x = print_sched_attr(&rq->sched.attr, buf, x, sizeof(buf));
2014
2015        drm_printf(m, "%s%.*s%c %llx:%lld%s%s %s @ %dms: %s\n",
2016                   prefix, indent, "                ",
2017                   queue_status(rq),
2018                   rq->fence.context, rq->fence.seqno,
2019                   run_status(rq),
2020                   fence_status(rq),
2021                   buf,
2022                   jiffies_to_msecs(jiffies - rq->emitted_jiffies),
2023                   name);
2024}
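/*
 * Example of the resulting line (illustrative values): a ready request
 * still waiting on a semaphore, at priority 2, emitted 15ms ago on
 * timeline "ctx-name":
 *
 *	R 3f7:8& prio=2 @ 15ms: ctx-name
 */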
2025
2026static bool engine_match_ring(struct intel_engine_cs *engine, struct i915_request *rq)
2027{
2028        u32 ring = ENGINE_READ(engine, RING_START);
2029
2030        return ring == i915_ggtt_offset(rq->ring->vma);
2031}
2032
2033static bool match_ring(struct i915_request *rq)
2034{
2035        struct intel_engine_cs *engine;
2036        bool found;
2037        int i;
2038
2039        if (!intel_engine_is_virtual(rq->engine))
2040                return engine_match_ring(rq->engine, rq);
2041
2042        found = false;
2043        i = 0;
2044        while ((engine = intel_engine_get_sibling(rq->engine, i++))) {
2045                found = engine_match_ring(engine, rq);
2046                if (found)
2047                        break;
2048        }
2049
2050        return found;
2051}
2052
2053enum i915_request_state i915_test_request_state(struct i915_request *rq)
2054{
2055        if (i915_request_completed(rq))
2056                return I915_REQUEST_COMPLETE;
2057
2058        if (!i915_request_started(rq))
2059                return I915_REQUEST_PENDING;
2060
2061        if (match_ring(rq))
2062                return I915_REQUEST_ACTIVE;
2063
2064        return I915_REQUEST_QUEUED;
2065}
2066
2067#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
2068#include "selftests/mock_request.c"
2069#include "selftests/i915_request.c"
2070#endif
2071
2072void i915_request_module_exit(void)
2073{
2074        kmem_cache_destroy(slab_execute_cbs);
2075        kmem_cache_destroy(slab_requests);
2076}
2077
2078int __init i915_request_module_init(void)
2079{
2080        slab_requests =
2081                kmem_cache_create("i915_request",
2082                                  sizeof(struct i915_request),
2083                                  __alignof__(struct i915_request),
2084                                  SLAB_HWCACHE_ALIGN |
2085                                  SLAB_RECLAIM_ACCOUNT |
2086                                  SLAB_TYPESAFE_BY_RCU,
2087                                  __i915_request_ctor);
2088        if (!slab_requests)
2089                return -ENOMEM;
2090
2091        slab_execute_cbs = KMEM_CACHE(execute_cb,
2092                                             SLAB_HWCACHE_ALIGN |
2093                                             SLAB_RECLAIM_ACCOUNT |
2094                                             SLAB_TYPESAFE_BY_RCU);
2095        if (!slab_execute_cbs)
2096                goto err_requests;
2097
2098        return 0;
2099
2100err_requests:
2101        kmem_cache_destroy(slab_requests);
2102        return -ENOMEM;
2103}
2104