linux/drivers/gpu/drm/i915/i915_request.c
   1/*
   2 * Copyright © 2008-2015 Intel Corporation
   3 *
   4 * Permission is hereby granted, free of charge, to any person obtaining a
   5 * copy of this software and associated documentation files (the "Software"),
   6 * to deal in the Software without restriction, including without limitation
   7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   8 * and/or sell copies of the Software, and to permit persons to whom the
   9 * Software is furnished to do so, subject to the following conditions:
  10 *
  11 * The above copyright notice and this permission notice (including the next
  12 * paragraph) shall be included in all copies or substantial portions of the
  13 * Software.
  14 *
  15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
  20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
  21 * IN THE SOFTWARE.
  22 *
  23 */
  24
  25#include <linux/dma-fence-array.h>
  26#include <linux/dma-fence-chain.h>
  27#include <linux/irq_work.h>
  28#include <linux/prefetch.h>
  29#include <linux/sched.h>
  30#include <linux/sched/clock.h>
  31#include <linux/sched/signal.h>
  32
  33#include "gem/i915_gem_context.h"
  34#include "gt/intel_breadcrumbs.h"
  35#include "gt/intel_context.h"
  36#include "gt/intel_engine.h"
  37#include "gt/intel_engine_heartbeat.h"
  38#include "gt/intel_gpu_commands.h"
  39#include "gt/intel_reset.h"
  40#include "gt/intel_ring.h"
  41#include "gt/intel_rps.h"
  42
  43#include "i915_active.h"
  44#include "i915_drv.h"
  45#include "i915_globals.h"
  46#include "i915_trace.h"
  47#include "intel_pm.h"
  48
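     /*
      * An execute_cb is attached to a signaling request and run (via irq_work)
      * once that request has been submitted to HW, releasing the waiter's
      * submit fence and, optionally, invoking a hook with the signaling fence.
      */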
  49struct execute_cb {
  50        struct irq_work work;
  51        struct i915_sw_fence *fence;
  52        void (*hook)(struct i915_request *rq, struct dma_fence *signal);
  53        struct i915_request *signal;
  54};
  55
  56static struct i915_global_request {
  57        struct i915_global base;
  58        struct kmem_cache *slab_requests;
  59        struct kmem_cache *slab_execute_cbs;
  60} global;
  61
  62static const char *i915_fence_get_driver_name(struct dma_fence *fence)
  63{
  64        return dev_name(to_request(fence)->engine->i915->drm.dev);
  65}
  66
  67static const char *i915_fence_get_timeline_name(struct dma_fence *fence)
  68{
  69        const struct i915_gem_context *ctx;
  70
  71        /*
  72         * The timeline struct (as part of the ppgtt underneath a context)
  73         * may be freed when the request is no longer in use by the GPU.
  74         * We could extend the life of a context to beyond that of all
  75         * fences, possibly keeping the hw resource around indefinitely,
  76         * or we just give them a false name. Since
  77         * dma_fence_ops.get_timeline_name is a debug feature, the occasional
  78         * lie seems justifiable.
  79         */
  80        if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
  81                return "signaled";
  82
  83        ctx = i915_request_gem_context(to_request(fence));
  84        if (!ctx)
  85                return "[" DRIVER_NAME "]";
  86
  87        return ctx->name;
  88}
  89
  90static bool i915_fence_signaled(struct dma_fence *fence)
  91{
  92        return i915_request_completed(to_request(fence));
  93}
  94
  95static bool i915_fence_enable_signaling(struct dma_fence *fence)
  96{
  97        return i915_request_enable_breadcrumb(to_request(fence));
  98}
  99
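     /*
      * dma_fence wait entry point: route through i915_request_wait(), with
      * I915_WAIT_PRIORITY set so the wait may boost the request (see
      * i915_request_wait() for how that flag is handled).
      */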
 100static signed long i915_fence_wait(struct dma_fence *fence,
 101                                   bool interruptible,
 102                                   signed long timeout)
 103{
 104        return i915_request_wait(to_request(fence),
 105                                 interruptible | I915_WAIT_PRIORITY,
 106                                 timeout);
 107}
 108
 109struct kmem_cache *i915_request_slab_cache(void)
 110{
 111        return global.slab_requests;
 112}
 113
 114static void i915_fence_release(struct dma_fence *fence)
 115{
 116        struct i915_request *rq = to_request(fence);
 117
 118        /*
 119         * The request is put onto a RCU freelist (i.e. the address
 120         * is immediately reused), mark the fences as being freed now.
 121         * Otherwise the debugobjects for the fences are only marked as
 122         * freed when the slab cache itself is freed, and so we would get
 123         * caught trying to reuse dead objects.
 124         */
 125        i915_sw_fence_fini(&rq->submit);
 126        i915_sw_fence_fini(&rq->semaphore);
 127
 128        /*
 129         * Keep one request on each engine for reserved use under mempressure
 130         *
 131         * We do not hold a reference to the engine here and so have to be
 132         * very careful in what rq->engine we poke. The virtual engine is
 133         * referenced via the rq->context and we released that ref during
 134         * i915_request_retire(), ergo we must not dereference a virtual
 135         * engine here. Not that we would want to, as the only consumer of
 136         * the reserved engine->request_pool is the power management parking,
 137         * which must-not-fail, and that is only run on the physical engines.
 138         *
  139         * Since the request must have been executed to have completed,
  140         * we know that it will have been processed by the HW and will
  141         * not be unsubmitted again, so rq->engine and rq->execution_mask
  142         * at this point are stable. rq->execution_mask will be a single
  143         * bit if the last and _only_ engine it could execute on was a
  144         * physical engine; if it's multiple bits then it started on, and
  145         * could still be on, a virtual engine. Thus if the mask is not a
  146         * power-of-two we assume that rq->engine may still be a virtual
  147         * engine and so a dangling, invalid pointer that we cannot dereference.
 148         *
 149         * For example, consider the flow of a bonded request through a virtual
 150         * engine. The request is created with a wide engine mask (all engines
 151         * that we might execute on). On processing the bond, the request mask
 152         * is reduced to one or more engines. If the request is subsequently
 153         * bound to a single engine, it will then be constrained to only
 154         * execute on that engine and never returned to the virtual engine
 155         * after timeslicing away, see __unwind_incomplete_requests(). Thus we
 156         * know that if the rq->execution_mask is a single bit, rq->engine
 157         * can be a physical engine with the exact corresponding mask.
 158         */
 159        if (is_power_of_2(rq->execution_mask) &&
 160            !cmpxchg(&rq->engine->request_pool, NULL, rq))
 161                return;
 162
 163        kmem_cache_free(global.slab_requests, rq);
 164}
 165
 166const struct dma_fence_ops i915_fence_ops = {
 167        .get_driver_name = i915_fence_get_driver_name,
 168        .get_timeline_name = i915_fence_get_timeline_name,
 169        .enable_signaling = i915_fence_enable_signaling,
 170        .signaled = i915_fence_signaled,
 171        .wait = i915_fence_wait,
 172        .release = i915_fence_release,
 173};
 174
 175static void irq_execute_cb(struct irq_work *wrk)
 176{
 177        struct execute_cb *cb = container_of(wrk, typeof(*cb), work);
 178
 179        i915_sw_fence_complete(cb->fence);
 180        kmem_cache_free(global.slab_execute_cbs, cb);
 181}
 182
 183static void irq_execute_cb_hook(struct irq_work *wrk)
 184{
 185        struct execute_cb *cb = container_of(wrk, typeof(*cb), work);
 186
 187        cb->hook(container_of(cb->fence, struct i915_request, submit),
 188                 &cb->signal->fence);
 189        i915_request_put(cb->signal);
 190
 191        irq_execute_cb(wrk);
 192}
 193
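     /*
      * Drain the list of execute callbacks attached to @rq, handing each to
      * @fn: either queued as real irq_work (when notified from the interrupt
      * path) or invoked immediately (irq_work_imm) from process context.
      */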
 194static __always_inline void
 195__notify_execute_cb(struct i915_request *rq, bool (*fn)(struct irq_work *wrk))
 196{
 197        struct execute_cb *cb, *cn;
 198
 199        if (llist_empty(&rq->execute_cb))
 200                return;
 201
 202        llist_for_each_entry_safe(cb, cn,
 203                                  llist_del_all(&rq->execute_cb),
 204                                  work.node.llist)
 205                fn(&cb->work);
 206}
 207
 208static void __notify_execute_cb_irq(struct i915_request *rq)
 209{
 210        __notify_execute_cb(rq, irq_work_queue);
 211}
 212
 213static bool irq_work_imm(struct irq_work *wrk)
 214{
 215        wrk->func(wrk);
 216        return false;
 217}
 218
 219static void __notify_execute_cb_imm(struct i915_request *rq)
 220{
 221        __notify_execute_cb(rq, irq_work_imm);
 222}
 223
 224static void free_capture_list(struct i915_request *request)
 225{
 226        struct i915_capture_list *capture;
 227
 228        capture = fetch_and_zero(&request->capture_list);
 229        while (capture) {
 230                struct i915_capture_list *next = capture->next;
 231
 232                kfree(capture);
 233                capture = next;
 234        }
 235}
 236
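     /*
      * Overwrite the request's payload in the ring, from the start of the
      * user payload (rq->infix) up to the breadcrumb (rq->postfix), with
      * @val, taking care of the ring wrapping around in between.
      */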
 237static void __i915_request_fill(struct i915_request *rq, u8 val)
 238{
 239        void *vaddr = rq->ring->vaddr;
 240        u32 head;
 241
 242        head = rq->infix;
 243        if (rq->postfix < head) {
 244                memset(vaddr + head, val, rq->ring->size - head);
 245                head = 0;
 246        }
 247        memset(vaddr + head, val, rq->postfix - head);
 248}
 249
 250/**
 251 * i915_request_active_engine
 252 * @rq: request to inspect
 253 * @active: pointer in which to return the active engine
 254 *
  255 * Fills the @active pointer with the currently active engine if the request
  256 * is still active and not yet completed.
  257 *
  258 * Returns true if the request was active, false otherwise.
 259 */
 260bool
 261i915_request_active_engine(struct i915_request *rq,
 262                           struct intel_engine_cs **active)
 263{
 264        struct intel_engine_cs *engine, *locked;
 265        bool ret = false;
 266
 267        /*
  268         * Serialise with __i915_request_submit() so that it sees whether the
  269         * request is banned, or we know the request is already inflight.
 270         *
 271         * Note that rq->engine is unstable, and so we double
 272         * check that we have acquired the lock on the final engine.
 273         */
 274        locked = READ_ONCE(rq->engine);
 275        spin_lock_irq(&locked->active.lock);
 276        while (unlikely(locked != (engine = READ_ONCE(rq->engine)))) {
 277                spin_unlock(&locked->active.lock);
 278                locked = engine;
 279                spin_lock(&locked->active.lock);
 280        }
 281
 282        if (i915_request_is_active(rq)) {
 283                if (!__i915_request_is_complete(rq))
 284                        *active = locked;
 285                ret = true;
 286        }
 287
 288        spin_unlock_irq(&locked->active.lock);
 289
 290        return ret;
 291}
 292
 293
 294static void remove_from_engine(struct i915_request *rq)
 295{
 296        struct intel_engine_cs *engine, *locked;
 297
 298        /*
 299         * Virtual engines complicate acquiring the engine timeline lock,
 300         * as their rq->engine pointer is not stable until under that
 301         * engine lock. The simple ploy we use is to take the lock then
 302         * check that the rq still belongs to the newly locked engine.
 303         */
 304        locked = READ_ONCE(rq->engine);
 305        spin_lock_irq(&locked->active.lock);
 306        while (unlikely(locked != (engine = READ_ONCE(rq->engine)))) {
 307                spin_unlock(&locked->active.lock);
 308                spin_lock(&engine->active.lock);
 309                locked = engine;
 310        }
 311        list_del_init(&rq->sched.link);
 312
 313        clear_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
 314        clear_bit(I915_FENCE_FLAG_HOLD, &rq->fence.flags);
 315
 316        /* Prevent further __await_execution() registering a cb, then flush */
 317        set_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags);
 318
 319        spin_unlock_irq(&locked->active.lock);
 320
 321        __notify_execute_cb_imm(rq);
 322}
 323
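     /*
      * Per-request watchdog: if the context specifies a watchdog timeout
      * (ce->watchdog.timeout_us), an hrtimer is armed when the request is
      * submitted; on expiry the request is queued onto gt->watchdog.list so
      * that the GT watchdog worker can cancel it from process context.
      */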
 324static void __rq_init_watchdog(struct i915_request *rq)
 325{
 326        rq->watchdog.timer.function = NULL;
 327}
 328
 329static enum hrtimer_restart __rq_watchdog_expired(struct hrtimer *hrtimer)
 330{
 331        struct i915_request *rq =
 332                container_of(hrtimer, struct i915_request, watchdog.timer);
 333        struct intel_gt *gt = rq->engine->gt;
 334
 335        if (!i915_request_completed(rq)) {
 336                if (llist_add(&rq->watchdog.link, &gt->watchdog.list))
 337                        schedule_work(&gt->watchdog.work);
 338        } else {
 339                i915_request_put(rq);
 340        }
 341
 342        return HRTIMER_NORESTART;
 343}
 344
 345static void __rq_arm_watchdog(struct i915_request *rq)
 346{
 347        struct i915_request_watchdog *wdg = &rq->watchdog;
 348        struct intel_context *ce = rq->context;
 349
 350        if (!ce->watchdog.timeout_us)
 351                return;
 352
 353        i915_request_get(rq);
 354
 355        hrtimer_init(&wdg->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 356        wdg->timer.function = __rq_watchdog_expired;
 357        hrtimer_start_range_ns(&wdg->timer,
 358                               ns_to_ktime(ce->watchdog.timeout_us *
 359                                           NSEC_PER_USEC),
 360                               NSEC_PER_MSEC,
 361                               HRTIMER_MODE_REL);
 362}
 363
 364static void __rq_cancel_watchdog(struct i915_request *rq)
 365{
 366        struct i915_request_watchdog *wdg = &rq->watchdog;
 367
 368        if (wdg->timer.function && hrtimer_try_to_cancel(&wdg->timer) > 0)
 369                i915_request_put(rq);
 370}
 371
 372bool i915_request_retire(struct i915_request *rq)
 373{
 374        if (!__i915_request_is_complete(rq))
 375                return false;
 376
 377        RQ_TRACE(rq, "\n");
 378
 379        GEM_BUG_ON(!i915_sw_fence_signaled(&rq->submit));
 380        trace_i915_request_retire(rq);
 381        i915_request_mark_complete(rq);
 382
 383        __rq_cancel_watchdog(rq);
 384
 385        /*
 386         * We know the GPU must have read the request to have
 387         * sent us the seqno + interrupt, so use the position
 388         * of tail of the request to update the last known position
 389         * of the GPU head.
 390         *
 391         * Note this requires that we are always called in request
 392         * completion order.
 393         */
 394        GEM_BUG_ON(!list_is_first(&rq->link,
 395                                  &i915_request_timeline(rq)->requests));
 396        if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
 397                /* Poison before we release our space in the ring */
 398                __i915_request_fill(rq, POISON_FREE);
 399        rq->ring->head = rq->postfix;
 400
 401        if (!i915_request_signaled(rq)) {
 402                spin_lock_irq(&rq->lock);
 403                dma_fence_signal_locked(&rq->fence);
 404                spin_unlock_irq(&rq->lock);
 405        }
 406
 407        if (test_and_set_bit(I915_FENCE_FLAG_BOOST, &rq->fence.flags))
 408                atomic_dec(&rq->engine->gt->rps.num_waiters);
 409
 410        /*
 411         * We only loosely track inflight requests across preemption,
 412         * and so we may find ourselves attempting to retire a _completed_
 413         * request that we have removed from the HW and put back on a run
 414         * queue.
 415         *
 416         * As we set I915_FENCE_FLAG_ACTIVE on the request, this should be
 417         * after removing the breadcrumb and signaling it, so that we do not
 418         * inadvertently attach the breadcrumb to a completed request.
 419         */
 420        if (!list_empty(&rq->sched.link))
 421                remove_from_engine(rq);
 422        GEM_BUG_ON(!llist_empty(&rq->execute_cb));
 423
 424        __list_del_entry(&rq->link); /* poison neither prev/next (RCU walks) */
 425
 426        intel_context_exit(rq->context);
 427        intel_context_unpin(rq->context);
 428
 429        free_capture_list(rq);
 430        i915_sched_node_fini(&rq->sched);
 431        i915_request_put(rq);
 432
 433        return true;
 434}
 435
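     /*
      * Retire all completed requests on @rq's timeline, oldest first, up to
      * and including @rq itself.
      */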
 436void i915_request_retire_upto(struct i915_request *rq)
 437{
 438        struct intel_timeline * const tl = i915_request_timeline(rq);
 439        struct i915_request *tmp;
 440
 441        RQ_TRACE(rq, "\n");
 442        GEM_BUG_ON(!__i915_request_is_complete(rq));
 443
 444        do {
 445                tmp = list_first_entry(&tl->requests, typeof(*tmp), link);
 446        } while (i915_request_retire(tmp) && tmp != rq);
 447}
 448
 449static struct i915_request * const *
 450__engine_active(struct intel_engine_cs *engine)
 451{
 452        return READ_ONCE(engine->execlists.active);
 453}
 454
 455static bool __request_in_flight(const struct i915_request *signal)
 456{
 457        struct i915_request * const *port, *rq;
 458        bool inflight = false;
 459
 460        if (!i915_request_is_ready(signal))
 461                return false;
 462
 463        /*
 464         * Even if we have unwound the request, it may still be on
 465         * the GPU (preempt-to-busy). If that request is inside an
 466         * unpreemptible critical section, it will not be removed. Some
 467         * GPU functions may even be stuck waiting for the paired request
 468         * (__await_execution) to be submitted and cannot be preempted
 469         * until the bond is executing.
 470         *
 471         * As we know that there are always preemption points between
 472         * requests, we know that only the currently executing request
 473         * may be still active even though we have cleared the flag.
 474         * However, we can't rely on our tracking of ELSP[0] to know
  475         * which request is currently active and so may be stuck, as
  476         * the tracking may be an event behind. Instead assume that
 477         * if the context is still inflight, then it is still active
 478         * even if the active flag has been cleared.
 479         *
  480         * To further complicate matters, if there is a pending promotion, the HW
 481         * may either perform a context switch to the second inflight execlists,
 482         * or it may switch to the pending set of execlists. In the case of the
 483         * latter, it may send the ACK and we process the event copying the
 484         * pending[] over top of inflight[], _overwriting_ our *active. Since
  485         * this implies the HW is arbitrating and not stuck in *active, we do
 486         * not worry about complete accuracy, but we do require no read/write
 487         * tearing of the pointer [the read of the pointer must be valid, even
 488         * as the array is being overwritten, for which we require the writes
 489         * to avoid tearing.]
 490         *
 491         * Note that the read of *execlists->active may race with the promotion
  492         * of execlists->pending[] to execlists->inflight[], overwriting
 493         * the value at *execlists->active. This is fine. The promotion implies
 494         * that we received an ACK from the HW, and so the context is not
 495         * stuck -- if we do not see ourselves in *active, the inflight status
 496         * is valid. If instead we see ourselves being copied into *active,
 497         * we are inflight and may signal the callback.
 498         */
 499        if (!intel_context_inflight(signal->context))
 500                return false;
 501
 502        rcu_read_lock();
 503        for (port = __engine_active(signal->engine);
 504             (rq = READ_ONCE(*port)); /* may race with promotion of pending[] */
 505             port++) {
 506                if (rq->context == signal->context) {
 507                        inflight = i915_seqno_passed(rq->fence.seqno,
 508                                                     signal->fence.seqno);
 509                        break;
 510                }
 511        }
 512        rcu_read_unlock();
 513
 514        return inflight;
 515}
 516
 517static int
 518__await_execution(struct i915_request *rq,
 519                  struct i915_request *signal,
 520                  void (*hook)(struct i915_request *rq,
 521                               struct dma_fence *signal),
 522                  gfp_t gfp)
 523{
 524        struct execute_cb *cb;
 525
 526        if (i915_request_is_active(signal)) {
 527                if (hook)
 528                        hook(rq, &signal->fence);
 529                return 0;
 530        }
 531
 532        cb = kmem_cache_alloc(global.slab_execute_cbs, gfp);
 533        if (!cb)
 534                return -ENOMEM;
 535
 536        cb->fence = &rq->submit;
 537        i915_sw_fence_await(cb->fence);
 538        init_irq_work(&cb->work, irq_execute_cb);
 539
 540        if (hook) {
 541                cb->hook = hook;
 542                cb->signal = i915_request_get(signal);
 543                cb->work.func = irq_execute_cb_hook;
 544        }
 545
 546        /*
 547         * Register the callback first, then see if the signaler is already
 548         * active. This ensures that if we race with the
 549         * __notify_execute_cb from i915_request_submit() and we are not
 550         * included in that list, we get a second bite of the cherry and
 551         * execute it ourselves. After this point, a future
 552         * i915_request_submit() will notify us.
 553         *
 554         * In i915_request_retire() we set the ACTIVE bit on a completed
 555         * request (then flush the execute_cb). So by registering the
 556         * callback first, then checking the ACTIVE bit, we serialise with
 557         * the completed/retired request.
 558         */
 559        if (llist_add(&cb->work.node.llist, &signal->execute_cb)) {
 560                if (i915_request_is_active(signal) ||
 561                    __request_in_flight(signal))
 562                        __notify_execute_cb_imm(signal);
 563        }
 564
 565        return 0;
 566}
 567
 568static bool fatal_error(int error)
 569{
 570        switch (error) {
 571        case 0: /* not an error! */
 572        case -EAGAIN: /* innocent victim of a GT reset (__i915_request_reset) */
 573        case -ETIMEDOUT: /* waiting for Godot (timer_i915_sw_fence_wake) */
 574                return false;
 575        default:
 576                return true;
 577        }
 578}
 579
 580void __i915_request_skip(struct i915_request *rq)
 581{
 582        GEM_BUG_ON(!fatal_error(rq->fence.error));
 583
 584        if (rq->infix == rq->postfix)
 585                return;
 586
 587        RQ_TRACE(rq, "error: %d\n", rq->fence.error);
 588
 589        /*
 590         * As this request likely depends on state from the lost
 591         * context, clear out all the user operations leaving the
 592         * breadcrumb at the end (so we get the fence notifications).
 593         */
 594        __i915_request_fill(rq, 0);
 595        rq->infix = rq->postfix;
 596}
 597
 598bool i915_request_set_error_once(struct i915_request *rq, int error)
 599{
 600        int old;
 601
 602        GEM_BUG_ON(!IS_ERR_VALUE((long)error));
 603
 604        if (i915_request_signaled(rq))
 605                return false;
 606
 607        old = READ_ONCE(rq->fence.error);
 608        do {
 609                if (fatal_error(old))
 610                        return false;
 611        } while (!try_cmpxchg(&rq->fence.error, &old, error));
 612
 613        return true;
 614}
 615
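     /*
      * Mark an incomplete request as failed with -EIO and force it to be
      * treated as complete. Returns an extra reference to the request (or
      * NULL if it had already completed) so the caller can continue to use
      * it, e.g. to signal the fence, without racing against retirement.
      */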
 616struct i915_request *i915_request_mark_eio(struct i915_request *rq)
 617{
 618        if (__i915_request_is_complete(rq))
 619                return NULL;
 620
 621        GEM_BUG_ON(i915_request_signaled(rq));
 622
 623        /* As soon as the request is completed, it may be retired */
 624        rq = i915_request_get(rq);
 625
 626        i915_request_set_error_once(rq, -EIO);
 627        i915_request_mark_complete(rq);
 628
 629        return rq;
 630}
 631
 632bool __i915_request_submit(struct i915_request *request)
 633{
 634        struct intel_engine_cs *engine = request->engine;
 635        bool result = false;
 636
 637        RQ_TRACE(request, "\n");
 638
 639        GEM_BUG_ON(!irqs_disabled());
 640        lockdep_assert_held(&engine->active.lock);
 641
 642        /*
 643         * With the advent of preempt-to-busy, we frequently encounter
 644         * requests that we have unsubmitted from HW, but left running
 645         * until the next ack and so have completed in the meantime. On
 646         * resubmission of that completed request, we can skip
 647         * updating the payload, and execlists can even skip submitting
 648         * the request.
 649         *
 650         * We must remove the request from the caller's priority queue,
 651         * and the caller must only call us when the request is in their
 652         * priority queue, under the active.lock. This ensures that the
 653         * request has *not* yet been retired and we can safely move
 654         * the request into the engine->active.list where it will be
  655         * dropped upon retiring. (Otherwise if we resubmit a *retired*
 656         * request, this would be a horrible use-after-free.)
 657         */
 658        if (__i915_request_is_complete(request)) {
 659                list_del_init(&request->sched.link);
 660                goto active;
 661        }
 662
 663        if (unlikely(intel_context_is_banned(request->context)))
 664                i915_request_set_error_once(request, -EIO);
 665
 666        if (unlikely(fatal_error(request->fence.error)))
 667                __i915_request_skip(request);
 668
 669        /*
 670         * Are we using semaphores when the gpu is already saturated?
 671         *
 672         * Using semaphores incurs a cost in having the GPU poll a
 673         * memory location, busywaiting for it to change. The continual
 674         * memory reads can have a noticeable impact on the rest of the
 675         * system with the extra bus traffic, stalling the cpu as it too
 676         * tries to access memory across the bus (perf stat -e bus-cycles).
 677         *
 678         * If we installed a semaphore on this request and we only submit
 679         * the request after the signaler completed, that indicates the
 680         * system is overloaded and using semaphores at this time only
 681         * increases the amount of work we are doing. If so, we disable
 682         * further use of semaphores until we are idle again, whence we
 683         * optimistically try again.
 684         */
 685        if (request->sched.semaphores &&
 686            i915_sw_fence_signaled(&request->semaphore))
 687                engine->saturated |= request->sched.semaphores;
 688
 689        engine->emit_fini_breadcrumb(request,
 690                                     request->ring->vaddr + request->postfix);
 691
 692        trace_i915_request_execute(request);
 693        engine->serial++;
 694        result = true;
 695
 696        GEM_BUG_ON(test_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags));
 697        list_move_tail(&request->sched.link, &engine->active.requests);
 698active:
 699        clear_bit(I915_FENCE_FLAG_PQUEUE, &request->fence.flags);
 700        set_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags);
 701
 702        /*
 703         * XXX Rollback bonded-execution on __i915_request_unsubmit()?
 704         *
 705         * In the future, perhaps when we have an active time-slicing scheduler,
 706         * it will be interesting to unsubmit parallel execution and remove
 707         * busywaits from the GPU until their master is restarted. This is
 708         * quite hairy, we have to carefully rollback the fence and do a
 709         * preempt-to-idle cycle on the target engine, all the while the
 710         * master execute_cb may refire.
 711         */
 712        __notify_execute_cb_irq(request);
 713
 714        /* We may be recursing from the signal callback of another i915 fence */
 715        if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags))
 716                i915_request_enable_breadcrumb(request);
 717
 718        return result;
 719}
 720
 721void i915_request_submit(struct i915_request *request)
 722{
 723        struct intel_engine_cs *engine = request->engine;
 724        unsigned long flags;
 725
 726        /* Will be called from irq-context when using foreign fences. */
 727        spin_lock_irqsave(&engine->active.lock, flags);
 728
 729        __i915_request_submit(request);
 730
 731        spin_unlock_irqrestore(&engine->active.lock, flags);
 732}
 733
 734void __i915_request_unsubmit(struct i915_request *request)
 735{
 736        struct intel_engine_cs *engine = request->engine;
 737
 738        /*
 739         * Only unwind in reverse order, required so that the per-context list
 740         * is kept in seqno/ring order.
 741         */
 742        RQ_TRACE(request, "\n");
 743
 744        GEM_BUG_ON(!irqs_disabled());
 745        lockdep_assert_held(&engine->active.lock);
 746
 747        /*
 748         * Before we remove this breadcrumb from the signal list, we have
 749         * to ensure that a concurrent dma_fence_enable_signaling() does not
 750         * attach itself. We first mark the request as no longer active and
 751         * make sure that is visible to other cores, and then remove the
 752         * breadcrumb if attached.
 753         */
 754        GEM_BUG_ON(!test_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags));
 755        clear_bit_unlock(I915_FENCE_FLAG_ACTIVE, &request->fence.flags);
 756        if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags))
 757                i915_request_cancel_breadcrumb(request);
 758
 759        /* We've already spun, don't charge on resubmitting. */
 760        if (request->sched.semaphores && __i915_request_has_started(request))
 761                request->sched.semaphores = 0;
 762
 763        /*
 764         * We don't need to wake_up any waiters on request->execute, they
 765         * will get woken by any other event or us re-adding this request
 766         * to the engine timeline (__i915_request_submit()). The waiters
  767         * should be quite adept at finding that the request now has a
  768         * different global_seqno from the one they went to sleep on.
 769         */
 770}
 771
 772void i915_request_unsubmit(struct i915_request *request)
 773{
 774        struct intel_engine_cs *engine = request->engine;
 775        unsigned long flags;
 776
 777        /* Will be called from irq-context when using foreign fences. */
 778        spin_lock_irqsave(&engine->active.lock, flags);
 779
 780        __i915_request_unsubmit(request);
 781
 782        spin_unlock_irqrestore(&engine->active.lock, flags);
 783}
 784
 785static void __cancel_request(struct i915_request *rq)
 786{
 787        struct intel_engine_cs *engine = NULL;
 788
 789        i915_request_active_engine(rq, &engine);
 790
 791        if (engine && intel_engine_pulse(engine))
 792                intel_gt_handle_error(engine->gt, engine->mask, 0,
 793                                      "request cancellation by %s",
 794                                      current->comm);
 795}
 796
 797void i915_request_cancel(struct i915_request *rq, int error)
 798{
 799        if (!i915_request_set_error_once(rq, error))
 800                return;
 801
 802        set_bit(I915_FENCE_FLAG_SENTINEL, &rq->fence.flags);
 803
 804        __cancel_request(rq);
 805}
 806
 807static int __i915_sw_fence_call
 808submit_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
 809{
 810        struct i915_request *request =
 811                container_of(fence, typeof(*request), submit);
 812
 813        switch (state) {
 814        case FENCE_COMPLETE:
 815                trace_i915_request_submit(request);
 816
 817                if (unlikely(fence->error))
 818                        i915_request_set_error_once(request, fence->error);
 819                else
 820                        __rq_arm_watchdog(request);
 821
 822                /*
 823                 * We need to serialize use of the submit_request() callback
 824                 * with its hotplugging performed during an emergency
 825                 * i915_gem_set_wedged().  We use the RCU mechanism to mark the
 826                 * critical section in order to force i915_gem_set_wedged() to
 827                 * wait until the submit_request() is completed before
 828                 * proceeding.
 829                 */
 830                rcu_read_lock();
 831                request->engine->submit_request(request);
 832                rcu_read_unlock();
 833                break;
 834
 835        case FENCE_FREE:
 836                i915_request_put(request);
 837                break;
 838        }
 839
 840        return NOTIFY_DONE;
 841}
 842
 843static int __i915_sw_fence_call
 844semaphore_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
 845{
 846        struct i915_request *rq = container_of(fence, typeof(*rq), semaphore);
 847
 848        switch (state) {
 849        case FENCE_COMPLETE:
 850                break;
 851
 852        case FENCE_FREE:
 853                i915_request_put(rq);
 854                break;
 855        }
 856
 857        return NOTIFY_DONE;
 858}
 859
 860static void retire_requests(struct intel_timeline *tl)
 861{
 862        struct i915_request *rq, *rn;
 863
 864        list_for_each_entry_safe(rq, rn, &tl->requests, link)
 865                if (!i915_request_retire(rq))
 866                        break;
 867}
 868
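     /*
      * Slow path for request allocation: when the slab allocation fails, try
      * retiring old requests on the timeline to release some (ratelimited by
      * waiting for an RCU grace period), or dip into the per-engine reserve
      * if we are not allowed to block.
      */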
 869static noinline struct i915_request *
 870request_alloc_slow(struct intel_timeline *tl,
 871                   struct i915_request **rsvd,
 872                   gfp_t gfp)
 873{
 874        struct i915_request *rq;
 875
 876        /* If we cannot wait, dip into our reserves */
 877        if (!gfpflags_allow_blocking(gfp)) {
 878                rq = xchg(rsvd, NULL);
 879                if (!rq) /* Use the normal failure path for one final WARN */
 880                        goto out;
 881
 882                return rq;
 883        }
 884
 885        if (list_empty(&tl->requests))
 886                goto out;
 887
 888        /* Move our oldest request to the slab-cache (if not in use!) */
 889        rq = list_first_entry(&tl->requests, typeof(*rq), link);
 890        i915_request_retire(rq);
 891
 892        rq = kmem_cache_alloc(global.slab_requests,
 893                              gfp | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
 894        if (rq)
 895                return rq;
 896
 897        /* Ratelimit ourselves to prevent oom from malicious clients */
 898        rq = list_last_entry(&tl->requests, typeof(*rq), link);
 899        cond_synchronize_rcu(rq->rcustate);
 900
 901        /* Retire our old requests in the hope that we free some */
 902        retire_requests(tl);
 903
 904out:
 905        return kmem_cache_alloc(global.slab_requests, gfp);
 906}
 907
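     /*
      * Slab constructor, run once when the backing object is first allocated
      * (not on every request reuse); __i915_request_create() then only needs
      * to reinit the parts that change from request to request.
      */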
 908static void __i915_request_ctor(void *arg)
 909{
 910        struct i915_request *rq = arg;
 911
 912        spin_lock_init(&rq->lock);
 913        i915_sched_node_init(&rq->sched);
 914        i915_sw_fence_init(&rq->submit, submit_notify);
 915        i915_sw_fence_init(&rq->semaphore, semaphore_notify);
 916
 917        dma_fence_init(&rq->fence, &i915_fence_ops, &rq->lock, 0, 0);
 918
 919        rq->capture_list = NULL;
 920
 921        init_llist_head(&rq->execute_cb);
 922}
 923
 924struct i915_request *
 925__i915_request_create(struct intel_context *ce, gfp_t gfp)
 926{
 927        struct intel_timeline *tl = ce->timeline;
 928        struct i915_request *rq;
 929        u32 seqno;
 930        int ret;
 931
 932        might_sleep_if(gfpflags_allow_blocking(gfp));
 933
 934        /* Check that the caller provided an already pinned context */
 935        __intel_context_pin(ce);
 936
 937        /*
 938         * Beware: Dragons be flying overhead.
 939         *
 940         * We use RCU to look up requests in flight. The lookups may
 941         * race with the request being allocated from the slab freelist.
 942         * That is the request we are writing to here, may be in the process
 943         * of being read by __i915_active_request_get_rcu(). As such,
 944         * we have to be very careful when overwriting the contents. During
  945         * the RCU lookup, we chase the request->engine pointer,
 946         * read the request->global_seqno and increment the reference count.
 947         *
 948         * The reference count is incremented atomically. If it is zero,
 949         * the lookup knows the request is unallocated and complete. Otherwise,
 950         * it is either still in use, or has been reallocated and reset
 951         * with dma_fence_init(). This increment is safe for release as we
  952         * check that the request we have a reference to matches the active
 953         * request.
 954         *
 955         * Before we increment the refcount, we chase the request->engine
 956         * pointer. We must not call kmem_cache_zalloc() or else we set
 957         * that pointer to NULL and cause a crash during the lookup. If
 958         * we see the request is completed (based on the value of the
 959         * old engine and seqno), the lookup is complete and reports NULL.
 960         * If we decide the request is not completed (new engine or seqno),
  961         * then we grab a reference and double check that it is still the
  962         * active request - and if it is not, we restart the lookup.
 963         *
 964         * Do not use kmem_cache_zalloc() here!
 965         */
 966        rq = kmem_cache_alloc(global.slab_requests,
 967                              gfp | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
 968        if (unlikely(!rq)) {
 969                rq = request_alloc_slow(tl, &ce->engine->request_pool, gfp);
 970                if (!rq) {
 971                        ret = -ENOMEM;
 972                        goto err_unreserve;
 973                }
 974        }
 975
 976        rq->context = ce;
 977        rq->engine = ce->engine;
 978        rq->ring = ce->ring;
 979        rq->execution_mask = ce->engine->mask;
 980
 981        kref_init(&rq->fence.refcount);
 982        rq->fence.flags = 0;
 983        rq->fence.error = 0;
 984        INIT_LIST_HEAD(&rq->fence.cb_list);
 985
 986        ret = intel_timeline_get_seqno(tl, rq, &seqno);
 987        if (ret)
 988                goto err_free;
 989
 990        rq->fence.context = tl->fence_context;
 991        rq->fence.seqno = seqno;
 992
 993        RCU_INIT_POINTER(rq->timeline, tl);
 994        rq->hwsp_seqno = tl->hwsp_seqno;
 995        GEM_BUG_ON(__i915_request_is_complete(rq));
 996
 997        rq->rcustate = get_state_synchronize_rcu(); /* acts as smp_mb() */
 998
 999        /* We bump the ref for the fence chain */
1000        i915_sw_fence_reinit(&i915_request_get(rq)->submit);
1001        i915_sw_fence_reinit(&i915_request_get(rq)->semaphore);
1002
1003        i915_sched_node_reinit(&rq->sched);
1004
1005        /* No zalloc, everything must be cleared after use */
1006        rq->batch = NULL;
1007        __rq_init_watchdog(rq);
1008        GEM_BUG_ON(rq->capture_list);
1009        GEM_BUG_ON(!llist_empty(&rq->execute_cb));
1010
1011        /*
1012         * Reserve space in the ring buffer for all the commands required to
1013         * eventually emit this request. This is to guarantee that the
1014         * i915_request_add() call can't fail. Note that the reserve may need
1015         * to be redone if the request is not actually submitted straight
1016         * away, e.g. because a GPU scheduler has deferred it.
1017         *
1018         * Note that due to how we add reserved_space to intel_ring_begin()
1019         * we need to double our request to ensure that if we need to wrap
1020         * around inside i915_request_add() there is sufficient space at
1021         * the beginning of the ring as well.
1022         */
1023        rq->reserved_space =
1024                2 * rq->engine->emit_fini_breadcrumb_dw * sizeof(u32);
1025
1026        /*
1027         * Record the position of the start of the request so that
1028         * should we detect the updated seqno part-way through the
1029         * GPU processing the request, we never over-estimate the
1030         * position of the head.
1031         */
1032        rq->head = rq->ring->emit;
1033
1034        ret = rq->engine->request_alloc(rq);
1035        if (ret)
1036                goto err_unwind;
1037
1038        rq->infix = rq->ring->emit; /* end of header; start of user payload */
1039
1040        intel_context_mark_active(ce);
1041        list_add_tail_rcu(&rq->link, &tl->requests);
1042
1043        return rq;
1044
1045err_unwind:
1046        ce->ring->emit = rq->head;
1047
1048        /* Make sure we didn't add ourselves to external state before freeing */
1049        GEM_BUG_ON(!list_empty(&rq->sched.signalers_list));
1050        GEM_BUG_ON(!list_empty(&rq->sched.waiters_list));
1051
1052err_free:
1053        kmem_cache_free(global.slab_requests, rq);
1054err_unreserve:
1055        intel_context_unpin(ce);
1056        return ERR_PTR(ret);
1057}
1058
1059struct i915_request *
1060i915_request_create(struct intel_context *ce)
1061{
1062        struct i915_request *rq;
1063        struct intel_timeline *tl;
1064
1065        tl = intel_context_timeline_lock(ce);
1066        if (IS_ERR(tl))
1067                return ERR_CAST(tl);
1068
1069        /* Move our oldest request to the slab-cache (if not in use!) */
1070        rq = list_first_entry(&tl->requests, typeof(*rq), link);
1071        if (!list_is_last(&rq->link, &tl->requests))
1072                i915_request_retire(rq);
1073
1074        intel_context_enter(ce);
1075        rq = __i915_request_create(ce, GFP_KERNEL);
1076        intel_context_exit(ce); /* active reference transferred to request */
1077        if (IS_ERR(rq))
1078                goto err_unlock;
1079
1080        /* Check that we do not interrupt ourselves with a new request */
1081        rq->cookie = lockdep_pin_lock(&tl->mutex);
1082
1083        return rq;
1084
1085err_unlock:
1086        intel_context_timeline_unlock(tl);
1087        return rq;
1088}
1089
1090static int
1091i915_request_await_start(struct i915_request *rq, struct i915_request *signal)
1092{
1093        struct dma_fence *fence;
1094        int err;
1095
1096        if (i915_request_timeline(rq) == rcu_access_pointer(signal->timeline))
1097                return 0;
1098
1099        if (i915_request_started(signal))
1100                return 0;
1101
1102        /*
1103         * The caller holds a reference on @signal, but we do not serialise
1104         * against it being retired and removed from the lists.
1105         *
1106         * We do not hold a reference to the request before @signal, and
1107         * so must be very careful to ensure that it is not _recycled_ as
1108         * we follow the link backwards.
1109         */
1110        fence = NULL;
1111        rcu_read_lock();
1112        do {
1113                struct list_head *pos = READ_ONCE(signal->link.prev);
1114                struct i915_request *prev;
1115
1116                /* Confirm signal has not been retired, the link is valid */
1117                if (unlikely(__i915_request_has_started(signal)))
1118                        break;
1119
1120                /* Is signal the earliest request on its timeline? */
1121                if (pos == &rcu_dereference(signal->timeline)->requests)
1122                        break;
1123
1124                /*
1125                 * Peek at the request before us in the timeline. That
1126                 * request will only be valid before it is retired, so
1127                 * after acquiring a reference to it, confirm that it is
1128                 * still part of the signaler's timeline.
1129                 */
1130                prev = list_entry(pos, typeof(*prev), link);
1131                if (!i915_request_get_rcu(prev))
1132                        break;
1133
1134                /* After the strong barrier, confirm prev is still attached */
1135                if (unlikely(READ_ONCE(prev->link.next) != &signal->link)) {
1136                        i915_request_put(prev);
1137                        break;
1138                }
1139
1140                fence = &prev->fence;
1141        } while (0);
1142        rcu_read_unlock();
1143        if (!fence)
1144                return 0;
1145
1146        err = 0;
1147        if (!intel_timeline_sync_is_later(i915_request_timeline(rq), fence))
1148                err = i915_sw_fence_await_dma_fence(&rq->submit,
1149                                                    fence, 0,
1150                                                    I915_FENCE_GFP);
1151        dma_fence_put(fence);
1152
1153        return err;
1154}
1155
1156static intel_engine_mask_t
1157already_busywaiting(struct i915_request *rq)
1158{
1159        /*
1160         * Polling a semaphore causes bus traffic, delaying other users of
1161         * both the GPU and CPU. We want to limit the impact on others,
1162         * while taking advantage of early submission to reduce GPU
1163         * latency. Therefore we restrict ourselves to not using more
1164         * than one semaphore from each source, and not using a semaphore
1165         * if we have detected the engine is saturated (i.e. would not be
1166         * submitted early and cause bus traffic reading an already passed
1167         * semaphore).
1168         *
1169         * See the are-we-too-late? check in __i915_request_submit().
1170         */
1171        return rq->sched.semaphores | READ_ONCE(rq->engine->saturated);
1172}
1173
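     /*
      * Emit a MI_SEMAPHORE_WAIT into @to's ring that polls @from's
      * breadcrumb in its HWSP until it reaches @seqno (greater-or-equal),
      * i.e. a GPU-side busywait on the signaler instead of an interrupt.
      */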
1174static int
1175__emit_semaphore_wait(struct i915_request *to,
1176                      struct i915_request *from,
1177                      u32 seqno)
1178{
1179        const int has_token = INTEL_GEN(to->engine->i915) >= 12;
1180        u32 hwsp_offset;
1181        int len, err;
1182        u32 *cs;
1183
1184        GEM_BUG_ON(INTEL_GEN(to->engine->i915) < 8);
1185        GEM_BUG_ON(i915_request_has_initial_breadcrumb(to));
1186
1187        /* We need to pin the signaler's HWSP until we are finished reading. */
1188        err = intel_timeline_read_hwsp(from, to, &hwsp_offset);
1189        if (err)
1190                return err;
1191
1192        len = 4;
1193        if (has_token)
1194                len += 2;
1195
1196        cs = intel_ring_begin(to, len);
1197        if (IS_ERR(cs))
1198                return PTR_ERR(cs);
1199
1200        /*
1201         * Using greater-than-or-equal here means we have to worry
1202         * about seqno wraparound. To side step that issue, we swap
1203         * the timeline HWSP upon wrapping, so that everyone listening
 1204         * for the old (pre-wrap) values does not see the much smaller
1205         * (post-wrap) values than they were expecting (and so wait
1206         * forever).
1207         */
1208        *cs++ = (MI_SEMAPHORE_WAIT |
1209                 MI_SEMAPHORE_GLOBAL_GTT |
1210                 MI_SEMAPHORE_POLL |
1211                 MI_SEMAPHORE_SAD_GTE_SDD) +
1212                has_token;
1213        *cs++ = seqno;
1214        *cs++ = hwsp_offset;
1215        *cs++ = 0;
1216        if (has_token) {
1217                *cs++ = 0;
1218                *cs++ = MI_NOOP;
1219        }
1220
1221        intel_ring_advance(to, cs);
1222        return 0;
1223}
1224
1225static int
1226emit_semaphore_wait(struct i915_request *to,
1227                    struct i915_request *from,
1228                    gfp_t gfp)
1229{
1230        const intel_engine_mask_t mask = READ_ONCE(from->engine)->mask;
1231        struct i915_sw_fence *wait = &to->submit;
1232
1233        if (!intel_context_use_semaphores(to->context))
1234                goto await_fence;
1235
1236        if (i915_request_has_initial_breadcrumb(to))
1237                goto await_fence;
1238
1239        /*
1240         * If this or its dependents are waiting on an external fence
1241         * that may fail catastrophically, then we want to avoid using
 1242         * semaphores as they bypass the fence signaling metadata, and we
1243         * lose the fence->error propagation.
1244         */
1245        if (from->sched.flags & I915_SCHED_HAS_EXTERNAL_CHAIN)
1246                goto await_fence;
1247
1248        /* Just emit the first semaphore we see as request space is limited. */
1249        if (already_busywaiting(to) & mask)
1250                goto await_fence;
1251
1252        if (i915_request_await_start(to, from) < 0)
1253                goto await_fence;
1254
1255        /* Only submit our spinner after the signaler is running! */
1256        if (__await_execution(to, from, NULL, gfp))
1257                goto await_fence;
1258
1259        if (__emit_semaphore_wait(to, from, from->fence.seqno))
1260                goto await_fence;
1261
1262        to->sched.semaphores |= mask;
1263        wait = &to->semaphore;
1264
1265await_fence:
1266        return i915_sw_fence_await_dma_fence(wait,
1267                                             &from->fence, 0,
1268                                             I915_FENCE_GFP);
1269}
1270
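     /*
      * To squash repeated execution awaits along the same timeline, record
      * the await in the timeline's sync map using fence->seqno - 1, i.e. the
      * point just before the signaler starts.
      */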
1271static bool intel_timeline_sync_has_start(struct intel_timeline *tl,
1272                                          struct dma_fence *fence)
1273{
1274        return __intel_timeline_sync_is_later(tl,
1275                                              fence->context,
1276                                              fence->seqno - 1);
1277}
1278
1279static int intel_timeline_sync_set_start(struct intel_timeline *tl,
1280                                         const struct dma_fence *fence)
1281{
1282        return __intel_timeline_sync_set(tl, fence->context, fence->seqno - 1);
1283}
1284
1285static int
1286__i915_request_await_execution(struct i915_request *to,
1287                               struct i915_request *from,
1288                               void (*hook)(struct i915_request *rq,
1289                                            struct dma_fence *signal))
1290{
1291        int err;
1292
1293        GEM_BUG_ON(intel_context_is_barrier(from->context));
1294
1295        /* Submit both requests at the same time */
1296        err = __await_execution(to, from, hook, I915_FENCE_GFP);
1297        if (err)
1298                return err;
1299
 1300        /* Squash repeated dependencies to the same timelines */
1301        if (intel_timeline_sync_has_start(i915_request_timeline(to),
1302                                          &from->fence))
1303                return 0;
1304
1305        /*
1306         * Wait until the start of this request.
1307         *
1308         * The execution cb fires when we submit the request to HW. But in
1309         * many cases this may be long before the request itself is ready to
1310         * run (consider that we submit 2 requests for the same context, where
1311         * the request of interest is behind an indefinite spinner). So we hook
1312         * up to both to reduce our queues and keep the execution lag minimised
1313         * in the worst case, though we hope that the await_start is elided.
1314         */
1315        err = i915_request_await_start(to, from);
1316        if (err < 0)
1317                return err;
1318
1319        /*
1320         * Ensure both start together [after all semaphores in signal]
1321         *
1322         * Now that we are queued to the HW at roughly the same time (thanks
1323         * to the execute cb) and are ready to run at roughly the same time
1324         * (thanks to the await start), our signaler may still be indefinitely
1325         * delayed by waiting on a semaphore from a remote engine. If our
1326         * signaler depends on a semaphore, so indirectly do we, and we do not
1327         * want to start our payload until our signaler also starts theirs.
1328         * So we wait.
1329         *
1330         * However, there is also a second condition for which we need to wait
1331         * for the precise start of the signaler. Consider that the signaler
1332         * was submitted in a chain of requests following another context
1333         * (with just an ordinary intra-engine fence dependency between the
1334         * two). In this case the signaler is queued to HW, but not for
1335         * immediate execution, and so we must wait until it reaches the
1336         * active slot.
1337         */
1338        if (intel_engine_has_semaphores(to->engine) &&
1339            !i915_request_has_initial_breadcrumb(to)) {
1340                err = __emit_semaphore_wait(to, from, from->fence.seqno - 1);
1341                if (err < 0)
1342                        return err;
1343        }
1344
1345        /* Couple the dependency tree for PI on this exposed to->fence */
1346        if (to->engine->schedule) {
1347                err = i915_sched_node_add_dependency(&to->sched,
1348                                                     &from->sched,
1349                                                     I915_DEPENDENCY_WEAK);
1350                if (err < 0)
1351                        return err;
1352        }
1353
1354        return intel_timeline_sync_set_start(i915_request_timeline(to),
1355                                             &from->fence);
1356}
1357
1358static void mark_external(struct i915_request *rq)
1359{
1360        /*
1361         * The downside of using semaphores is that we lose metadata passing
1362         * along the signaling chain. This is particularly nasty when we
1363         * need to pass along a fatal error such as EFAULT or EDEADLK. For
1364         * fatal errors we want to scrub the request before it is executed,
1365         * which means that we cannot preload the request onto HW and have
1366         * it wait upon a semaphore.
1367         */
1368        rq->sched.flags |= I915_SCHED_HAS_EXTERNAL_CHAIN;
1369}
1370
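     /*
      * Await a foreign (non-i915) fence. The wait is bounded by the
      * context's fence timeout (i915_fence_context_timeout()), and
      * dma_fence_chain containers are walked so that any native i915 fences
      * inside the chain still take the native await path.
      */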
1371static int
1372__i915_request_await_external(struct i915_request *rq, struct dma_fence *fence)
1373{
1374        mark_external(rq);
1375        return i915_sw_fence_await_dma_fence(&rq->submit, fence,
1376                                             i915_fence_context_timeout(rq->engine->i915,
1377                                                                        fence->context),
1378                                             I915_FENCE_GFP);
1379}
1380
1381static int
1382i915_request_await_external(struct i915_request *rq, struct dma_fence *fence)
1383{
1384        struct dma_fence *iter;
1385        int err = 0;
1386
1387        if (!to_dma_fence_chain(fence))
1388                return __i915_request_await_external(rq, fence);
1389
1390        dma_fence_chain_for_each(iter, fence) {
1391                struct dma_fence_chain *chain = to_dma_fence_chain(iter);
1392
1393                if (!dma_fence_is_i915(chain->fence)) {
1394                        err = __i915_request_await_external(rq, iter);
1395                        break;
1396                }
1397
1398                err = i915_request_await_dma_fence(rq, chain->fence);
1399                if (err < 0)
1400                        break;
1401        }
1402
1403        dma_fence_put(iter);
1404        return err;
1405}
1406
1407int
1408i915_request_await_execution(struct i915_request *rq,
1409                             struct dma_fence *fence,
1410                             void (*hook)(struct i915_request *rq,
1411                                          struct dma_fence *signal))
1412{
1413        struct dma_fence **child = &fence;
1414        unsigned int nchild = 1;
1415        int ret;
1416
1417        if (dma_fence_is_array(fence)) {
1418                struct dma_fence_array *array = to_dma_fence_array(fence);
1419
1420                /* XXX Error for signal-on-any fence arrays */
1421
1422                child = array->fences;
1423                nchild = array->num_fences;
1424                GEM_BUG_ON(!nchild);
1425        }
1426
1427        do {
1428                fence = *child++;
1429                if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
1430                        i915_sw_fence_set_error_once(&rq->submit, fence->error);
1431                        continue;
1432                }
1433
1434                if (fence->context == rq->fence.context)
1435                        continue;
1436
1437                /*
1438                 * We don't squash repeated fence dependencies here as we
1439                 * want to run our callback in all cases.
1440                 */
1441
1442                if (dma_fence_is_i915(fence))
1443                        ret = __i915_request_await_execution(rq,
1444                                                             to_request(fence),
1445                                                             hook);
1446                else
1447                        ret = i915_request_await_external(rq, fence);
1448                if (ret < 0)
1449                        return ret;
1450        } while (--nchild);
1451
1452        return 0;
1453}
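
/*
 * Minimal sketch, for illustration only: composing several fences into a
 * dma_fence_array and requesting an execution (submit-time) dependency on
 * the whole set; "fences" and "nfences" are placeholders:
 *
 *	struct dma_fence_array *array;
 *
 *	array = dma_fence_array_create(nfences, fences,
 *				       dma_fence_context_alloc(1), 1,
 *				       false);
 *	if (!array)
 *		return -ENOMEM;
 *
 *	err = i915_request_await_execution(rq, &array->base, NULL);
 *	dma_fence_put(&array->base);
 *
 * As the XXX above notes, signal-on-any arrays (final argument true) are not
 * handled and should be rejected rather than decomposed.
 */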
1454
1455static int
1456await_request_submit(struct i915_request *to, struct i915_request *from)
1457{
1458        /*
1459         * If we are waiting on a virtual engine, then it may be
1460         * constrained to execute on a single engine *prior* to submission.
1461         * When it is submitted, it will be first submitted to the virtual
1462         * engine and then passed to the physical engine. We cannot allow
1463         * the waiter to be submitted immediately to the physical engine
1464         * as it may then bypass the virtual request.
1465         */
1466        if (to->engine == READ_ONCE(from->engine))
1467                return i915_sw_fence_await_sw_fence_gfp(&to->submit,
1468                                                        &from->submit,
1469                                                        I915_FENCE_GFP);
1470        else
1471                return __i915_request_await_execution(to, from, NULL);
1472}
1473
1474static int
1475i915_request_await_request(struct i915_request *to, struct i915_request *from)
1476{
1477        int ret;
1478
1479        GEM_BUG_ON(to == from);
1480        GEM_BUG_ON(to->timeline == from->timeline);
1481
1482        if (i915_request_completed(from)) {
1483                i915_sw_fence_set_error_once(&to->submit, from->fence.error);
1484                return 0;
1485        }
1486
1487        if (to->engine->schedule) {
1488                ret = i915_sched_node_add_dependency(&to->sched,
1489                                                     &from->sched,
1490                                                     I915_DEPENDENCY_EXTERNAL);
1491                if (ret < 0)
1492                        return ret;
1493        }
1494
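        /*
         * If the union of both execution masks is a single bit, both
         * requests are confined to the same physical engine (even if one is
         * queued on a virtual engine that has been narrowed down to it), so
         * order them at submission time via await_request_submit().
         * Otherwise they may execute concurrently on different engines and
         * we can order them on the GPU with a semaphore wait instead.
         */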
1495        if (is_power_of_2(to->execution_mask | READ_ONCE(from->execution_mask)))
1496                ret = await_request_submit(to, from);
1497        else
1498                ret = emit_semaphore_wait(to, from, I915_FENCE_GFP);
1499        if (ret < 0)
1500                return ret;
1501
1502        return 0;
1503}
1504
1505int
1506i915_request_await_dma_fence(struct i915_request *rq, struct dma_fence *fence)
1507{
1508        struct dma_fence **child = &fence;
1509        unsigned int nchild = 1;
1510        int ret;
1511
1512        /*
1513         * Note that if the fence-array was created in signal-on-any mode,
1514         * we should *not* decompose it into its individual fences. However,
1515         * we don't currently store which mode the fence-array is operating
1516         * in. Fortunately, the only user of signal-on-any is private to
1517         * amdgpu and we should not see any incoming fence-array from
1518         * sync-file being in signal-on-any mode.
1519         */
1520        if (dma_fence_is_array(fence)) {
1521                struct dma_fence_array *array = to_dma_fence_array(fence);
1522
1523                child = array->fences;
1524                nchild = array->num_fences;
1525                GEM_BUG_ON(!nchild);
1526        }
1527
1528        do {
1529                fence = *child++;
1530                if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
1531                        i915_sw_fence_set_error_once(&rq->submit, fence->error);
1532                        continue;
1533                }
1534
1535                /*
1536                 * Requests on the same timeline are explicitly ordered, along
1537                 * with their dependencies, by i915_request_add() which ensures
1538                 * that requests are submitted in-order through each ring.
1539                 */
1540                if (fence->context == rq->fence.context)
1541                        continue;
1542
1543                /* Squash repeated waits to the same timelines */
1544                if (fence->context &&
1545                    intel_timeline_sync_is_later(i915_request_timeline(rq),
1546                                                 fence))
1547                        continue;
1548
1549                if (dma_fence_is_i915(fence))
1550                        ret = i915_request_await_request(rq, to_request(fence));
1551                else
1552                        ret = i915_request_await_external(rq, fence);
1553                if (ret < 0)
1554                        return ret;
1555
1556                /* Record the latest fence used against each timeline */
1557                if (fence->context)
1558                        intel_timeline_sync_set(i915_request_timeline(rq),
1559                                                fence);
1560        } while (--nchild);
1561
1562        return 0;
1563}
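
/*
 * Minimal usage sketch, for illustration only: importing an external fence
 * (e.g. from a sync_file fd, as execbuf's in-fence does) so that the request
 * will not be submitted until it has signaled; assumes <linux/sync_file.h>
 * and a local "in_fd":
 *
 *	struct dma_fence *in_fence;
 *
 *	in_fence = sync_file_get_fence(in_fd);
 *	if (!in_fence)
 *		return -EINVAL;
 *
 *	err = i915_request_await_dma_fence(rq, in_fence);
 *	dma_fence_put(in_fence);
 *	if (err < 0)
 *		return err;
 */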
1564
1565/**
1566 * i915_request_await_object - set this request to (async) wait upon a bo
1567 * @to: request we are wishing to use
1568 * @obj: object which may be in use on another ring.
1569 * @write: whether the wait is on behalf of a writer
1570 *
1571 * This code is meant to abstract object synchronization with the GPU.
1572 * Conceptually we serialise writes between engines inside the GPU.
1573 * We only allow one engine to write into a buffer at any time, but
1574 * multiple readers. To ensure each has a coherent view of memory, we must:
1575 *
1576 * - If there is an outstanding write request to the object, the new
1577 *   request must wait for it to complete (either CPU or in hw, requests
1578 *   on the same ring will be naturally ordered).
1579 *
1580 * - If we are a write request (@write is set), the new
1581 *   request must wait for outstanding read requests to complete.
1582 *
1583 * Returns 0 if successful, else propagates up the lower layer error.
1584 */
1585int
1586i915_request_await_object(struct i915_request *to,
1587                          struct drm_i915_gem_object *obj,
1588                          bool write)
1589{
1590        struct dma_fence *excl;
1591        int ret = 0;
1592
1593        if (write) {
1594                struct dma_fence **shared;
1595                unsigned int count, i;
1596
1597                ret = dma_resv_get_fences_rcu(obj->base.resv,
1598                                              &excl, &count, &shared);
1599                if (ret)
1600                        return ret;
1601
1602                for (i = 0; i < count; i++) {
1603                        ret = i915_request_await_dma_fence(to, shared[i]);
1604                        if (ret)
1605                                break;
1606
1607                        dma_fence_put(shared[i]);
1608                }
1609
1610                for (; i < count; i++)
1611                        dma_fence_put(shared[i]);
1612                kfree(shared);
1613        } else {
1614                excl = dma_resv_get_excl_rcu(obj->base.resv);
1615        }
1616
1617        if (excl) {
1618                if (ret == 0)
1619                        ret = i915_request_await_dma_fence(to, excl);
1620
1621                dma_fence_put(excl);
1622        }
1623
1624        return ret;
1625}
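
/*
 * Minimal usage sketch, for illustration only: the usual pairing of waiting
 * upon an object and then publishing the request as its new fence, as done
 * by execbuf and the driver's internal clients; "vma" and "rq" are
 * placeholders:
 *
 *	i915_vma_lock(vma);
 *	err = i915_request_await_object(rq, vma->obj, true);
 *	if (err == 0)
 *		err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
 *	i915_vma_unlock(vma);
 */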
1626
1627static struct i915_request *
1628__i915_request_add_to_timeline(struct i915_request *rq)
1629{
1630        struct intel_timeline *timeline = i915_request_timeline(rq);
1631        struct i915_request *prev;
1632
1633        /*
1634         * Dependency tracking and request ordering along the timeline
1635         * is special cased so that we can eliminate redundant ordering
1636         * operations while building the request (we know that the timeline
1637         * itself is ordered, and here we guarantee it).
1638         *
1639         * As we know we will need to emit tracking along the timeline,
1640         * we embed the hooks into our request struct -- at the cost of
1641         * having to have specialised no-allocation interfaces (which will
1642         * be beneficial elsewhere).
1643         *
1644         * A second benefit to open-coding i915_request_await_request is
1645         * that we can apply a slight variant of the rules specialised
1646         * for timelines that jump between engines (such as virtual engines).
1647         * If we consider the case of a virtual engine, we must emit a dma-fence
1648         * to prevent scheduling of the second request until the first is
1649         * complete (to maximise our greedy late load balancing), and this
1650         * precludes optimising to use semaphore serialisation of a single
1651         * timeline across engines.
1652         */
1653        prev = to_request(__i915_active_fence_set(&timeline->last_request,
1654                                                  &rq->fence));
1655        if (prev && !__i915_request_is_complete(prev)) {
1656                /*
1657                 * The requests are supposed to be kept in order. However,
1658                 * we need to be wary in case the timeline->last_request
1659                 * is used as a barrier for external modification to this
1660                 * context.
1661                 */
1662                GEM_BUG_ON(prev->context == rq->context &&
1663                           i915_seqno_passed(prev->fence.seqno,
1664                                             rq->fence.seqno));
1665
1666                if (is_power_of_2(READ_ONCE(prev->engine)->mask | rq->engine->mask))
1667                        i915_sw_fence_await_sw_fence(&rq->submit,
1668                                                     &prev->submit,
1669                                                     &rq->submitq);
1670                else
1671                        __i915_sw_fence_await_dma_fence(&rq->submit,
1672                                                        &prev->fence,
1673                                                        &rq->dmaq);
1674                if (rq->engine->schedule)
1675                        __i915_sched_node_add_dependency(&rq->sched,
1676                                                         &prev->sched,
1677                                                         &rq->dep,
1678                                                         0);
1679        }
1680
1681        /*
1682         * Make sure that no request gazumped us - if it was allocated after
1683         * our i915_request_alloc() and called __i915_request_add() before
1684         * us, the timeline will hold its seqno which is later than ours.
1685         */
1686        GEM_BUG_ON(timeline->seqno != rq->fence.seqno);
1687
1688        return prev;
1689}
1690
1691/*
1692 * NB: This function is not allowed to fail. Doing so would mean that the
1693 * request is not being tracked for completion but the work itself is
1694 * going to happen on the hardware. This would be a Bad Thing(tm).
1695 */
1696struct i915_request *__i915_request_commit(struct i915_request *rq)
1697{
1698        struct intel_engine_cs *engine = rq->engine;
1699        struct intel_ring *ring = rq->ring;
1700        u32 *cs;
1701
1702        RQ_TRACE(rq, "\n");
1703
1704        /*
1705         * To ensure that this call will not fail, space for its emissions
1706         * should already have been reserved in the ring buffer. Let the ring
1707         * know that it is time to use that space up.
1708         */
1709        GEM_BUG_ON(rq->reserved_space > ring->space);
1710        rq->reserved_space = 0;
1711        rq->emitted_jiffies = jiffies;
1712
1713        /*
1714         * Record the position of the start of the breadcrumb so that
1715         * should we detect the updated seqno part-way through the
1716         * GPU processing the request, we never over-estimate the
1717         * position of the ring's HEAD.
1718         */
1719        cs = intel_ring_begin(rq, engine->emit_fini_breadcrumb_dw);
1720        GEM_BUG_ON(IS_ERR(cs));
1721        rq->postfix = intel_ring_offset(rq, cs);
1722
1723        return __i915_request_add_to_timeline(rq);
1724}
1725
1726void __i915_request_queue_bh(struct i915_request *rq)
1727{
1728        i915_sw_fence_commit(&rq->semaphore);
1729        i915_sw_fence_commit(&rq->submit);
1730}
1731
1732void __i915_request_queue(struct i915_request *rq,
1733                          const struct i915_sched_attr *attr)
1734{
1735        /*
1736         * Let the backend know a new request has arrived that may need
1737         * to adjust the existing execution schedule due to a high priority
1738         * request - i.e. we may want to preempt the current request in order
1739         * to run a high priority dependency chain *before* we can execute this
1740         * request.
1741         *
1742         * This is called before the request is ready to run so that we can
1743         * decide whether to preempt the entire chain, making it ready to
1744         * run at the earliest opportunity.
1745         */
1746        if (attr && rq->engine->schedule)
1747                rq->engine->schedule(rq, attr);
1748
1749        local_bh_disable();
1750        __i915_request_queue_bh(rq);
1751        local_bh_enable(); /* kick tasklets */
1752}
1753
1754void i915_request_add(struct i915_request *rq)
1755{
1756        struct intel_timeline * const tl = i915_request_timeline(rq);
1757        struct i915_sched_attr attr = {};
1758        struct i915_gem_context *ctx;
1759
1760        lockdep_assert_held(&tl->mutex);
1761        lockdep_unpin_lock(&tl->mutex, rq->cookie);
1762
1763        trace_i915_request_add(rq);
1764        __i915_request_commit(rq);
1765
1766        /* XXX placeholder for selftests */
1767        rcu_read_lock();
1768        ctx = rcu_dereference(rq->context->gem_context);
1769        if (ctx)
1770                attr = ctx->sched;
1771        rcu_read_unlock();
1772
1773        __i915_request_queue(rq, &attr);
1774
1775        mutex_unlock(&tl->mutex);
1776}
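
/*
 * Minimal sketch of the overall flow, for illustration only: create a request
 * on a pinned context, emit some commands, then commit and queue it with
 * i915_request_add(), which also drops the timeline mutex taken by
 * i915_request_create(); "ce", "batch" and "len" are placeholders:
 *
 *	struct i915_request *rq;
 *	int err;
 *
 *	rq = i915_request_create(ce);
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);
 *
 *	err = rq->engine->emit_bb_start(rq, batch, len, 0);
 *	if (err)
 *		i915_request_set_error_once(rq, err);
 *
 *	i915_request_get(rq);
 *	i915_request_add(rq);
 */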
1777
1778static unsigned long local_clock_ns(unsigned int *cpu)
1779{
1780        unsigned long t;
1781
1782        /*
1783         * Cheaply read the CPU-local clock. The result is in nanoseconds,
1784         * and all subsequent calculations (the busywait budget and the
1785         * timeout comparison in busywait_stop()) are kept in the same
1786         * units, so no conversion is required.
1787         *
1788         * Note that local_clock() is only defined wrt the current CPU;
1789         * the comparisons are no longer valid if we switch CPUs. Instead of
1790         * blocking preemption for the entire busywait, we can detect the CPU
1791         * switch and use that as an indicator of system load and a reason to
1792         * stop busywaiting, see busywait_stop().
1793         */
1794        *cpu = get_cpu();
1795        t = local_clock();
1796        put_cpu();
1797
1798        return t;
1799}
1800
1801static bool busywait_stop(unsigned long timeout, unsigned int cpu)
1802{
1803        unsigned int this_cpu;
1804
1805        if (time_after(local_clock_ns(&this_cpu), timeout))
1806                return true;
1807
1808        return this_cpu != cpu;
1809}
1810
1811static bool __i915_spin_request(struct i915_request * const rq, int state)
1812{
1813        unsigned long timeout_ns;
1814        unsigned int cpu;
1815
1816        /*
1817         * Only wait for the request if we know it is likely to complete.
1818         *
1819         * We don't track the timestamps around requests, nor the average
1820         * request length, so we do not have a good indicator that this
1821         * request will complete within the timeout. What we do know is the
1822         * order in which requests are executed by the context and so we can
1823         * tell if the request has been started. If the request is not even
1824         * running yet, it is a fair assumption that it will not complete
1825         * within our relatively short timeout.
1826         */
1827        if (!i915_request_is_running(rq))
1828                return false;
1829
1830        /*
1831         * When waiting for high frequency requests, e.g. during synchronous
1832         * rendering split between the CPU and GPU, the finite amount of time
1833         * required to set up the irq and wait upon it limits the response
1834         * rate. By busywaiting on the request completion for a short while we
1835         * can service the high frequency waits as quickly as possible. However,
1836         * if it is a slow request, we want to sleep as quickly as possible.
1837         * The tradeoff between waiting and sleeping is roughly the time it
1838         * takes to sleep on a request, on the order of a microsecond.
1839         */
1840
1841        timeout_ns = READ_ONCE(rq->engine->props.max_busywait_duration_ns);
1842        timeout_ns += local_clock_ns(&cpu);
1843        do {
1844                if (dma_fence_is_signaled(&rq->fence))
1845                        return true;
1846
1847                if (signal_pending_state(state, current))
1848                        break;
1849
1850                if (busywait_stop(timeout_ns, cpu))
1851                        break;
1852
1853                cpu_relax();
1854        } while (!need_resched());
1855
1856        return false;
1857}
1858
1859struct request_wait {
1860        struct dma_fence_cb cb;
1861        struct task_struct *tsk;
1862};
1863
1864static void request_wait_wake(struct dma_fence *fence, struct dma_fence_cb *cb)
1865{
1866        struct request_wait *wait = container_of(cb, typeof(*wait), cb);
1867
1868        wake_up_process(fetch_and_zero(&wait->tsk));
1869}
1870
1871/**
1872 * i915_request_wait - wait until execution of request has finished
1873 * @rq: the request to wait upon
1874 * @flags: how to wait
1875 * @timeout: how long to wait in jiffies
1876 *
1877 * i915_request_wait() waits for the request to be completed, for a
1878 * maximum of @timeout jiffies (with MAX_SCHEDULE_TIMEOUT implying an
1879 * unbounded wait).
1880 *
1881 * Returns the remaining time (in jiffies, possibly zero) if the request
1882 * completed, or -ETIME if the request is unfinished after the timeout expires.
1883 * May return -EINTR if called with I915_WAIT_INTERRUPTIBLE and a signal is
1884 * pending before the request completes.
1885 */
1886long i915_request_wait(struct i915_request *rq,
1887                       unsigned int flags,
1888                       long timeout)
1889{
1890        const int state = flags & I915_WAIT_INTERRUPTIBLE ?
1891                TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
1892        struct request_wait wait;
1893
1894        might_sleep();
1895        GEM_BUG_ON(timeout < 0);
1896
1897        if (dma_fence_is_signaled(&rq->fence))
1898                return timeout;
1899
1900        if (!timeout)
1901                return -ETIME;
1902
1903        trace_i915_request_wait_begin(rq, flags);
1904
1905        /*
1906         * We must never wait on the GPU while holding a lock as we
1907         * may need to perform a GPU reset. So while we don't need to
1908         * serialise wait/reset with an explicit lock, we do want
1909         * lockdep to detect potential dependency cycles.
1910         */
1911        mutex_acquire(&rq->engine->gt->reset.mutex.dep_map, 0, 0, _THIS_IP_);
1912
1913        /*
1914         * Optimistic spin before touching IRQs.
1915         *
1916         * We may use a rather large value here to offset the penalty of
1917         * switching away from the active task. Frequently, the client will
1918         * wait upon an old swapbuffer to throttle itself to remain within a
1919         * frame of the gpu. If the client is running in lockstep with the gpu,
1920         * then it should not be waiting long at all, and a sleep now will incur
1921         * extra scheduler latency in producing the next frame. To try to
1922         * avoid adding the cost of enabling/disabling the interrupt to the
1923         * short wait, we first spin to see if the request would have completed
1924         * in the time taken to setup the interrupt.
1925         *
1926         * We need up to 5us to enable the irq, and up to 20us to hide the
1927         * scheduler latency of a context switch, ignoring the secondary
1928         * impacts from a context switch such as cache eviction.
1929         *
1930         * The scheme used for low-latency IO is called "hybrid interrupt
1931         * polling". The suggestion there is to sleep until just before you
1932         * expect to be woken by the device interrupt and then poll for its
1933         * completion. That requires having a good predictor for the request
1934         * duration, which we currently lack.
1935         */
1936        if (IS_ACTIVE(CONFIG_DRM_I915_MAX_REQUEST_BUSYWAIT) &&
1937            __i915_spin_request(rq, state))
1938                goto out;
1939
1940        /*
1941         * This client is about to stall waiting for the GPU. In many cases
1942         * this is undesirable and limits the throughput of the system, as
1943         * many clients cannot continue processing user input/output whilst
1944         * blocked. RPS autotuning may take tens of milliseconds to respond
1945         * to the GPU load and thus incurs additional latency for the client.
1946         * We can circumvent that by promoting the GPU frequency to maximum
1947         * before we sleep. This makes the GPU throttle up much more quickly
1948         * (good for benchmarks and user experience, e.g. window animations),
1949         * but at a cost of spending more power processing the workload
1950         * (bad for battery).
1951         */
1952        if (flags & I915_WAIT_PRIORITY && !i915_request_started(rq))
1953                intel_rps_boost(rq);
1954
1955        wait.tsk = current;
1956        if (dma_fence_add_callback(&rq->fence, &wait.cb, request_wait_wake))
1957                goto out;
1958
1959        /*
1960         * Flush the submission tasklet, but only if it may help this request.
1961         *
1962         * We sometimes experience some latency between the HW interrupts and
1963         * tasklet execution (mostly due to ksoftirqd latency, but it can also
1964         * be due to lazy CS events), so let's run the tasklet manually if there
1965         * is a chance it may submit this request. If the request is not ready
1966         * to run, as it is waiting for other fences to be signaled, flushing
1967         * the tasklet is busy work without any advantage for this client.
1968         *
1969         * If the HW is being lazy, this is the last chance before we go to
1970         * sleep to catch any pending events. We will check periodically in
1971         * the heartbeat to flush the submission tasklets as a last resort
1972         * for unhappy HW.
1973         */
1974        if (i915_request_is_ready(rq))
1975                __intel_engine_flush_submission(rq->engine, false);
1976
1977        for (;;) {
1978                set_current_state(state);
1979
1980                if (dma_fence_is_signaled(&rq->fence))
1981                        break;
1982
1983                if (signal_pending_state(state, current)) {
1984                        timeout = -ERESTARTSYS;
1985                        break;
1986                }
1987
1988                if (!timeout) {
1989                        timeout = -ETIME;
1990                        break;
1991                }
1992
1993                timeout = io_schedule_timeout(timeout);
1994        }
1995        __set_current_state(TASK_RUNNING);
1996
1997        if (READ_ONCE(wait.tsk))
1998                dma_fence_remove_callback(&rq->fence, &wait.cb);
1999        GEM_BUG_ON(!list_empty(&wait.cb.node));
2000
2001out:
2002        mutex_release(&rq->engine->gt->reset.mutex.dep_map, _THIS_IP_);
2003        trace_i915_request_wait_end(rq);
2004        return timeout;
2005}
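
/*
 * Minimal usage sketch, for illustration only: a bounded, interruptible wait
 * with a millisecond budget converted to jiffies; "rq" is a placeholder and
 * the 100ms budget is arbitrary:
 *
 *	long ret;
 *
 *	ret = i915_request_wait(rq, I915_WAIT_INTERRUPTIBLE,
 *				msecs_to_jiffies(100));
 *	if (ret < 0)
 *		return ret;
 *
 * A negative return is -ETIME (still busy after 100ms) or a signal, while a
 * non-negative return is the unused portion of the budget in jiffies.
 */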
2006
2007static int print_sched_attr(const struct i915_sched_attr *attr,
2008                            char *buf, int x, int len)
2009{
2010        if (attr->priority == I915_PRIORITY_INVALID)
2011                return x;
2012
2013        x += snprintf(buf + x, len - x,
2014                      " prio=%d", attr->priority);
2015
2016        return x;
2017}
2018
2019static char queue_status(const struct i915_request *rq)
2020{
2021        if (i915_request_is_active(rq))
2022                return 'E';
2023
2024        if (i915_request_is_ready(rq))
2025                return intel_engine_is_virtual(rq->engine) ? 'V' : 'R';
2026
2027        return 'U';
2028}
2029
2030static const char *run_status(const struct i915_request *rq)
2031{
2032        if (__i915_request_is_complete(rq))
2033                return "!";
2034
2035        if (__i915_request_has_started(rq))
2036                return "*";
2037
2038        if (!i915_sw_fence_signaled(&rq->semaphore))
2039                return "&";
2040
2041        return "";
2042}
2043
2044static const char *fence_status(const struct i915_request *rq)
2045{
2046        if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags))
2047                return "+";
2048
2049        if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &rq->fence.flags))
2050                return "-";
2051
2052        return "";
2053}
2054
2055void i915_request_show(struct drm_printer *m,
2056                       const struct i915_request *rq,
2057                       const char *prefix,
2058                       int indent)
2059{
2060        const char *name = rq->fence.ops->get_timeline_name((struct dma_fence *)&rq->fence);
2061        char buf[80] = "";
2062        int x = 0;
2063
2064        /*
2065         * The prefix is used to show the queue status, for which we use
2066         * the following flags:
2067         *
2068         *  U [Unready]
2069         *    - initial status upon being submitted by the user
2070         *
2071         *    - the request is not ready for execution as it is waiting
2072         *      for external fences
2073         *
2074         *  R [Ready]
2075         *    - all fences the request was waiting on have been signaled,
2076         *      and the request is now ready for execution and will be
2077         *      in a backend queue
2078         *
2079         *    - a ready request may still need to wait on semaphores
2080         *      [internal fences]
2081         *
2082         *  V [Ready/virtual]
2083         *    - same as ready, but queued over multiple backends
2084         *
2085         *  E [Executing]
2086         *    - the request has been transferred from the backend queue and
2087         *      submitted for execution on HW
2088         *
2089         *    - a completed request may still be regarded as executing; its
2090         *      status may not be updated until it is retired and removed
2091         *      from the lists
2092         */
2093
2094        x = print_sched_attr(&rq->sched.attr, buf, x, sizeof(buf));
2095
2096        drm_printf(m, "%s%.*s%c %llx:%lld%s%s %s @ %dms: %s\n",
2097                   prefix, indent, "                ",
2098                   queue_status(rq),
2099                   rq->fence.context, rq->fence.seqno,
2100                   run_status(rq),
2101                   fence_status(rq),
2102                   buf,
2103                   jiffies_to_msecs(jiffies - rq->emitted_jiffies),
2104                   name);
2105}
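
/*
 * Minimal usage sketch, for illustration only: dumping a request's state to
 * the kernel log through a drm_printer, as the engine dump code does; "rq"
 * is a placeholder:
 *
 *	struct drm_printer p =
 *		drm_info_printer(rq->engine->i915->drm.dev);
 *
 *	i915_request_show(&p, rq, "", 0);
 */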
2106
2107#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
2108#include "selftests/mock_request.c"
2109#include "selftests/i915_request.c"
2110#endif
2111
2112static void i915_global_request_shrink(void)
2113{
2114        kmem_cache_shrink(global.slab_execute_cbs);
2115        kmem_cache_shrink(global.slab_requests);
2116}
2117
2118static void i915_global_request_exit(void)
2119{
2120        kmem_cache_destroy(global.slab_execute_cbs);
2121        kmem_cache_destroy(global.slab_requests);
2122}
2123
2124static struct i915_global_request global = { {
2125        .shrink = i915_global_request_shrink,
2126        .exit = i915_global_request_exit,
2127} };
2128
2129int __init i915_global_request_init(void)
2130{
2131        global.slab_requests =
2132                kmem_cache_create("i915_request",
2133                                  sizeof(struct i915_request),
2134                                  __alignof__(struct i915_request),
2135                                  SLAB_HWCACHE_ALIGN |
2136                                  SLAB_RECLAIM_ACCOUNT |
2137                                  SLAB_TYPESAFE_BY_RCU,
2138                                  __i915_request_ctor);
2139        if (!global.slab_requests)
2140                return -ENOMEM;
2141
2142        global.slab_execute_cbs = KMEM_CACHE(execute_cb,
2143                                             SLAB_HWCACHE_ALIGN |
2144                                             SLAB_RECLAIM_ACCOUNT |
2145                                             SLAB_TYPESAFE_BY_RCU);
2146        if (!global.slab_execute_cbs)
2147                goto err_requests;
2148
2149        i915_global_register(&global.base);
2150        return 0;
2151
2152err_requests:
2153        kmem_cache_destroy(global.slab_requests);
2154        return -ENOMEM;
2155}
2156