linux/drivers/gpu/drm/i915/i915_scheduler.c
/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2018 Intel Corporation
 */

#include <linux/mutex.h>

#include "i915_drv.h"
#include "i915_globals.h"
#include "i915_request.h"
#include "i915_scheduler.h"

static struct i915_global_scheduler {
        struct i915_global base;
        struct kmem_cache *slab_dependencies;
        struct kmem_cache *slab_priorities;
} global;

static DEFINE_SPINLOCK(schedule_lock);

static const struct i915_request *
node_to_request(const struct i915_sched_node *node)
{
        return container_of(node, const struct i915_request, sched);
}

static inline bool node_started(const struct i915_sched_node *node)
{
        return i915_request_started(node_to_request(node));
}

static inline bool node_signaled(const struct i915_sched_node *node)
{
        return i915_request_completed(node_to_request(node));
}

static inline struct i915_priolist *to_priolist(struct rb_node *rb)
{
        return rb_entry(rb, struct i915_priolist, node);
}

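/*
 * Debug-only sanity checks (skipped unless CONFIG_DRM_I915_DEBUG_GEM): the
 * execlists queue must be an rbtree sorted by descending priority, and each
 * priolist's used bitmask must cover every non-empty request bucket.
 */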
static void assert_priolists(struct intel_engine_execlists * const execlists)
{
        struct rb_node *rb;
        long last_prio, i;

        if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
                return;

        GEM_BUG_ON(rb_first_cached(&execlists->queue) !=
                   rb_first(&execlists->queue.rb_root));

        last_prio = (INT_MAX >> I915_USER_PRIORITY_SHIFT) + 1;
        for (rb = rb_first_cached(&execlists->queue); rb; rb = rb_next(rb)) {
                const struct i915_priolist *p = to_priolist(rb);

                GEM_BUG_ON(p->priority >= last_prio);
                last_prio = p->priority;

                GEM_BUG_ON(!p->used);
                for (i = 0; i < ARRAY_SIZE(p->requests); i++) {
                        if (list_empty(&p->requests[i]))
                                continue;

                        GEM_BUG_ON(!(p->used & BIT(i)));
                }
        }
}

struct list_head *
i915_sched_lookup_priolist(struct intel_engine_cs *engine, int prio)
{
        struct intel_engine_execlists * const execlists = &engine->execlists;
        struct i915_priolist *p;
        struct rb_node **parent, *rb;
        bool first = true;
        int idx, i;

        lockdep_assert_held(&engine->active.lock);
        assert_priolists(execlists);

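        /*
         * A request's priority is split in two: the bits below
         * I915_USER_PRIORITY_SHIFT are internal priority bumps and select
         * the bucket within a priolist (idx 0 holds the most-bumped
         * requests), while the remaining high bits are the user level used
         * as the rbtree key below. For instance, if the shift were 2 (four
         * buckets), prio 0b1011 would give idx = 4 - 3 - 1 = 0 and user
         * level 2.
         */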
        /* buckets sorted from highest [in slot 0] to lowest priority */
        idx = I915_PRIORITY_COUNT - (prio & I915_PRIORITY_MASK) - 1;
        prio >>= I915_USER_PRIORITY_SHIFT;
        if (unlikely(execlists->no_priolist))
                prio = I915_PRIORITY_NORMAL;

find_priolist:
        /* most positive priority is scheduled first, equal priorities fifo */
        rb = NULL;
        parent = &execlists->queue.rb_root.rb_node;
        while (*parent) {
                rb = *parent;
                p = to_priolist(rb);
                if (prio > p->priority) {
                        parent = &rb->rb_left;
                } else if (prio < p->priority) {
                        parent = &rb->rb_right;
                        first = false;
                } else {
                        goto out;
                }
        }

        if (prio == I915_PRIORITY_NORMAL) {
                p = &execlists->default_priolist;
        } else {
                p = kmem_cache_alloc(global.slab_priorities, GFP_ATOMIC);
                /* Convert an allocation failure to a priority bump */
                if (unlikely(!p)) {
                        prio = I915_PRIORITY_NORMAL; /* recurses just once */

                        /*
                         * To maintain ordering with all rendering, after an
                         * allocation failure we have to disable all scheduling.
                         * Requests will then be executed in fifo, and schedule
                         * will ensure that dependencies are emitted in fifo.
                         * There will still be some reordering with existing
                         * requests, so if userspace lied about their
                         * dependencies that reordering may be visible.
                         */
                        execlists->no_priolist = true;
                        goto find_priolist;
                }
        }

        p->priority = prio;
        for (i = 0; i < ARRAY_SIZE(p->requests); i++)
                INIT_LIST_HEAD(&p->requests[i]);
        rb_link_node(&p->node, rb, parent);
        rb_insert_color_cached(&p->node, &execlists->queue, first);
        p->used = 0;

out:
        p->used |= BIT(idx);
        return &p->requests[idx];
}
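
/*
 * Typical usage (sketch only): a submission backend queues a ready request
 * onto the bucket returned here while holding engine->active.lock, along
 * the lines of
 *
 *      list_add_tail(&rq->sched.link,
 *                    i915_sched_lookup_priolist(engine, rq_prio(rq)));
 *
 * and later walks the rbtree in priority order when picking the next
 * requests to submit.
 */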

void __i915_priolist_free(struct i915_priolist *p)
{
        kmem_cache_free(global.slab_priorities, p);
}

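/*
 * Scratch state carried across the dependency walk in __i915_schedule():
 * it caches the priolist found for the current engine so that we do not
 * repeat the rbtree lookup for every node, and it is reset whenever
 * sched_lock_engine() has to switch to a different engine lock.
 */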
struct sched_cache {
        struct list_head *priolist;
};

static struct intel_engine_cs *
sched_lock_engine(const struct i915_sched_node *node,
                  struct intel_engine_cs *locked,
                  struct sched_cache *cache)
{
        const struct i915_request *rq = node_to_request(node);
        struct intel_engine_cs *engine;

        GEM_BUG_ON(!locked);

        /*
         * Virtual engines complicate acquiring the engine timeline lock,
         * as their rq->engine pointer is not stable until under that
         * engine lock. The simple ploy we use is to take the lock then
         * check that the rq still belongs to the newly locked engine.
         */
        while (locked != (engine = READ_ONCE(rq->engine))) {
                spin_unlock(&locked->active.lock);
                memset(cache, 0, sizeof(*cache));
                spin_lock(&engine->active.lock);
                locked = engine;
        }

        GEM_BUG_ON(locked != engine);
        return locked;
}

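/*
 * Effective priority used for preemption decisions: the __NO_PREEMPTION
 * bits are folded in so that internal bumps below the user level (such as
 * the WAIT boost) are never, on their own, grounds to preempt an inflight
 * request.
 */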
static inline int rq_prio(const struct i915_request *rq)
{
        return rq->sched.attr.priority | __NO_PREEMPTION;
}

static inline bool need_preempt(int prio, int active)
{
        /*
         * Allow preemption of low -> normal -> high, but we do
         * not allow low priority tasks to preempt other low priority
         * tasks under the impression that latency for low priority
         * tasks does not matter (as much as background throughput),
         * so kiss.
         */
        return prio >= max(I915_PRIORITY_NORMAL, active);
}

static void kick_submission(struct intel_engine_cs *engine,
                            const struct i915_request *rq,
                            int prio)
{
        const struct i915_request *inflight;

        /*
         * We only need to kick the tasklet once for the high priority
         * new context we add into the queue.
         */
        if (prio <= engine->execlists.queue_priority_hint)
                return;

        rcu_read_lock();

        /* Nothing currently active? We're overdue for a submission! */
        inflight = execlists_active(&engine->execlists);
        if (!inflight)
                goto unlock;

        /*
         * If we are already the currently executing context, don't
         * bother evaluating if we should preempt ourselves, or if
         * we expect nothing to change as a result of running the
         * tasklet, i.e. we have not changed the priority queue
         * sufficiently to oust the running context.
         */
        if (inflight->hw_context == rq->hw_context)
                goto unlock;

        engine->execlists.queue_priority_hint = prio;
        if (need_preempt(prio, rq_prio(inflight)))
                tasklet_hi_schedule(&engine->execlists.tasklet);

unlock:
        rcu_read_unlock();
}

static void __i915_schedule(struct i915_sched_node *node,
                            const struct i915_sched_attr *attr)
{
        struct intel_engine_cs *engine;
        struct i915_dependency *dep, *p;
        struct i915_dependency stack;
        const int prio = attr->priority;
        struct sched_cache cache;
        LIST_HEAD(dfs);

        /* Needed in order to use the temporary link inside i915_dependency */
        lockdep_assert_held(&schedule_lock);
        GEM_BUG_ON(prio == I915_PRIORITY_INVALID);

        if (prio <= READ_ONCE(node->attr.priority))
                return;

        if (node_signaled(node))
                return;

        stack.signaler = node;
        list_add(&stack.dfs_link, &dfs);

        /*
         * Recursively bump all dependent priorities to match the new request.
         *
         * A naive approach would be to use recursion:
         * static void update_priorities(struct i915_sched_node *node, prio) {
         *      list_for_each_entry(dep, &node->signalers_list, signal_link)
         *              update_priorities(dep->signal, prio)
         *      queue_request(node);
         * }
         * but that may have unlimited recursion depth and so runs a very
         * real risk of overrunning the kernel stack. Instead, we build
         * a flat list of all dependencies starting with the current request.
         * As we walk the list of dependencies, we add all of its dependencies
         * to the end of the list (this may include an already visited
         * request) and continue to walk onwards onto the new dependencies. The
         * end result is a topological list of requests in reverse order; the
         * last element in the list is the request we must execute first.
         */
        list_for_each_entry(dep, &dfs, dfs_link) {
                struct i915_sched_node *node = dep->signaler;

                /* If we are already flying, we know we have no signalers */
                if (node_started(node))
                        continue;

                /*
                 * Within an engine, there can be no cycle, but we may
                 * refer to the same dependency chain multiple times
                 * (redundant dependencies are not eliminated) and across
                 * engines.
                 */
                list_for_each_entry(p, &node->signalers_list, signal_link) {
                        GEM_BUG_ON(p == dep); /* no cycles! */

                        if (node_signaled(p->signaler))
                                continue;

                        if (prio > READ_ONCE(p->signaler->attr.priority))
                                list_move_tail(&p->dfs_link, &dfs);
                }
        }

        /*
         * If we didn't need to bump any existing priorities, and we haven't
         * yet submitted this request (i.e. there is no potential race with
         * execlists_submit_request()), we can set our own priority and skip
         * acquiring the engine locks.
         */
        if (node->attr.priority == I915_PRIORITY_INVALID) {
                GEM_BUG_ON(!list_empty(&node->link));
                node->attr = *attr;

                if (stack.dfs_link.next == stack.dfs_link.prev)
                        return;

                __list_del_entry(&stack.dfs_link);
        }

        memset(&cache, 0, sizeof(cache));
        engine = node_to_request(node)->engine;
        spin_lock(&engine->active.lock);

        /* Fifo and depth-first replacement ensure our deps execute before us */
        engine = sched_lock_engine(node, engine, &cache);
        list_for_each_entry_safe_reverse(dep, p, &dfs, dfs_link) {
                INIT_LIST_HEAD(&dep->dfs_link);

                node = dep->signaler;
                engine = sched_lock_engine(node, engine, &cache);
                lockdep_assert_held(&engine->active.lock);

                /* Recheck after acquiring the engine->active.lock */
                if (prio <= node->attr.priority || node_signaled(node))
                        continue;

                GEM_BUG_ON(node_to_request(node)->engine != engine);

                node->attr.priority = prio;

                if (list_empty(&node->link)) {
                        /*
                         * If the request is not in the priolist queue because
                         * it is not yet runnable, then it doesn't contribute
                         * to our preemption decisions. On the other hand,
                         * if the request is on the HW, it too is not in the
                         * queue; but in that case we may still need to reorder
                         * the inflight requests.
                         */
                        continue;
                }

                if (!intel_engine_is_virtual(engine) &&
                    !i915_request_is_active(node_to_request(node))) {
                        if (!cache.priolist)
                                cache.priolist =
                                        i915_sched_lookup_priolist(engine,
                                                                   prio);
                        list_move_tail(&node->link, cache.priolist);
                }

                /* Defer (tasklet) submission until after all of our updates. */
                kick_submission(engine, node_to_request(node), prio);
        }

        spin_unlock(&engine->active.lock);
}

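/*
 * Apply a new scheduling attribute to a request: raise its priority to
 * attr->priority and propagate that priority to every outstanding signaler
 * in its dependency chain, so that the request is never left queued behind
 * lower priority work it depends on. Lowering the priority is a no-op.
 */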
void i915_schedule(struct i915_request *rq, const struct i915_sched_attr *attr)
{
        spin_lock_irq(&schedule_lock);
        __i915_schedule(&rq->sched, attr);
        spin_unlock_irq(&schedule_lock);
}

static void __bump_priority(struct i915_sched_node *node, unsigned int bump)
{
        struct i915_sched_attr attr = node->attr;

        attr.priority |= bump;
        __i915_schedule(node, &attr);
}

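/*
 * OR an internal priority bump (bits below I915_USER_PRIORITY_SHIFT, e.g.
 * applied on behalf of a waiter) into the request's priority, leaving the
 * user-visible priority level untouched.
 */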
void i915_schedule_bump_priority(struct i915_request *rq, unsigned int bump)
{
        unsigned long flags;

        GEM_BUG_ON(bump & ~I915_PRIORITY_MASK);
        if (READ_ONCE(rq->sched.attr.priority) & bump)
                return;

        spin_lock_irqsave(&schedule_lock, flags);
        __bump_priority(&rq->sched, bump);
        spin_unlock_irqrestore(&schedule_lock, flags);
}

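/* Initialise an embedded sched_node: no dependencies and no priority yet. */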
void i915_sched_node_init(struct i915_sched_node *node)
{
        INIT_LIST_HEAD(&node->signalers_list);
        INIT_LIST_HEAD(&node->waiters_list);
        INIT_LIST_HEAD(&node->link);
        node->attr.priority = I915_PRIORITY_INVALID;
        node->semaphores = 0;
        node->flags = 0;
}

static struct i915_dependency *
i915_dependency_alloc(void)
{
        return kmem_cache_alloc(global.slab_dependencies, GFP_KERNEL);
}

static void
i915_dependency_free(struct i915_dependency *dep)
{
        kmem_cache_free(global.slab_dependencies, dep);
}

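/*
 * Link node as a waiter upon signal using the caller-provided dep.
 * Returns true if the dependency was recorded, or false if signal has
 * already completed and the dependency is redundant; in the latter case
 * the caller retains ownership of dep and may free it.
 */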
bool __i915_sched_node_add_dependency(struct i915_sched_node *node,
                                      struct i915_sched_node *signal,
                                      struct i915_dependency *dep,
                                      unsigned long flags)
{
        bool ret = false;

        spin_lock_irq(&schedule_lock);

        if (!node_signaled(signal)) {
                INIT_LIST_HEAD(&dep->dfs_link);
                list_add(&dep->wait_link, &signal->waiters_list);
                list_add(&dep->signal_link, &node->signalers_list);
                dep->signaler = signal;
                dep->waiter = node;
                dep->flags = flags;

                /* Keep track of whether anyone on this chain has a semaphore */
                if (signal->flags & I915_SCHED_HAS_SEMAPHORE_CHAIN &&
                    !node_started(signal))
                        node->flags |= I915_SCHED_HAS_SEMAPHORE_CHAIN;

                /*
                 * As we do not allow WAIT to preempt inflight requests,
                 * once we have executed a request, along with triggering
                 * any execution callbacks, we must preserve its ordering
                 * within the non-preemptible FIFO.
                 */
                BUILD_BUG_ON(__NO_PREEMPTION & ~I915_PRIORITY_MASK);
                if (flags & I915_DEPENDENCY_EXTERNAL)
                        __bump_priority(signal, __NO_PREEMPTION);

                ret = true;
        }

        spin_unlock_irq(&schedule_lock);

        return ret;
}

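/*
 * Allocate and record a dependency so that node is not submitted before
 * signal. Returns 0 on success (including when signal has already
 * completed and no dependency needs to be tracked), or -ENOMEM if the
 * allocation fails.
 */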
int i915_sched_node_add_dependency(struct i915_sched_node *node,
                                   struct i915_sched_node *signal)
{
        struct i915_dependency *dep;

        dep = i915_dependency_alloc();
        if (!dep)
                return -ENOMEM;

        if (!__i915_sched_node_add_dependency(node, signal, dep,
                                              I915_DEPENDENCY_EXTERNAL |
                                              I915_DEPENDENCY_ALLOC))
                i915_dependency_free(dep);

        return 0;
}

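/*
 * Tear down both dependency lists of a node that is being retired, freeing
 * any i915_dependency that was allocated on its behalf.
 */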
void i915_sched_node_fini(struct i915_sched_node *node)
{
        struct i915_dependency *dep, *tmp;

        spin_lock_irq(&schedule_lock);

        /*
         * Everyone we depended upon (the fences we wait to be signaled)
         * should retire before us and remove themselves from our list.
         * However, retirement is run independently on each timeline and
         * so we may be called out-of-order.
         */
        list_for_each_entry_safe(dep, tmp, &node->signalers_list, signal_link) {
                GEM_BUG_ON(!node_signaled(dep->signaler));
                GEM_BUG_ON(!list_empty(&dep->dfs_link));

                list_del(&dep->wait_link);
                if (dep->flags & I915_DEPENDENCY_ALLOC)
                        i915_dependency_free(dep);
        }

        /* Remove ourselves from everyone who depends upon us */
        list_for_each_entry_safe(dep, tmp, &node->waiters_list, wait_link) {
                GEM_BUG_ON(dep->signaler != node);
                GEM_BUG_ON(!list_empty(&dep->dfs_link));

                list_del(&dep->signal_link);
                if (dep->flags & I915_DEPENDENCY_ALLOC)
                        i915_dependency_free(dep);
        }

        spin_unlock_irq(&schedule_lock);
}

static void i915_global_scheduler_shrink(void)
{
        kmem_cache_shrink(global.slab_dependencies);
        kmem_cache_shrink(global.slab_priorities);
}

static void i915_global_scheduler_exit(void)
{
        kmem_cache_destroy(global.slab_dependencies);
        kmem_cache_destroy(global.slab_priorities);
}

static struct i915_global_scheduler global = { {
        .shrink = i915_global_scheduler_shrink,
        .exit = i915_global_scheduler_exit,
} };

int __init i915_global_scheduler_init(void)
{
        global.slab_dependencies = KMEM_CACHE(i915_dependency,
                                              SLAB_HWCACHE_ALIGN);
        if (!global.slab_dependencies)
                return -ENOMEM;

        global.slab_priorities = KMEM_CACHE(i915_priolist,
                                            SLAB_HWCACHE_ALIGN);
        if (!global.slab_priorities)
                goto err_priorities;

        i915_global_register(&global.base);
        return 0;

err_priorities:
        kmem_cache_destroy(global.slab_dependencies);
        return -ENOMEM;
}