linux/drivers/gpu/drm/i915/gt/intel_engine_pm.c
/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#include "i915_drv.h"

#include "intel_context.h"
#include "intel_engine.h"
#include "intel_engine_heartbeat.h"
#include "intel_engine_pm.h"
#include "intel_gt.h"
#include "intel_gt_pm.h"
#include "intel_rc6.h"
#include "intel_ring.h"
#include "shmem_utils.h"

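/*
 * Bring the engine back to life: take a wakeref on the GT, scrub the
 * kernel context image that went stale while we idled (under
 * CONFIG_DRM_I915_DEBUG_GEM it is poisoned first so we never grow to
 * trust its old contents), call the backend's unpark hook and restart
 * the heartbeat.
 */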
static int __engine_unpark(struct intel_wakeref *wf)
{
        struct intel_engine_cs *engine =
                container_of(wf, typeof(*engine), wakeref);
        struct intel_context *ce;

        ENGINE_TRACE(engine, "\n");

        intel_gt_pm_get(engine->gt);

        /* Discard stale context state from across idling */
        ce = engine->kernel_context;
        if (ce) {
                GEM_BUG_ON(test_bit(CONTEXT_VALID_BIT, &ce->flags));

                /* First poison the image to verify we never fully trust it */
                if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM) && ce->state) {
                        struct drm_i915_gem_object *obj = ce->state->obj;
                        int type = i915_coherent_map_type(engine->i915);
                        void *map;

                        map = i915_gem_object_pin_map(obj, type);
                        if (!IS_ERR(map)) {
                                memset(map, CONTEXT_REDZONE, obj->base.size);
                                i915_gem_object_flush_map(obj);
                                i915_gem_object_unpin_map(obj);
                        }
                }

                ce->ops->reset(ce);
        }

        if (engine->unpark)
                engine->unpark(engine);

        intel_engine_unpark_heartbeat(engine);
        return 0;
}

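/*
 * Lockdep-only annotations for building the parking request. We cannot
 * take the kernel context's timeline->mutex for real here, as we may
 * already be underneath it while retiring; instead, with CONFIG_LOCKDEP
 * enabled we merely tell lockdep that we hold it for the duration
 * (relying on the exclusivity of __engine_park() for the actual
 * protection), and without lockdep these helpers are no-ops.
 */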
#if IS_ENABLED(CONFIG_LOCKDEP)

static inline unsigned long __timeline_mark_lock(struct intel_context *ce)
{
        unsigned long flags;

        local_irq_save(flags);
        mutex_acquire(&ce->timeline->mutex.dep_map, 2, 0, _THIS_IP_);

        return flags;
}

static inline void __timeline_mark_unlock(struct intel_context *ce,
                                          unsigned long flags)
{
        mutex_release(&ce->timeline->mutex.dep_map, _THIS_IP_);
        local_irq_restore(flags);
}

#else

static inline unsigned long __timeline_mark_lock(struct intel_context *ce)
{
        return 0;
}

static inline void __timeline_mark_unlock(struct intel_context *ce,
                                          unsigned long flags)
{
}

#endif /* !IS_ENABLED(CONFIG_LOCKDEP) */

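/*
 * Fence callback for the parking request: once it completes, fold the
 * microseconds between emitting the request and its fence being
 * signalled into the engine's latency estimate (an EWMA).
 */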
static void duration(struct dma_fence *fence, struct dma_fence_cb *cb)
{
        struct i915_request *rq = to_request(fence);

        ewma__engine_latency_add(&rq->engine->latency,
                                 ktime_us_delta(rq->fence.timestamp,
                                                rq->duration.emitted));
}

static void
__queue_and_release_pm(struct i915_request *rq,
                       struct intel_timeline *tl,
                       struct intel_engine_cs *engine)
{
        struct intel_gt_timelines *timelines = &engine->gt->timelines;

        ENGINE_TRACE(engine, "parking\n");

        /*
         * We have to serialise all potential retirement paths with our
         * submission, as we don't want to underflow either the
         * engine->wakeref.counter or our timeline->active_count.
         *
         * Equally, we cannot allow a new submission to start until
         * after we finish queueing, nor could we allow that submitter
         * to retire us before we are ready!
         */
        spin_lock(&timelines->lock);

        /* Let intel_gt_retire_requests() retire us (acquired under lock) */
        if (!atomic_fetch_inc(&tl->active_count))
                list_add_tail(&tl->link, &timelines->active_list);

        /* Hand the request over to HW and so engine_retire() */
        __i915_request_queue(rq, NULL);

        /* Let new submissions commence (and maybe retire this timeline) */
        __intel_wakeref_defer_park(&engine->wakeref);

        spin_unlock(&timelines->lock);
}

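/*
 * Before parking, submit one last barrier request in the kernel context
 * so that no user context remains live on the engine. Returns true if
 * it is already safe to power down (we are in the kernel context, the
 * GT is wedged, or the barrier could not be built), and false if the
 * barrier was queued, in which case __engine_park() backs off with
 * -EBUSY and parking is retried once that request has been retired.
 */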
static bool switch_to_kernel_context(struct intel_engine_cs *engine)
{
        struct intel_context *ce = engine->kernel_context;
        struct i915_request *rq;
        unsigned long flags;
        bool result = true;

        /* GPU is pointing to the void, as good as in the kernel context. */
        if (intel_gt_is_wedged(engine->gt))
                return true;

        GEM_BUG_ON(!intel_context_is_barrier(ce));

        /* Already inside the kernel context, safe to power down. */
        if (engine->wakeref_serial == engine->serial)
                return true;

        /*
         * Note, we do this without taking the timeline->mutex. We cannot
         * as we may be called while retiring the kernel context and so
         * already underneath the timeline->mutex. Instead we rely on the
         * exclusive property of the __engine_park that prevents anyone
         * else from creating a request on this engine. This also requires
         * that the ring is empty and we avoid any waits while constructing
         * the context, as they assume protection by the timeline->mutex.
         * This should hold true as we can only park the engine after
         * retiring the last request, thus all rings should be empty and
         * all timelines idle.
         *
         * For unlocking, there are 2 other parties and the GPU who have a
         * stake here.
         *
         * A new gpu user will be waiting on the engine-pm to start their
         * engine_unpark. New waiters are predicated on engine->wakeref.count
         * and so intel_wakeref_defer_park() acts like a mutex_unlock of the
         * engine->wakeref.
         *
         * The other party is intel_gt_retire_requests(), which is walking the
         * list of active timelines looking for completions. Meanwhile as soon
         * as we call __i915_request_queue(), the GPU may complete our request.
         * Ergo, if we put ourselves on the timelines.active_list
         * (see intel_timeline_enter()) before we increment the
         * engine->wakeref.count, we may see the request completion and retire
         * it causing an underflow of the engine->wakeref.
         */
        flags = __timeline_mark_lock(ce);
        GEM_BUG_ON(atomic_read(&ce->timeline->active_count) < 0);

        rq = __i915_request_create(ce, GFP_NOWAIT);
        if (IS_ERR(rq))
                /* Context switch failed, hope for the best! Maybe reset? */
                goto out_unlock;

        /* Check again on the next retirement. */
        engine->wakeref_serial = engine->serial + 1;
        i915_request_add_active_barriers(rq);

        /* Install ourselves as a preemption barrier */
        rq->sched.attr.priority = I915_PRIORITY_BARRIER;
        if (likely(!__i915_request_commit(rq))) { /* engine should be idle! */
                /*
                 * Use an interrupt for precise measurement of duration,
                 * otherwise we rely on someone else retiring all the requests
                 * which may delay the signaling (i.e. we will likely wait
                 * until the background request retirement running every
                 * second or two).
                 */
                BUILD_BUG_ON(sizeof(rq->duration) > sizeof(rq->submitq));
                dma_fence_add_callback(&rq->fence, &rq->duration.cb, duration);
                rq->duration.emitted = ktime_get();
        }

        /* Expose ourselves to the world */
        __queue_and_release_pm(rq, ce->timeline, engine);

        result = false;
out_unlock:
        __timeline_mark_unlock(ce, flags);
        return result;
}

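/*
 * Flush any idle barriers still queued on the engine, invoking each
 * callback with ERR_PTR(-EAGAIN) in place of a signalled fence; this
 * cleans up barriers that never made it onto a request, e.g. after
 * wedging.
 */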
static void call_idle_barriers(struct intel_engine_cs *engine)
{
        struct llist_node *node, *next;

        llist_for_each_safe(node, next, llist_del_all(&engine->barrier_tasks)) {
                struct dma_fence_cb *cb =
                        container_of((struct list_head *)node,
                                     typeof(*cb), node);

                cb->func(ERR_PTR(-EAGAIN), cb);
        }
}

static int __engine_park(struct intel_wakeref *wf)
{
        struct intel_engine_cs *engine =
                container_of(wf, typeof(*engine), wakeref);

        engine->saturated = 0;

        /*
         * If one and only one request is completed between pm events,
         * we know that we are inside the kernel context and it is
         * safe to power down. (We are paranoid in case that runtime
         * suspend causes corruption to the active context image, and
         * want to avoid that impacting userspace.)
         */
        if (!switch_to_kernel_context(engine))
                return -EBUSY;

        ENGINE_TRACE(engine, "parked\n");

        call_idle_barriers(engine); /* cleanup after wedging */

        intel_engine_park_heartbeat(engine);
        intel_engine_disarm_breadcrumbs(engine);

        /* Must be reset upon idling, or we may miss the busy wakeup. */
        GEM_BUG_ON(engine->execlists.queue_priority_hint != INT_MIN);

        if (engine->park)
                engine->park(engine);

        engine->execlists.no_priolist = false;

        /* While gt calls i915_vma_parked(), we have to break the lock cycle */
        intel_gt_pm_put_async(engine->gt);
        return 0;
}

static const struct intel_wakeref_ops wf_ops = {
        .get = __engine_unpark,
        .put = __engine_park,
};

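/*
 * Hook the engine up to the pm machinery: initialise its wakeref with
 * the park/unpark ops above and the device's runtime pm, and set up the
 * heartbeat worker.
 */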
void intel_engine_init__pm(struct intel_engine_cs *engine)
{
        struct intel_runtime_pm *rpm = engine->uncore->rpm;

        intel_wakeref_init(&engine->wakeref, rpm, &wf_ops);
        intel_engine_init_heartbeat(engine);
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_engine_pm.c"
#endif