linux/drivers/gpu/drm/i915/gt/intel_gt_requests.c
/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#include <linux/workqueue.h>

#include "i915_drv.h" /* for_each_engine() */
#include "i915_request.h"
#include "intel_engine_heartbeat.h"
#include "intel_gt.h"
#include "intel_gt_pm.h"
#include "intel_gt_requests.h"
#include "intel_timeline.h"

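/*
 * retire_requests() walks the timeline's request list in submission order,
 * retiring each completed request. It stops at the first request that
 * cannot yet be retired and reports whether the timeline is now idle,
 * i.e. everything was retired and nothing new has been submitted since.
 */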
static bool retire_requests(struct intel_timeline *tl)
{
        struct i915_request *rq, *rn;

        list_for_each_entry_safe(rq, rn, &tl->requests, link)
                if (!i915_request_retire(rq))
                        return false;

        /* And check nothing new was submitted */
        return !i915_active_fence_isset(&tl->last_request);
}

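/*
 * The engine is treated as active here while its kernel context timeline
 * still has requests outstanding, e.g. the idle barriers emitted when
 * parking the engine.
 */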
static bool engine_active(const struct intel_engine_cs *engine)
{
        return !list_empty(&engine->kernel_context->timeline->requests);
}

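/*
 * Flush in-flight submission and background retirement on every engine so
 * that anything already completed can be retired. Returns true if any
 * engine still has work outstanding on its kernel context, i.e. the GT
 * cannot yet be considered idle. A zero timeout or a sleeping GT skips
 * the flush entirely.
 */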
static bool flush_submission(struct intel_gt *gt, long timeout)
{
        struct intel_engine_cs *engine;
        enum intel_engine_id id;
        bool active = false;

        if (!timeout)
                return false;

        if (!intel_gt_pm_is_awake(gt))
                return false;

        for_each_engine(engine, gt, id) {
                intel_engine_flush_submission(engine);

                /* Flush the background retirement and idle barriers */
                flush_work(&engine->retire_work);
                flush_delayed_work(&engine->wakeref.work);

                /* Is the idle barrier still outstanding? */
                active |= engine_active(engine);
        }

        return active;
}

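/*
 * Worker that drains the engine's retirement list built up by add_retire().
 * The list head is detached atomically with xchg(), then each timeline is
 * retired (if its mutex is uncontended) and the reference taken when it was
 * queued is dropped. The low tag bit packed into tl->retire is stripped
 * with ptr_mask_bits() to recover the next element.
 */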
static void engine_retire(struct work_struct *work)
{
        struct intel_engine_cs *engine =
                container_of(work, typeof(*engine), retire_work);
        struct intel_timeline *tl = xchg(&engine->retire, NULL);

        do {
                struct intel_timeline *next = xchg(&tl->retire, NULL);

                /*
                 * Our goal here is to retire _idle_ timelines as soon as
                 * possible (as they are idle, we do not expect userspace
                 * to be cleaning up anytime soon).
                 *
                 * If the timeline is currently locked, either it is being
                 * retired elsewhere or about to be!
                 */
                if (mutex_trylock(&tl->mutex)) {
                        retire_requests(tl);
                        mutex_unlock(&tl->mutex);
                }
                intel_timeline_put(tl);

                GEM_BUG_ON(!next);
                tl = ptr_mask_bits(next, 1);
        } while (tl);
}

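/*
 * Queue a timeline onto the engine's retirement list. Returns true if the
 * list was previously empty and the caller should therefore schedule the
 * retirement worker.
 */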
static bool add_retire(struct intel_engine_cs *engine,
                       struct intel_timeline *tl)
{
#define STUB ((struct intel_timeline *)1)
        struct intel_timeline *first;

        /*
         * We open-code a llist here to include the additional tag [BIT(0)]
         * so that we know when the timeline is already on a
         * retirement queue: either this engine or another.
         */

        if (cmpxchg(&tl->retire, NULL, STUB)) /* already queued */
                return false;

        intel_timeline_get(tl);
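        /*
         * Push the timeline onto the singly-linked engine->retire list:
         * the previous head is packed into tl->retire together with the
         * BIT(0) "queued" tag, and tl becomes the new head.
         */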
        first = READ_ONCE(engine->retire);
        do
                tl->retire = ptr_pack_bits(first, 1, 1);
        while (!try_cmpxchg(&engine->retire, &first, tl));

        return !first;
}

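/**
 * intel_engine_add_retire - queue a timeline for background retirement
 * @engine: the (physical) engine to run the retirement worker on
 * @tl: the timeline holding completed requests to retire
 *
 * Hand the timeline over to the engine's retirement worker so that its
 * completed requests are retired asynchronously. The worker is only kicked
 * when the engine's retirement list transitions from empty to non-empty.
 */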
void intel_engine_add_retire(struct intel_engine_cs *engine,
                             struct intel_timeline *tl)
{
        /* We don't deal well with the engine disappearing beneath us */
        GEM_BUG_ON(intel_engine_is_virtual(engine));

        if (add_retire(engine, tl))
                schedule_work(&engine->retire_work);
}

void intel_engine_init_retire(struct intel_engine_cs *engine)
{
        INIT_WORK(&engine->retire_work, engine_retire);
}

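/*
 * Flush any outstanding retirement work before the engine is torn down;
 * by this point no timeline may remain queued on the engine.
 */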
void intel_engine_fini_retire(struct intel_engine_cs *engine)
{
        flush_work(&engine->retire_work);
        GEM_BUG_ON(engine->retire);
}

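/**
 * intel_gt_retire_requests_timeout - retire completed requests across the GT
 * @gt: the &struct intel_gt to retire requests on
 * @timeout: how long to wait, in jiffies, for outstanding requests
 *           (0 to only retire what has already completed, negative to wait
 *           uninterruptibly for up to -@timeout jiffies)
 *
 * Walk all active timelines, waiting (within the timeout budget) for their
 * last request to be signaled and retiring whatever has completed.
 *
 * Returns the unconsumed portion of @timeout while retirement work remains
 * pending, or 0 once every active timeline has been retired.
 */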
long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout)
{
        struct intel_gt_timelines *timelines = &gt->timelines;
        struct intel_timeline *tl, *tn;
        unsigned long active_count = 0;
        bool interruptible;
        LIST_HEAD(free);

        interruptible = true;
        if (unlikely(timeout < 0))
                timeout = -timeout, interruptible = false;

        flush_submission(gt, timeout); /* kick the ksoftirqd tasklets */
        spin_lock(&timelines->lock);
        list_for_each_entry_safe(tl, tn, &timelines->active_list, link) {
                if (!mutex_trylock(&tl->mutex)) {
                        active_count++; /* report busy to caller, try again? */
                        continue;
                }

                intel_timeline_get(tl);
                GEM_BUG_ON(!atomic_read(&tl->active_count));
                atomic_inc(&tl->active_count); /* pin the list element */
                spin_unlock(&timelines->lock);

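                /*
                 * While there is wait budget left, drop tl->mutex and block
                 * on the timeline's last request; retirement afterwards is
                 * best effort as the timeline may now be in use elsewhere.
                 */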
                if (timeout > 0) {
                        struct dma_fence *fence;

                        fence = i915_active_fence_get(&tl->last_request);
                        if (fence) {
                                mutex_unlock(&tl->mutex);

                                timeout = dma_fence_wait_timeout(fence,
                                                                 interruptible,
                                                                 timeout);
                                dma_fence_put(fence);

                                /* Retirement is best effort */
                                if (!mutex_trylock(&tl->mutex)) {
                                        active_count++;
                                        goto out_active;
                                }
                        }
                }

                if (!retire_requests(tl))
                        active_count++;
                mutex_unlock(&tl->mutex);

out_active:     spin_lock(&timelines->lock);

                /* Resume list iteration after reacquiring spinlock */
                list_safe_reset_next(tl, tn, link);
                if (atomic_dec_and_test(&tl->active_count))
                        list_del(&tl->link);

                /* Defer the final release to after the spinlock */
                if (refcount_dec_and_test(&tl->kref.refcount)) {
                        GEM_BUG_ON(atomic_read(&tl->active_count));
                        list_add(&tl->link, &free);
                }
        }
        spin_unlock(&timelines->lock);

        list_for_each_entry_safe(tl, tn, &free, link)
                __intel_timeline_free(&tl->kref);

        if (flush_submission(gt, timeout)) /* Wait, there's more! */
                active_count++;

        return active_count ? timeout : 0;
}

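/**
 * intel_gt_wait_for_idle - wait for outstanding requests on the GT to retire
 * @gt: the &struct intel_gt to wait upon
 * @timeout: how long to wait, in jiffies
 *
 * Repeatedly retire requests until either the GT runs out of work or the
 * timeout is consumed. A parked (asleep) GT is reported as idle immediately.
 *
 * Returns 0 once the GT is idle (or the timeout has elapsed), -EINTR if a
 * signal is pending, or another negative error code propagated from the wait.
 */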
int intel_gt_wait_for_idle(struct intel_gt *gt, long timeout)
{
        /* If the device is asleep, we have no requests outstanding */
        if (!intel_gt_pm_is_awake(gt))
                return 0;

        while ((timeout = intel_gt_retire_requests_timeout(gt, timeout)) > 0) {
                cond_resched();
                if (signal_pending(current))
                        return -EINTR;
        }

        return timeout;
}

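/*
 * Background retirement worker for the whole GT: it re-arms itself roughly
 * once a second (rounded up to a whole second to batch timer wakeups) and
 * retires any requests that have completed in the meantime.
 */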
static void retire_work_handler(struct work_struct *work)
{
        struct intel_gt *gt =
                container_of(work, typeof(*gt), requests.retire_work.work);

        schedule_delayed_work(&gt->requests.retire_work,
                              round_jiffies_up_relative(HZ));
        intel_gt_retire_requests(gt);
}

void intel_gt_init_requests(struct intel_gt *gt)
{
        INIT_DELAYED_WORK(&gt->requests.retire_work, retire_work_handler);
}

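/*
 * Stop the periodic retirement worker while the GT is parked; it is
 * restarted by intel_gt_unpark_requests() on unpark.
 */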
void intel_gt_park_requests(struct intel_gt *gt)
{
        cancel_delayed_work(&gt->requests.retire_work);
}

void intel_gt_unpark_requests(struct intel_gt *gt)
{
        schedule_delayed_work(&gt->requests.retire_work,
                              round_jiffies_up_relative(HZ));
}

void intel_gt_fini_requests(struct intel_gt *gt)
{
        /* Wait until the work is marked as finished before unloading! */
        cancel_delayed_work_sync(&gt->requests.retire_work);
}