linux/drivers/gpu/drm/i915/selftests/i915_active.c
/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2018 Intel Corporation
 */

#include <linux/kref.h>

#include "gem/i915_gem_pm.h"
#include "gt/intel_gt.h"

#include "i915_selftest.h"

#include "igt_flush_test.h"
#include "lib_sw_fence.h"

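/*
 * live_active wraps an i915_active in a kref of its own so the tests can
 * observe when the retire callback has run (via the retired flag) and
 * control exactly when the wrapper itself is freed.
 */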
struct live_active {
        struct i915_active base;
        struct kref ref;
        bool retired;
};

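/* kref helpers for the wrapper; the final put finalises and frees it */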
static void __live_get(struct live_active *active)
{
        kref_get(&active->ref);
}

static void __live_free(struct live_active *active)
{
        i915_active_fini(&active->base);
        kfree(active);
}

static void __live_release(struct kref *ref)
{
        struct live_active *active = container_of(ref, typeof(*active), ref);

        __live_free(active);
}

static void __live_put(struct live_active *active)
{
        kref_put(&active->ref, __live_release);
}

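/*
 * i915_active callbacks: becoming active takes an extra reference on the
 * wrapper, and retiring marks it as retired before dropping that reference.
 */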
static int __live_active(struct i915_active *base)
{
        struct live_active *active = container_of(base, typeof(*active), base);

        __live_get(active);
        return 0;
}

static void __live_retire(struct i915_active *base)
{
        struct live_active *active = container_of(base, typeof(*active), base);

        active->retired = true;
        __live_put(active);
}

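/* Allocate a wrapper with its kref and i915_active both initialised. */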
static struct live_active *__live_alloc(struct drm_i915_private *i915)
{
        struct live_active *active;

        active = kzalloc(sizeof(*active), GFP_KERNEL);
        if (!active)
                return NULL;

        kref_init(&active->ref);
        i915_active_init(&active->base, __live_active, __live_retire, 0);

        return active;
}

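/*
 * Submit one kernel request per uabi engine, each held back by a single
 * heap fence and tracked by the i915_active. This lets us verify that the
 * tracker neither retires before the requests are allowed to run nor loses
 * count of any of them.
 */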
static struct live_active *
__live_active_setup(struct drm_i915_private *i915)
{
        struct intel_engine_cs *engine;
        struct i915_sw_fence *submit;
        struct live_active *active;
        unsigned int count = 0;
        int err = 0;

        active = __live_alloc(i915);
        if (!active)
                return ERR_PTR(-ENOMEM);

        submit = heap_fence_create(GFP_KERNEL);
        if (!submit) {
                kfree(active);
                return ERR_PTR(-ENOMEM);
        }

        err = i915_active_acquire(&active->base);
        if (err)
                goto out;

        for_each_uabi_engine(engine, i915) {
                struct i915_request *rq;

                rq = intel_engine_create_kernel_request(engine);
                if (IS_ERR(rq)) {
                        err = PTR_ERR(rq);
                        break;
                }

                err = i915_sw_fence_await_sw_fence_gfp(&rq->submit,
                                                       submit,
                                                       GFP_KERNEL);
                if (err >= 0)
                        err = i915_active_add_request(&active->base, rq);
                i915_request_add(rq);
                if (err) {
                        pr_err("Failed to track active ref!\n");
                        break;
                }

                count++;
        }

        i915_active_release(&active->base);
        if (READ_ONCE(active->retired) && count) {
                pr_err("i915_active retired before submission!\n");
                err = -EINVAL;
        }
        if (atomic_read(&active->base.count) != count) {
                pr_err("i915_active not tracking all requests, found %d, expected %d\n",
                       atomic_read(&active->base.count), count);
                err = -EINVAL;
        }

out:
        i915_sw_fence_commit(submit);
        heap_fence_put(submit);
        if (err) {
                __live_put(active);
                active = ERR_PTR(err);
        }

        return active;
}

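/*
 * live_active_wait: wait directly on the i915_active and check that the
 * retire callback has fired by the time the wait returns.
 */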
static int live_active_wait(void *arg)
{
        struct drm_i915_private *i915 = arg;
        struct live_active *active;
        int err = 0;

        /* Check that we get a callback when requests retire upon waiting */

        active = __live_active_setup(i915);
        if (IS_ERR(active))
                return PTR_ERR(active);

        __i915_active_wait(&active->base, TASK_UNINTERRUPTIBLE);
        if (!READ_ONCE(active->retired)) {
                struct drm_printer p = drm_err_printer(__func__);

                pr_err("i915_active not retired after waiting!\n");
                i915_active_print(&active->base, &p);

                err = -EINVAL;
        }

        __live_put(active);

        if (igt_flush_test(i915))
                err = -EIO;

        return err;
}

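/*
 * live_active_retire: do not wait on the i915_active itself; instead retire
 * everything via igt_flush_test() and check the callback still fired.
 */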
static int live_active_retire(void *arg)
{
        struct drm_i915_private *i915 = arg;
        struct live_active *active;
        int err = 0;

        /* Check that we get a callback when requests are indirectly retired */

        active = __live_active_setup(i915);
        if (IS_ERR(active))
                return PTR_ERR(active);

        /* waits for & retires all requests */
        if (igt_flush_test(i915))
                err = -EIO;

        if (!READ_ONCE(active->retired)) {
                struct drm_printer p = drm_err_printer(__func__);

                pr_err("i915_active not retired after flushing!\n");
                i915_active_print(&active->base, &p);

                err = -EINVAL;
        }

        __live_put(active);

        return err;
}

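/*
 * live_active_barrier: exercise the preallocated engine barriers rather
 * than explicit requests; the tracker should retire once all the engine
 * barriers have executed.
 */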
static int live_active_barrier(void *arg)
{
        struct drm_i915_private *i915 = arg;
        struct intel_engine_cs *engine;
        struct live_active *active;
        int err = 0;

        /* Check that we get a callback when the engine barriers are retired */

        active = __live_alloc(i915);
        if (!active)
                return -ENOMEM;

        err = i915_active_acquire(&active->base);
        if (err)
                goto out;

        for_each_uabi_engine(engine, i915) {
                err = i915_active_acquire_preallocate_barrier(&active->base,
                                                              engine);
                if (err)
                        break;

                i915_active_acquire_barrier(&active->base);
        }

        i915_active_release(&active->base);
        if (err)
                goto out;

        __i915_active_wait(&active->base, TASK_UNINTERRUPTIBLE);
        if (!READ_ONCE(active->retired)) {
                pr_err("i915_active not retired after flushing barriers!\n");
                err = -EINVAL;
        }

out:
        __live_put(active);

        if (igt_flush_test(i915))
                err = -EIO;

        return err;
}

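/* Subtest entry point; skipped entirely if the GPU is already wedged. */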
int i915_active_live_selftests(struct drm_i915_private *i915)
{
        static const struct i915_subtest tests[] = {
                SUBTEST(live_active_wait),
                SUBTEST(live_active_retire),
                SUBTEST(live_active_barrier),
        };

        if (intel_gt_is_wedged(&i915->gt))
                return 0;

        return i915_subtests(tests, i915);
}

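/*
 * The debug helpers below reach into i915_active internals (struct
 * active_node, is_barrier(), __barrier_to_engine(), ...) and so rely on
 * being built together with i915_active.c rather than standalone.
 */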
static struct intel_engine_cs *node_to_barrier(struct active_node *it)
{
        struct intel_engine_cs *engine;

        if (!is_barrier(&it->base))
                return NULL;

        engine = __barrier_to_engine(it);
        smp_rmb(); /* serialise with add_active_barriers */
        if (!is_barrier(&it->base))
                return NULL;

        return engine;
}

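/*
 * Dump the state of an i915_active: its callbacks, reference count and,
 * while it is still busy, every tracked node (engine barrier or timeline).
 */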
void i915_active_print(struct i915_active *ref, struct drm_printer *m)
{
        drm_printf(m, "active %ps:%ps\n", ref->active, ref->retire);
        drm_printf(m, "\tcount: %d\n", atomic_read(&ref->count));
        drm_printf(m, "\tpreallocated barriers? %s\n",
                   yesno(!llist_empty(&ref->preallocated_barriers)));

        if (i915_active_acquire_if_busy(ref)) {
                struct active_node *it, *n;

                rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
                        struct intel_engine_cs *engine;

                        engine = node_to_barrier(it);
                        if (engine) {
                                drm_printf(m, "\tbarrier: %s\n", engine->name);
                                continue;
                        }

                        if (i915_active_fence_isset(&it->base)) {
                                drm_printf(m,
                                           "\ttimeline: %llx\n", it->timeline);
                                continue;
                        }
                }

                i915_active_release(ref);
        }
}

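/*
 * Take and immediately release the lock: once we have acquired it, any
 * critical section that was running when we started has completed.
 */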
static void spin_unlock_wait(spinlock_t *lock)
{
        spin_lock_irq(lock);
        spin_unlock_irq(lock);
}

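/*
 * Steal the fence from the active slot and unlink our callback under the
 * fence lock so that the signal callback can no longer run against @ref,
 * then drop the count that fence was holding. The fence must already have
 * been signaled by this point.
 */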
static void active_flush(struct i915_active *ref,
                         struct i915_active_fence *active)
{
        struct dma_fence *fence;

        fence = xchg(__active_fence_slot(active), NULL);
        if (!fence)
                return;

        spin_lock_irq(fence->lock);
        __list_del_entry(&active->cb.node);
        spin_unlock_irq(fence->lock); /* serialise with fence->cb_list */
        atomic_dec(&ref->count);

        GEM_BUG_ON(!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags));
}

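/*
 * Flush any in-flight fence callbacks for @ref and then wait for its retire
 * callback to complete, whether it ran under the tree_lock or was deferred
 * to the worker.
 */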
void i915_active_unlock_wait(struct i915_active *ref)
{
        if (i915_active_acquire_if_busy(ref)) {
                struct active_node *it, *n;

                /* Wait for all active callbacks */
                rcu_read_lock();
                active_flush(ref, &ref->excl);
                rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node)
                        active_flush(ref, &it->base);
                rcu_read_unlock();

                i915_active_release(ref);
        }

        /* And wait for the retire callback */
        spin_unlock_wait(&ref->tree_lock);

        /* ... which may have been on a thread instead */
        flush_work(&ref->work);
}