linux/drivers/gpu/drm/i915/selftests/igt_spinner.c
/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2018 Intel Corporation
 */
#include "gt/intel_gpu_commands.h"
#include "gt/intel_gt.h"

#include "gem/selftests/igt_gem_utils.h"

#include "igt_spinner.h"

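/*
 * igt_spinner provides the selftests with a batch that keeps an engine
 * busy: the batch writes its request's seqno into a per-context slot of
 * a "HWS" page and then branches back to its own start, spinning until
 * igt_spinner_end() replaces the first dword with MI_BATCH_BUFFER_END.
 *
 * A minimal usage sketch; the surrounding selftest plumbing (how @gt and
 * the intel_context @ce are obtained, full error handling) is assumed
 * and elided here:
 *
 *	struct igt_spinner spin;
 *	struct i915_request *rq;
 *	int err;
 *
 *	err = igt_spinner_init(&spin, gt);
 *	if (err)
 *		return err;
 *
 *	rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);
 *	if (IS_ERR(rq)) {
 *		igt_spinner_fini(&spin);
 *		return PTR_ERR(rq);
 *	}
 *
 *	i915_request_get(rq);
 *	i915_request_add(rq);
 *
 *	if (igt_wait_for_spinner(&spin, rq)) {
 *		(exercise preemption, resets, etc. while the engine spins)
 *	}
 *
 *	igt_spinner_end(&spin);
 *	i915_request_put(rq);
 *	igt_spinner_fini(&spin);
 */

/*
 * igt_spinner_init - allocate the backing objects for a spinner
 *
 * Allocates one page for the seqno writes (the "HWS" object) and one page
 * for the spinning batch. Nothing is pinned or mapped here; that is done
 * lazily by igt_spinner_pin() or on first igt_spinner_create_request().
 */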
int igt_spinner_init(struct igt_spinner *spin, struct intel_gt *gt)
{
        int err;

        memset(spin, 0, sizeof(*spin));
        spin->gt = gt;

        spin->hws = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
        if (IS_ERR(spin->hws)) {
                err = PTR_ERR(spin->hws);
                goto err;
        }
        i915_gem_object_set_cache_coherency(spin->hws, I915_CACHE_LLC);

        spin->obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
        if (IS_ERR(spin->obj)) {
                err = PTR_ERR(spin->obj);
                goto err_hws;
        }

        return 0;

err_hws:
        i915_gem_object_put(spin->hws);
err:
        return err;
}

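/*
 * Acquire a VMA for @obj in the context's VM, map the object into the
 * kernel with the requested map @mode and pin the VMA for GPU use. When
 * a ww context is supplied, the object lock is left held and the ww-aware
 * pin path is used; otherwise the lock is dropped again before returning.
 */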
static void *igt_spinner_pin_obj(struct intel_context *ce,
                                 struct i915_gem_ww_ctx *ww,
                                 struct drm_i915_gem_object *obj,
                                 unsigned int mode, struct i915_vma **vma)
{
        void *vaddr;
        int ret;

        *vma = i915_vma_instance(obj, ce->vm, NULL);
        if (IS_ERR(*vma))
                return ERR_CAST(*vma);

        ret = i915_gem_object_lock(obj, ww);
        if (ret)
                return ERR_PTR(ret);

        vaddr = i915_gem_object_pin_map(obj, mode);

        if (!ww)
                i915_gem_object_unlock(obj);

        if (IS_ERR(vaddr))
                return vaddr;

        if (ww)
                ret = i915_vma_pin_ww(*vma, ww, 0, 0, PIN_USER);
        else
                ret = i915_vma_pin(*vma, 0, 0, PIN_USER);

        if (ret) {
                i915_gem_object_unpin_map(obj);
                return ERR_PTR(ret);
        }

        return vaddr;
}

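/*
 * igt_spinner_pin - map and pin the spinner objects for a context
 *
 * Maps the HWS page (filled with 0xff so that stale slots do not read as
 * an already-passed seqno) and the batch page, and pins both VMAs in
 * @ce's VM. A spinner is tied to a single context; reusing it with a
 * different context is a programming error (WARN + -ENODEV).
 */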
int igt_spinner_pin(struct igt_spinner *spin,
                    struct intel_context *ce,
                    struct i915_gem_ww_ctx *ww)
{
        void *vaddr;

        if (spin->ce && WARN_ON(spin->ce != ce))
                return -ENODEV;
        spin->ce = ce;

        if (!spin->seqno) {
                vaddr = igt_spinner_pin_obj(ce, ww, spin->hws, I915_MAP_WB, &spin->hws_vma);
                if (IS_ERR(vaddr))
                        return PTR_ERR(vaddr);

                spin->seqno = memset(vaddr, 0xff, PAGE_SIZE);
        }

        if (!spin->batch) {
                unsigned int mode;

                mode = i915_coherent_map_type(spin->gt->i915, spin->obj, false);
                vaddr = igt_spinner_pin_obj(ce, ww, spin->obj, mode, &spin->batch_vma);
                if (IS_ERR(vaddr))
                        return PTR_ERR(vaddr);

                spin->batch = vaddr;
        }

        return 0;
}

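/*
 * Each fence context gets its own u32 slot in the HWS page (wrapping at
 * page size), so concurrent spinners do not overwrite each other's seqno.
 */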
static unsigned int seqno_offset(u64 fence)
{
        return offset_in_page(sizeof(u32) * fence);
}

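/* GPU address of the HWS seqno slot used by @rq's fence context. */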
static u64 hws_address(const struct i915_vma *hws,
                       const struct i915_request *rq)
{
        return hws->node.start + seqno_offset(rq->fence.context);
}

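/*
 * Serialise the request against any prior users of the object backing
 * @vma and track the vma as active for the lifetime of the request.
 */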
static int move_to_active(struct i915_vma *vma,
                          struct i915_request *rq,
                          unsigned int flags)
{
        int err;

        i915_vma_lock(vma);
        err = i915_request_await_object(rq, vma->obj,
                                        flags & EXEC_OBJECT_WRITE);
        if (err == 0)
                err = i915_vma_move_to_active(vma, rq, flags);
        i915_vma_unlock(vma);

        return err;
}

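/*
 * igt_spinner_create_request - build a request that spins on the GPU
 *
 * Emits a batch that stores rq->fence.seqno into this context's HWS slot,
 * executes @arbitration_command (callers typically pass MI_ARB_CHECK to
 * leave an explicit arbitration point inside the loop, or MI_NOOP) and
 * then branches back to the start of the batch. The loop only terminates
 * once igt_spinner_end() replaces the first dword with
 * MI_BATCH_BUFFER_END.
 *
 * The request is returned unsubmitted; the caller must i915_request_add()
 * it (and typically i915_request_get() it first).
 */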
struct i915_request *
igt_spinner_create_request(struct igt_spinner *spin,
                           struct intel_context *ce,
                           u32 arbitration_command)
{
        struct intel_engine_cs *engine = ce->engine;
        struct i915_request *rq = NULL;
        struct i915_vma *hws, *vma;
        unsigned int flags;
        u32 *batch;
        int err;

        GEM_BUG_ON(spin->gt != ce->vm->gt);

        if (!intel_engine_can_store_dword(ce->engine))
                return ERR_PTR(-ENODEV);

        if (!spin->batch) {
                err = igt_spinner_pin(spin, ce, NULL);
                if (err)
                        return ERR_PTR(err);
        }

        hws = spin->hws_vma;
        vma = spin->batch_vma;

        rq = intel_context_create_request(ce);
        if (IS_ERR(rq))
                return ERR_CAST(rq);

        err = move_to_active(vma, rq, 0);
        if (err)
                goto cancel_rq;

        err = move_to_active(hws, rq, 0);
        if (err)
                goto cancel_rq;

        batch = spin->batch;

        /* Store rq->fence.seqno into this context's HWS slot (encoding differs per gen). */
        if (GRAPHICS_VER(rq->engine->i915) >= 8) {
                *batch++ = MI_STORE_DWORD_IMM_GEN4;
                *batch++ = lower_32_bits(hws_address(hws, rq));
                *batch++ = upper_32_bits(hws_address(hws, rq));
        } else if (GRAPHICS_VER(rq->engine->i915) >= 6) {
                *batch++ = MI_STORE_DWORD_IMM_GEN4;
                *batch++ = 0;
                *batch++ = hws_address(hws, rq);
        } else if (GRAPHICS_VER(rq->engine->i915) >= 4) {
                *batch++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
                *batch++ = 0;
                *batch++ = hws_address(hws, rq);
        } else {
                *batch++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
                *batch++ = hws_address(hws, rq);
        }
        *batch++ = rq->fence.seqno;

        *batch++ = arbitration_command;

        /* Branch back to the start of the batch: spin until igt_spinner_end(). */
        if (GRAPHICS_VER(rq->engine->i915) >= 8)
                *batch++ = MI_BATCH_BUFFER_START | BIT(8) | 1;
        else if (IS_HASWELL(rq->engine->i915))
                *batch++ = MI_BATCH_BUFFER_START | MI_BATCH_PPGTT_HSW;
        else if (GRAPHICS_VER(rq->engine->i915) >= 6)
                *batch++ = MI_BATCH_BUFFER_START;
        else
                *batch++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT;
        *batch++ = lower_32_bits(vma->node.start);
        *batch++ = upper_32_bits(vma->node.start);

        *batch++ = MI_BATCH_BUFFER_END; /* not reached */

        intel_gt_chipset_flush(engine->gt);

        if (engine->emit_init_breadcrumb) {
                err = engine->emit_init_breadcrumb(rq);
                if (err)
                        goto cancel_rq;
        }

        flags = 0;
        if (GRAPHICS_VER(rq->engine->i915) <= 5)
                flags |= I915_DISPATCH_SECURE;
        err = engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, flags);

cancel_rq:
        if (err) {
                i915_request_set_error_once(rq, err);
                i915_request_add(rq);
        }
        return err ? ERR_PTR(err) : rq;
}

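/* Read back the seqno the spinning batch has written for @rq, if any. */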
static u32
hws_seqno(const struct igt_spinner *spin, const struct i915_request *rq)
{
        u32 *seqno = spin->seqno + seqno_offset(rq->fence.context);

        return READ_ONCE(*seqno);
}

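/*
 * igt_spinner_end - ask the spinning batch to terminate
 *
 * Overwrites the first batch dword with MI_BATCH_BUFFER_END and flushes,
 * so the next pass around the loop ends the batch. Safe to call even if
 * the spinner was never pinned.
 */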
void igt_spinner_end(struct igt_spinner *spin)
{
        if (!spin->batch)
                return;

        *spin->batch = MI_BATCH_BUFFER_END;
        intel_gt_chipset_flush(spin->gt);
}

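/*
 * igt_spinner_fini - tear down a spinner
 *
 * Ends the spin (if any), unpins and unmaps whatever was pinned by
 * igt_spinner_pin() and releases the backing objects.
 */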
void igt_spinner_fini(struct igt_spinner *spin)
{
        igt_spinner_end(spin);

        if (spin->batch) {
                i915_vma_unpin(spin->batch_vma);
                i915_gem_object_unpin_map(spin->obj);
        }
        i915_gem_object_put(spin->obj);

        if (spin->seqno) {
                i915_vma_unpin(spin->hws_vma);
                i915_gem_object_unpin_map(spin->hws);
        }
        i915_gem_object_put(spin->hws);
}

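/*
 * igt_wait_for_spinner - wait for the spinning batch to start executing
 *
 * Returns true once the batch has written @rq's seqno into the HWS page,
 * i.e. once the spinner is actually running on the GPU; false if that
 * does not happen within a short timeout (a quick poll followed by a
 * sleeping wait).
 */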
bool igt_wait_for_spinner(struct igt_spinner *spin, struct i915_request *rq)
{
        if (i915_request_is_ready(rq))
                intel_engine_flush_submission(rq->engine);

        return !(wait_for_us(i915_seqno_passed(hws_seqno(spin, rq),
                                               rq->fence.seqno),
                             100) &&
                 wait_for(i915_seqno_passed(hws_seqno(spin, rq),
                                            rq->fence.seqno),
                          50));
}