linux/drivers/gpu/drm/i915/selftests/igt_spinner.c
/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2018 Intel Corporation
 */
#include "gt/intel_gt.h"

#include "gem/selftests/igt_gem_utils.h"

#include "igt_spinner.h"

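/*
 * igt_spinner_init - allocate and map the spinner's backing pages
 *
 * Creates one page for the seqno writes (the "hws" page) and one page
 * for the looping batch, and maps both for CPU access.
 */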
int igt_spinner_init(struct igt_spinner *spin, struct intel_gt *gt)
{
	unsigned int mode;
	void *vaddr;
	int err;

	memset(spin, 0, sizeof(*spin));
	spin->gt = gt;

	spin->hws = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
	if (IS_ERR(spin->hws)) {
		err = PTR_ERR(spin->hws);
		goto err;
	}

	spin->obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
	if (IS_ERR(spin->obj)) {
		err = PTR_ERR(spin->obj);
		goto err_hws;
	}

	i915_gem_object_set_cache_coherency(spin->hws, I915_CACHE_LLC);
	vaddr = i915_gem_object_pin_map(spin->hws, I915_MAP_WB);
	if (IS_ERR(vaddr)) {
		err = PTR_ERR(vaddr);
		goto err_obj;
	}
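	/* Poison the page: ~0 will not read back as an already-passed seqno. */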
	spin->seqno = memset(vaddr, 0xff, PAGE_SIZE);

	mode = i915_coherent_map_type(gt->i915);
	vaddr = i915_gem_object_pin_map(spin->obj, mode);
	if (IS_ERR(vaddr)) {
		err = PTR_ERR(vaddr);
		goto err_unpin_hws;
	}
	spin->batch = vaddr;

	return 0;

err_unpin_hws:
	i915_gem_object_unpin_map(spin->hws);
err_obj:
	i915_gem_object_put(spin->obj);
err_hws:
	i915_gem_object_put(spin->hws);
err:
	return err;
}

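/*
 * Each fence context gets its own u32 slot in the hws page; the offset
 * wraps at PAGE_SIZE, so distinct contexts may alias within the page.
 */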
static unsigned int seqno_offset(u64 fence)
{
	return offset_in_page(sizeof(u32) * fence);
}

static u64 hws_address(const struct i915_vma *hws,
		       const struct i915_request *rq)
{
	return hws->node.start + seqno_offset(rq->fence.context);
}

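/* Attach @vma to @rq, serialising against other users of the object. */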
static int move_to_active(struct i915_vma *vma,
			  struct i915_request *rq,
			  unsigned int flags)
{
	int err;

	i915_vma_lock(vma);
	err = i915_request_await_object(rq, vma->obj,
					flags & EXEC_OBJECT_WRITE);
	if (err == 0)
		err = i915_vma_move_to_active(vma, rq, flags);
	i915_vma_unlock(vma);

	return err;
}

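/*
 * igt_spinner_create_request - build a request whose batch spins forever
 *
 * The batch writes the request's seqno into the hws page, executes the
 * caller's arbitration command and then jumps back to its own start,
 * looping until igt_spinner_end() rewrites the first dword to
 * MI_BATCH_BUFFER_END.
 */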
struct i915_request *
igt_spinner_create_request(struct igt_spinner *spin,
			   struct intel_context *ce,
			   u32 arbitration_command)
{
	struct intel_engine_cs *engine = ce->engine;
	struct i915_request *rq = NULL;
	struct i915_vma *hws, *vma;
	unsigned int flags;
	u32 *batch;
	int err;

	GEM_BUG_ON(spin->gt != ce->vm->gt);

	if (!intel_engine_can_store_dword(ce->engine))
		return ERR_PTR(-ENODEV);

	vma = i915_vma_instance(spin->obj, ce->vm, NULL);
	if (IS_ERR(vma))
		return ERR_CAST(vma);

	hws = i915_vma_instance(spin->hws, ce->vm, NULL);
	if (IS_ERR(hws))
		return ERR_CAST(hws);

	err = i915_vma_pin(vma, 0, 0, PIN_USER);
	if (err)
		return ERR_PTR(err);

	err = i915_vma_pin(hws, 0, 0, PIN_USER);
	if (err)
		goto unpin_vma;

	rq = intel_context_create_request(ce);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto unpin_hws;
	}

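	/* Track both vmas in the request so they stay resident while it runs. */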
	err = move_to_active(vma, rq, 0);
	if (err)
		goto cancel_rq;

	err = move_to_active(hws, rq, 0);
	if (err)
		goto cancel_rq;

	batch = spin->batch;

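	/*
	 * Write our seqno into the hws page; the store-dword encoding
	 * differs across generations.
	 */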
	if (INTEL_GEN(rq->engine->i915) >= 8) {
		*batch++ = MI_STORE_DWORD_IMM_GEN4;
		*batch++ = lower_32_bits(hws_address(hws, rq));
		*batch++ = upper_32_bits(hws_address(hws, rq));
	} else if (INTEL_GEN(rq->engine->i915) >= 6) {
		*batch++ = MI_STORE_DWORD_IMM_GEN4;
		*batch++ = 0;
		*batch++ = hws_address(hws, rq);
	} else if (INTEL_GEN(rq->engine->i915) >= 4) {
		*batch++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
		*batch++ = 0;
		*batch++ = hws_address(hws, rq);
	} else {
		*batch++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
		*batch++ = hws_address(hws, rq);
	}
	*batch++ = rq->fence.seqno;

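	/*
	 * The caller chooses the arbitration command: MI_ARB_CHECK leaves
	 * the loop preemptible, MI_NOOP does not.
	 */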
	*batch++ = arbitration_command;

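	/* Jump back to the start of the batch, spinning until ended. */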
	if (INTEL_GEN(rq->engine->i915) >= 8)
		*batch++ = MI_BATCH_BUFFER_START | BIT(8) | 1;
	else if (IS_HASWELL(rq->engine->i915))
		*batch++ = MI_BATCH_BUFFER_START | MI_BATCH_PPGTT_HSW;
	else if (INTEL_GEN(rq->engine->i915) >= 6)
		*batch++ = MI_BATCH_BUFFER_START;
	else
		*batch++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT;
	*batch++ = lower_32_bits(vma->node.start);
	*batch++ = upper_32_bits(vma->node.start);

	*batch++ = MI_BATCH_BUFFER_END; /* not reached */

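	/* Flush the CPU-written batch into memory before the GPU reads it. */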
	intel_gt_chipset_flush(engine->gt);

	if (engine->emit_init_breadcrumb) {
		err = engine->emit_init_breadcrumb(rq);
		if (err)
			goto cancel_rq;
	}

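	/* On gen5 and earlier the batch must run privileged for the store to land. */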
	flags = 0;
	if (INTEL_GEN(rq->engine->i915) <= 5)
		flags |= I915_DISPATCH_SECURE;
	err = engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, flags);

cancel_rq:
	if (err) {
		i915_request_set_error_once(rq, err);
		i915_request_add(rq);
	}
unpin_hws:
	i915_vma_unpin(hws);
unpin_vma:
	i915_vma_unpin(vma);
	return err ? ERR_PTR(err) : rq;
}

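/* Read back the seqno most recently written by the spinner for @rq's context. */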
static u32
hws_seqno(const struct igt_spinner *spin, const struct i915_request *rq)
{
	u32 *seqno = spin->seqno + seqno_offset(rq->fence.context);

	return READ_ONCE(*seqno);
}

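/*
 * igt_spinner_end - stop the spinner
 *
 * Overwrite the first instruction of the batch with MI_BATCH_BUFFER_END;
 * the GPU picks it up on its next pass around the loop.
 */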
void igt_spinner_end(struct igt_spinner *spin)
{
	*spin->batch = MI_BATCH_BUFFER_END;
	intel_gt_chipset_flush(spin->gt);
}

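/* Drop the mappings and object references taken in igt_spinner_init(). */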
void igt_spinner_fini(struct igt_spinner *spin)
{
	igt_spinner_end(spin);

	i915_gem_object_unpin_map(spin->obj);
	i915_gem_object_put(spin->obj);

	i915_gem_object_unpin_map(spin->hws);
	i915_gem_object_put(spin->hws);
}

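/*
 * igt_wait_for_spinner - wait for the spinner to start running
 *
 * Busy-waits up to 100us and then sleep-waits up to 50ms for the seqno
 * write the batch performs on each pass, returning true if it appears.
 */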
bool igt_wait_for_spinner(struct igt_spinner *spin, struct i915_request *rq)
{
	return !(wait_for_us(i915_seqno_passed(hws_seqno(spin, rq),
					       rq->fence.seqno),
			     100) &&
		 wait_for(i915_seqno_passed(hws_seqno(spin, rq),
					    rq->fence.seqno),
			  50));
}
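
/*
 * Example usage (a minimal sketch, not part of the original file): a
 * selftest typically drives these helpers roughly as below, assuming a
 * pinned intel_context @ce whose vm belongs to the same @gt and an
 * integer @err in scope:
 *
 *	struct igt_spinner spin;
 *	struct i915_request *rq;
 *
 *	err = igt_spinner_init(&spin, gt);
 *	if (err)
 *		return err;
 *
 *	rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);
 *	if (IS_ERR(rq)) {
 *		err = PTR_ERR(rq);
 *		goto out_spin;
 *	}
 *
 *	i915_request_get(rq);
 *	i915_request_add(rq);
 *	if (!igt_wait_for_spinner(&spin, rq))
 *		err = -ETIME;
 *
 *	igt_spinner_end(&spin);
 *	i915_request_put(rq);
 * out_spin:
 *	igt_spinner_fini(&spin);
 */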