linux/drivers/gpu/drm/i915/selftests/igt_spinner.c
/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2018 Intel Corporation
 */

#include "igt_spinner.h"

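/*
 * igt_spinner submits a batch that spins forever on the GPU: it writes
 * its seqno into a heartbeat (HWS) page so the CPU can see it running,
 * then loops back on itself until igt_spinner_end() rewrites the batch
 * to terminate. The live selftests use it to hold an engine busy while
 * exercising preemption, resets and hangcheck.
 *
 * A minimal usage sketch (error handling trimmed; the locking and
 * wakeref expected by these helpers are assumed to be held by the
 * caller, as in the other live selftests):
 *
 *	struct igt_spinner spin;
 *	struct i915_request *rq;
 *
 *	if (igt_spinner_init(&spin, i915))
 *		return -ENOMEM;
 *
 *	rq = igt_spinner_create_request(&spin, ctx, engine, MI_NOOP);
 *	if (!IS_ERR(rq)) {
 *		i915_request_get(rq);
 *		i915_request_add(rq);
 *		if (!igt_wait_for_spinner(&spin, rq))
 *			pr_err("spinner failed to start\n");
 *		igt_spinner_end(&spin);
 *		i915_request_put(rq);
 *	}
 *
 *	igt_spinner_fini(&spin);
 */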
int igt_spinner_init(struct igt_spinner *spin, struct drm_i915_private *i915)
{
        unsigned int mode;
        void *vaddr;
        int err;

        GEM_BUG_ON(INTEL_GEN(i915) < 8);

        memset(spin, 0, sizeof(*spin));
        spin->i915 = i915;

        spin->hws = i915_gem_object_create_internal(i915, PAGE_SIZE);
        if (IS_ERR(spin->hws)) {
                err = PTR_ERR(spin->hws);
                goto err;
        }

        spin->obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
        if (IS_ERR(spin->obj)) {
                err = PTR_ERR(spin->obj);
                goto err_hws;
        }

        i915_gem_object_set_cache_coherency(spin->hws, I915_CACHE_LLC);
        vaddr = i915_gem_object_pin_map(spin->hws, I915_MAP_WB);
        if (IS_ERR(vaddr)) {
                err = PTR_ERR(vaddr);
                goto err_obj;
        }
        spin->seqno = memset(vaddr, 0xff, PAGE_SIZE);

        mode = i915_coherent_map_type(i915);
        vaddr = i915_gem_object_pin_map(spin->obj, mode);
        if (IS_ERR(vaddr)) {
                err = PTR_ERR(vaddr);
                goto err_unpin_hws;
        }
        spin->batch = vaddr;

        return 0;

err_unpin_hws:
        i915_gem_object_unpin_map(spin->hws);
err_obj:
        i915_gem_object_put(spin->obj);
err_hws:
        i915_gem_object_put(spin->hws);
err:
        return err;
}

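/*
 * Each fence context gets its own dword slot in the HWS page: scale the
 * context id by sizeof(u32) and wrap within the page. hws_address()
 * turns that offset into the GPU address used by the store in the batch.
 */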
static unsigned int seqno_offset(u64 fence)
{
        return offset_in_page(sizeof(u32) * fence);
}

static u64 hws_address(const struct i915_vma *hws,
                       const struct i915_request *rq)
{
        return hws->node.start + seqno_offset(rq->fence.context);
}

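/*
 * Mark the vma as active for this request and, if not already done,
 * take an object reference that is released only when the object
 * becomes idle, keeping it alive while the GPU may still use it.
 */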
static int move_to_active(struct i915_vma *vma,
                          struct i915_request *rq,
                          unsigned int flags)
{
        int err;

        err = i915_vma_move_to_active(vma, rq, flags);
        if (err)
                return err;

        if (!i915_gem_object_has_active_reference(vma->obj)) {
                i915_gem_object_get(vma->obj);
                i915_gem_object_set_active_reference(vma->obj);
        }

        return 0;
}

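/*
 * Build and submit the spinning batch on @engine in @ctx: a
 * MI_STORE_DWORD_IMM to report that the batch has started executing,
 * the caller's @arbitration_command, and a MI_BATCH_BUFFER_START
 * pointing back at the start of the batch so it spins until
 * igt_spinner_end() is called.
 */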
struct i915_request *
igt_spinner_create_request(struct igt_spinner *spin,
                           struct i915_gem_context *ctx,
                           struct intel_engine_cs *engine,
                           u32 arbitration_command)
{
        struct i915_address_space *vm = &ctx->ppgtt->vm;
        struct i915_request *rq = NULL;
        struct i915_vma *hws, *vma;
        u32 *batch;
        int err;

        vma = i915_vma_instance(spin->obj, vm, NULL);
        if (IS_ERR(vma))
                return ERR_CAST(vma);

        hws = i915_vma_instance(spin->hws, vm, NULL);
        if (IS_ERR(hws))
                return ERR_CAST(hws);

        err = i915_vma_pin(vma, 0, 0, PIN_USER);
        if (err)
                return ERR_PTR(err);

        err = i915_vma_pin(hws, 0, 0, PIN_USER);
        if (err)
                goto unpin_vma;

        rq = i915_request_alloc(engine, ctx);
        if (IS_ERR(rq)) {
                err = PTR_ERR(rq);
                goto unpin_hws;
        }

        err = move_to_active(vma, rq, 0);
        if (err)
                goto cancel_rq;

        err = move_to_active(hws, rq, 0);
        if (err)
                goto cancel_rq;

        batch = spin->batch;

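        /*
         * Report in: store the request's fence.seqno into this
         * context's slot in the HWS page, where igt_wait_for_spinner()
         * can observe it from the CPU.
         */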
        *batch++ = MI_STORE_DWORD_IMM_GEN4;
        *batch++ = lower_32_bits(hws_address(hws, rq));
        *batch++ = upper_32_bits(hws_address(hws, rq));
        *batch++ = rq->fence.seqno;

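        /*
         * The caller picks the arbitration point: MI_ARB_CHECK allows
         * the spin to be preempted, while MI_NOOP leaves no arbitration
         * point inside the loop.
         */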
        *batch++ = arbitration_command;

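        /*
         * Loop back to the start of the batch. Bit 8 keeps the jump
         * within the per-process GTT and the low bits encode the gen8+
         * instruction length. The trailing MI_BATCH_BUFFER_END is only
         * reached after igt_spinner_end() rewrites the first dword.
         */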
        *batch++ = MI_BATCH_BUFFER_START | 1 << 8 | 1;
        *batch++ = lower_32_bits(vma->node.start);
        *batch++ = upper_32_bits(vma->node.start);
        *batch++ = MI_BATCH_BUFFER_END; /* not reached */

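        /* Make sure the GPU sees our writes to the batch before it runs. */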
        i915_gem_chipset_flush(spin->i915);

        if (engine->emit_init_breadcrumb &&
            rq->timeline->has_initial_breadcrumb) {
                err = engine->emit_init_breadcrumb(rq);
                if (err)
                        goto cancel_rq;
        }

        err = engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, 0);

cancel_rq:
        if (err) {
                i915_request_skip(rq, err);
                i915_request_add(rq);
        }
unpin_hws:
        i915_vma_unpin(hws);
unpin_vma:
        i915_vma_unpin(vma);
        return err ? ERR_PTR(err) : rq;
}

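/* Sample this request's seqno slot in the HWS page from the CPU. */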
static u32
hws_seqno(const struct igt_spinner *spin, const struct i915_request *rq)
{
        u32 *seqno = spin->seqno + seqno_offset(rq->fence.context);

        return READ_ONCE(*seqno);
}

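/*
 * Stop the spin by rewriting the first dword of the batch to
 * MI_BATCH_BUFFER_END; the GPU terminates on its next pass around the
 * loop.
 */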
void igt_spinner_end(struct igt_spinner *spin)
{
        *spin->batch = MI_BATCH_BUFFER_END;
        i915_gem_chipset_flush(spin->i915);
}

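/*
 * Release the spinner's objects, making sure the batch has been told to
 * terminate before its mapping is unpinned.
 */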
void igt_spinner_fini(struct igt_spinner *spin)
{
        igt_spinner_end(spin);

        i915_gem_object_unpin_map(spin->obj);
        i915_gem_object_put(spin->obj);

        i915_gem_object_unpin_map(spin->hws);
        i915_gem_object_put(spin->hws);
}

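/*
 * Poll for the spinner's breadcrumb in the HWS: a short 10us busy-wait
 * for the common case, then a sleeping wait of up to a second. Returns
 * true once the seqno write is visible, i.e. the spinner is known to be
 * executing on the GPU.
 */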
bool igt_wait_for_spinner(struct igt_spinner *spin, struct i915_request *rq)
{
        return !(wait_for_us(i915_seqno_passed(hws_seqno(spin, rq),
                                               rq->fence.seqno),
                             10) &&
                 wait_for(i915_seqno_passed(hws_seqno(spin, rq),
                                            rq->fence.seqno),
                          1000));
}