/* linux/drivers/gpu/drm/i915/gt/intel_ring.h */
   1/* SPDX-License-Identifier: MIT */
   2/*
   3 * Copyright © 2019 Intel Corporation
   4 */
   5
   6#ifndef INTEL_RING_H
   7#define INTEL_RING_H
   8
   9#include "i915_gem.h" /* GEM_BUG_ON */
  10#include "i915_request.h"
  11#include "intel_ring_types.h"
  12
  13struct intel_engine_cs;
  14
  15struct intel_ring *
  16intel_engine_create_ring(struct intel_engine_cs *engine, int size);
  17
  18u32 *intel_ring_begin(struct i915_request *rq, unsigned int num_dwords);
  19int intel_ring_cacheline_align(struct i915_request *rq);
  20
  21unsigned int intel_ring_update_space(struct intel_ring *ring);
  22
  23void __intel_ring_pin(struct intel_ring *ring);
  24int intel_ring_pin(struct intel_ring *ring, struct i915_gem_ww_ctx *ww);
  25void intel_ring_unpin(struct intel_ring *ring);
  26void intel_ring_reset(struct intel_ring *ring, u32 tail);
  27
  28void intel_ring_free(struct kref *ref);
  29
/*
 * intel_ring_get - acquire a reference on a ring buffer.
 * @ring: the ring to reference
 *
 * Takes a kref on @ring; pair every call with intel_ring_put().
 * Returns @ring so the call composes at assignment sites.
 */
static inline struct intel_ring *intel_ring_get(struct intel_ring *ring)
{
	kref_get(&ring->ref);
	return ring;
}

/*
 * intel_ring_put - drop a reference on a ring buffer.
 * @ring: the ring previously referenced via intel_ring_get()
 *
 * Releases a kref; when the last reference goes away, intel_ring_free()
 * is invoked to tear the ring down.
 */
static inline void intel_ring_put(struct intel_ring *ring)
{
	kref_put(&ring->ref, intel_ring_free);
}

static inline void intel_ring_advance(struct i915_request *rq, u32 *cs)
{
	/* Dummy function.
	 *
	 * This serves as a placeholder in the code so that the reader
	 * can compare against the preceding intel_ring_begin() and
	 * check that the number of dwords emitted matches the space
	 * reserved for the command packet (i.e. the value passed to
	 * intel_ring_begin()).
	 *
	 * The asserts check that the caller's write cursor @cs has advanced
	 * exactly to ring->emit (the byte offset of the next free dword as
	 * reserved by intel_ring_begin()), and that the emit offset remains
	 * qword aligned as required for RING_TAIL.
	 */
	GEM_BUG_ON((rq->ring->vaddr + rq->ring->emit) != cs);
	GEM_BUG_ON(!IS_ALIGNED(rq->ring->emit, 8)); /* RING_TAIL qword align */
}

  55static inline u32 intel_ring_wrap(const struct intel_ring *ring, u32 pos)
  56{
  57        return pos & (ring->size - 1);
  58}
  59
/*
 * intel_ring_direction - compare two offsets within @ring.
 *
 * Returns a value whose sign gives the direction from @prev to @next:
 * positive when @next is ahead, negative when behind, zero when equal.
 * The unsigned difference is shifted left by ring->wrap so that the sign
 * bit reflects the shorter way around the ring even across the wrap point.
 * NOTE(review): assumes ring->wrap is derived from ring->size — confirm
 * against the ring setup in intel_ring_types.h / intel_engine_create_ring().
 */
static inline int intel_ring_direction(const struct intel_ring *ring,
				       u32 next, u32 prev)
{
	/* Compile-time check that callers pass the same type as ring->size. */
	typecheck(typeof(ring->size), next);
	typecheck(typeof(ring->size), prev);
	return (next - prev) << ring->wrap;
}

  68static inline bool
  69intel_ring_offset_valid(const struct intel_ring *ring,
  70                        unsigned int pos)
  71{
  72        if (pos & -ring->size) /* must be strictly within the ring */
  73                return false;
  74
  75        if (!IS_ALIGNED(pos, 8)) /* must be qword aligned */
  76                return false;
  77
  78        return true;
  79}
  80
  81static inline u32 intel_ring_offset(const struct i915_request *rq, void *addr)
  82{
  83        /* Don't write ring->size (equivalent to 0) as that hangs some GPUs. */
  84        u32 offset = addr - rq->ring->vaddr;
  85
  86        GEM_BUG_ON(offset > rq->ring->size);
  87        return intel_ring_wrap(rq->ring, offset);
  88}
  89
/* Validate a proposed RING_TAIL value against the last-known RING_HEAD. */
static inline void
assert_ring_tail_valid(const struct intel_ring *ring, unsigned int tail)
{
	unsigned int head = READ_ONCE(ring->head);

	/* tail must be strictly inside the ring and qword aligned. */
	GEM_BUG_ON(!intel_ring_offset_valid(ring, tail));

	/*
	 * "Ring Buffer Use"
	 *	Gen2 BSpec "1. Programming Environment" / 1.4.4.6
	 *	Gen3 BSpec "1c Memory Interface Functions" / 2.3.4.5
	 *	Gen4+ BSpec "1c Memory Interface and Command Stream" / 5.3.4.5
	 * "If the Ring Buffer Head Pointer and the Tail Pointer are on the
	 * same cacheline, the Head Pointer must not be greater than the Tail
	 * Pointer."
	 *
	 * We use ring->head as the last known location of the actual RING_HEAD,
	 * it may have advanced but in the worst case it is equally the same
	 * as ring->head and so we should never program RING_TAIL to advance
	 * into the same cacheline as ring->head.
	 */
#define cacheline(a) round_down(a, CACHELINE_BYTES)
	GEM_BUG_ON(cacheline(tail) == cacheline(head) && tail < head);
#undef cacheline
}

/*
 * intel_ring_set_tail - record the new software tail of the ring.
 *
 * Returns @tail for the caller's convenience.
 */
static inline unsigned int
intel_ring_set_tail(struct intel_ring *ring, unsigned int tail)
{
	/* Whilst writes to the tail are strictly ordered, there is no
	 * serialisation between readers and the writers. The tail may be
	 * read by i915_request_retire() just as it is being updated
	 * by execlists, as although the breadcrumb is complete, the context
	 * switch hasn't been seen.
	 */
	assert_ring_tail_valid(ring, tail);
	ring->tail = tail;
	return tail;
}

 130static inline unsigned int
 131__intel_ring_space(unsigned int head, unsigned int tail, unsigned int size)
 132{
 133        /*
 134         * "If the Ring Buffer Head Pointer and the Tail Pointer are on the
 135         * same cacheline, the Head Pointer must not be greater than the Tail
 136         * Pointer."
 137         */
 138        GEM_BUG_ON(!is_power_of_2(size));
 139        return (head - tail - CACHELINE_BYTES) & (size - 1);
 140}
 141
 142#endif /* INTEL_RING_H */
 143