/* linux/drivers/gpu/drm/i915/i915_gem_timeline.h */
/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#ifndef I915_GEM_TIMELINE_H
#define I915_GEM_TIMELINE_H

#include <linux/list.h>

#include "i915_utils.h"
#include "i915_gem_request.h"
#include "i915_syncmap.h"

struct i915_gem_timeline;

/**
 * struct intel_timeline - per-engine slice of a struct i915_gem_timeline
 *
 * Each i915_gem_timeline embeds one of these per engine (see the
 * engine[I915_NUM_ENGINES] array in struct i915_gem_timeline); it tracks
 * the requests and synchronisation state for that engine's timeline.
 */
struct intel_timeline {
	/* dma-fence context id for fences on this timeline */
	u64 fence_context;
	/*
	 * Per-timeline seqno counter; presumably the most recently
	 * assigned breadcrumb seqno — confirm in i915_gem_request.c.
	 */
	u32 seqno;

	/**
	 * Count of outstanding requests, from the time they are constructed
	 * to the moment they are retired. Loosely coupled to hardware.
	 */
	u32 inflight_seqnos;

	/*
	 * NOTE(review): exact coverage of this lock is not visible in this
	 * header — confirm which fields it guards in i915_gem_timeline.c.
	 */
	spinlock_t lock;

	/**
	 * List of breadcrumbs associated with GPU requests currently
	 * outstanding.
	 */
	struct list_head requests;

	/* Contains an RCU guarded pointer to the last request. No reference is
	 * held to the request, users must carefully acquire a reference to
	 * the request using i915_gem_active_get_request_rcu(), or hold the
	 * struct_mutex.
	 */
	struct i915_gem_active last_request;

	/**
	 * We track the most recent seqno that we wait on in every context so
	 * that we only have to emit a new await and dependency on a more
	 * recent sync point. As the contexts may be executed out-of-order, we
	 * have to track each individually and cannot rely on an absolute
	 * global_seqno. When we know that all tracked fences are completed
	 * (i.e. when the driver is idle), we know that the syncmap is
	 * redundant and we can discard it without loss of generality.
	 */
	struct i915_syncmap *sync;
	/**
	 * Separately to the inter-context seqno map above, we track the last
	 * barrier (e.g. semaphore wait) to the global engine timelines. Note
	 * that this tracks global_seqno rather than the context.seqno, and
	 * so it is subject to the limitations of hw wraparound and that we
	 * may need to revoke global_seqno (on pre-emption).
	 */
	u32 global_sync[I915_NUM_ENGINES];

	/* Backpointer to the i915_gem_timeline that embeds this timeline */
	struct i915_gem_timeline *common;
};
  82
/**
 * struct i915_gem_timeline - a named timeline spanning all engines
 *
 * Groups one struct intel_timeline per engine under a common name,
 * linked into a driver-wide list (presumably on drm_i915_private —
 * confirm in i915_gem_timeline.c).
 */
struct i915_gem_timeline {
	/* Link in the driver's list of timelines */
	struct list_head link;

	/* Owning device */
	struct drm_i915_private *i915;
	/* Human-readable name, used for debug/identification */
	const char *name;

	/* Per-engine view of this timeline */
	struct intel_timeline engine[I915_NUM_ENGINES];
};
  91
/* Initialise @tl for @i915 under @name; returns 0 on success (errno style
 * return presumed — confirm in i915_gem_timeline.c).
 */
int i915_gem_timeline_init(struct drm_i915_private *i915,
			   struct i915_gem_timeline *tl,
			   const char *name);
/* Initialise the driver-global timeline for @i915. */
int i915_gem_timeline_init__global(struct drm_i915_private *i915);
/* Called when the driver is idle: all tracked fences are then complete, so
 * per-timeline syncmaps are redundant and can be discarded (see the @sync
 * field of struct intel_timeline).
 */
void i915_gem_timelines_mark_idle(struct drm_i915_private *i915);
/* Tear down @tl and release its resources. */
void i915_gem_timeline_fini(struct i915_gem_timeline *tl);
  98
  99static inline int __intel_timeline_sync_set(struct intel_timeline *tl,
 100                                            u64 context, u32 seqno)
 101{
 102        return i915_syncmap_set(&tl->sync, context, seqno);
 103}
 104
 105static inline int intel_timeline_sync_set(struct intel_timeline *tl,
 106                                          const struct dma_fence *fence)
 107{
 108        return __intel_timeline_sync_set(tl, fence->context, fence->seqno);
 109}
 110
 111static inline bool __intel_timeline_sync_is_later(struct intel_timeline *tl,
 112                                                  u64 context, u32 seqno)
 113{
 114        return i915_syncmap_is_later(&tl->sync, context, seqno);
 115}
 116
 117static inline bool intel_timeline_sync_is_later(struct intel_timeline *tl,
 118                                                const struct dma_fence *fence)
 119{
 120        return __intel_timeline_sync_is_later(tl, fence->context, fence->seqno);
 121}
 122
#endif /* I915_GEM_TIMELINE_H */