/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2016 Intel Corporation
 */

#ifndef __I915_TIMELINE_TYPES_H__
#define __I915_TIMELINE_TYPES_H__

#include <linux/list.h>
#include <linux/kref.h>
#include <linux/mutex.h>
#include <linux/types.h>

#include "i915_active_types.h"

/* Opaque to this header; only pointers are stored below. */
struct drm_i915_private;
struct i915_vma;
struct i915_timeline_cacheline;
struct i915_syncmap;

/*
 * struct i915_timeline - per-context ordered stream of GPU requests.
 *
 * Type-only definition: all locking and lifetime rules stated here are
 * enforced by the timeline implementation, not by this header.
 */
struct i915_timeline {
	/* Fence context id for requests emitted on this timeline
	 * (NOTE(review): presumably a dma-fence context — confirm against
	 * the code that assigns it).
	 */
	u64 fence_context;
	/* Seqno tracked for this timeline; exact meaning (last emitted vs
	 * last completed) is not visible from this header — see users.
	 */
	u32 seqno;

	struct mutex mutex; /* protects the flow of requests */

	/* Pin count for the backing storage below; pin/unpin semantics live
	 * in the timeline implementation.
	 */
	unsigned int pin_count;
	/* CPU-side read-only view of this timeline's seqno slot
	 * (NOTE(review): "hwsp" in i915 conventionally means hardware
	 * status page — confirm).
	 */
	const u32 *hwsp_seqno;
	/* GGTT vma backing the seqno slot, with the offset of this
	 * timeline's slot within it.
	 */
	struct i915_vma *hwsp_ggtt;
	u32 hwsp_offset;

	/* Optional cacheline bookkeeping for the seqno slot; NULL-ness and
	 * ownership are determined by the implementation, not visible here.
	 */
	struct i915_timeline_cacheline *hwsp_cacheline;

	/* Whether requests on this timeline start with an initial
	 * breadcrumb (semantics defined by the request emission code).
	 */
	bool has_initial_breadcrumb;

	/**
	 * List of breadcrumbs associated with GPU requests currently
	 * outstanding.
	 */
	struct list_head requests;

	/* Contains an RCU guarded pointer to the last request. No reference is
	 * held to the request, users must carefully acquire a reference to
	 * the request using i915_active_request_get_request_rcu(), or hold the
	 * struct_mutex.
	 */
	struct i915_active_request last_request;

	/**
	 * We track the most recent seqno that we wait on in every context so
	 * that we only have to emit a new await and dependency on a more
	 * recent sync point. As the contexts may be executed out-of-order, we
	 * have to track each individually and can not rely on an absolute
	 * global_seqno. When we know that all tracked fences are completed
	 * (i.e. when the driver is idle), we know that the syncmap is
	 * redundant and we can discard it without loss of generality.
	 */
	struct i915_syncmap *sync;

	/* Link in a driver-wide timeline list; owning device backpointer.
	 * (Which list `link` sits on is not visible from this header.)
	 */
	struct list_head link;
	struct drm_i915_private *i915;

	/* Reference count controlling this timeline's lifetime. */
	struct kref kref;
};

#endif /* __I915_TIMELINE_TYPES_H__ */