1#ifndef _INTEL_RINGBUFFER_H_
2#define _INTEL_RINGBUFFER_H_
3
4
5
6
7
8
9
10
11
12
/*
 * Minimum number of bytes deliberately left unused in the ring so that
 * head and tail never become equal while the ring is full (head == tail
 * means "empty" to the hardware).  NOTE(review): exact value choice is a
 * hardware headroom convention — confirm against the programming docs.
 */
#define I915_RING_FREE_SPACE 64
14
/*
 * Per-ring hardware status page: a GEM object the GPU writes status
 * dwords (e.g. seqnos) into, with both a CPU kernel mapping and a
 * GPU (graphics) address.
 */
struct intel_hw_status_page {
	u32 *page_addr;		/* CPU (kernel) mapping of the page */
	unsigned int gfx_addr;	/* GPU address of the page */
	struct drm_i915_gem_object *obj; /* backing GEM object */
};
20
/*
 * Convenience wrappers for per-ring register access: each ring's
 * registers live at a fixed offset from ring->mmio_base, so these
 * expand to the generic I915_READ/I915_WRITE on the ring's copy of
 * the named register.
 */
#define I915_READ_TAIL(ring) I915_READ(RING_TAIL((ring)->mmio_base))
#define I915_WRITE_TAIL(ring, val) I915_WRITE(RING_TAIL((ring)->mmio_base), val)

#define I915_READ_START(ring) I915_READ(RING_START((ring)->mmio_base))
#define I915_WRITE_START(ring, val) I915_WRITE(RING_START((ring)->mmio_base), val)

#define I915_READ_HEAD(ring) I915_READ(RING_HEAD((ring)->mmio_base))
#define I915_WRITE_HEAD(ring, val) I915_WRITE(RING_HEAD((ring)->mmio_base), val)

#define I915_READ_CTL(ring) I915_READ(RING_CTL((ring)->mmio_base))
#define I915_WRITE_CTL(ring, val) I915_WRITE(RING_CTL((ring)->mmio_base), val)

#define I915_READ_IMR(ring) I915_READ(RING_IMR((ring)->mmio_base))
#define I915_WRITE_IMR(ring, val) I915_WRITE(RING_IMR((ring)->mmio_base), val)

#define I915_READ_NOPID(ring) I915_READ(RING_NOPID((ring)->mmio_base))
#define I915_READ_SYNC_0(ring) I915_READ(RING_SYNC_0((ring)->mmio_base))
#define I915_READ_SYNC_1(ring) I915_READ(RING_SYNC_1((ring)->mmio_base))
39
/*
 * Hangcheck verdict for a ring, roughly in increasing order of
 * severity: waiting, still making progress, needs a kick, or hung.
 * NOTE(review): consumers appear to compare/escalate these via
 * hangcheck.score — confirm ordering assumptions in the hangcheck code.
 */
enum intel_ring_hangcheck_action { wait, active, kick, hung };
41
/*
 * Per-ring state sampled periodically by the hangcheck timer to decide
 * whether the ring is making forward progress.
 */
struct intel_ring_hangcheck {
	bool deadlock;	/* semaphore deadlock suspected */
	u32 seqno;	/* last sampled seqno */
	u32 acthd;	/* last sampled active head (ACTHD) */
	int score;	/* accumulated badness; drives escalation */
	enum intel_ring_hangcheck_action action; /* verdict from last sample */
};
49
/*
 * State for one hardware command-stream ring (render, video, blitter,
 * video-enhance).  Combines the ring-buffer memory itself, the vfuncs
 * that abstract per-generation differences, interrupt bookkeeping and
 * per-ring request/retirement tracking.
 */
struct intel_ring_buffer {
	const char	*name;	/* human-readable name for debug/error output */
	enum intel_ring_id {
		RCS = 0x0,	/* Render Command Streamer */
		VCS,		/* Video (BSD) Command Streamer */
		BCS,		/* Blitter Command Streamer */
		VECS,		/* Video Enhancement Command Streamer */
	} id;
#define I915_NUM_RINGS 4
	u32		mmio_base;	/* base offset of this ring's registers */
	void		__iomem *virtual_start;	/* CPU mapping of the ring buffer */
	struct drm_device *dev;
	struct drm_i915_gem_object *obj; /* backing object; NULL until ring is initialized */

	u32		head;	/* cached copy of the hardware HEAD offset */
	u32		tail;	/* software write position (bytes into the ring) */
	int		space;	/* bytes available for new commands */
	int		size;	/* total size of the ring in bytes */
	int		effective_size;	/* usable size (size minus reserved headroom) */
	struct intel_hw_status_page status_page;

	/*
	 * Ring position of the most recently retired request; once a
	 * request is retired the GPU must be past it, so head can be
	 * advanced up to this point to reclaim space.
	 * NOTE(review): consumers seem to use a sentinel to mark the
	 * value as consumed — confirm against the .c implementation.
	 */
	u32 last_retired_head;

	struct {
		u32 gt;	/* GT interrupt refcount — presumably guarded by an irq lock; verify */
		u32 pm;	/* PM interrupt refcount (VECS) — locking TBC */
	} irq_refcount;
	u32		irq_enable_mask; /* bit(s) to enable this ring's user interrupt */
	u32		trace_irq_seqno; /* seqno a tracepoint is waiting on (0 = none) */
	u32		sync_seqno[I915_NUM_RINGS-1]; /* last seqno synced against each other ring */
	bool __must_check (*irq_get)(struct intel_ring_buffer *ring);
	void		(*irq_put)(struct intel_ring_buffer *ring);

	int		(*init)(struct intel_ring_buffer *ring);

	void		(*write_tail)(struct intel_ring_buffer *ring,
				      u32 value);
	int __must_check (*flush)(struct intel_ring_buffer *ring,
				  u32 invalidate_domains,
				  u32 flush_domains);
	int		(*add_request)(struct intel_ring_buffer *ring);

	/*
	 * Read the current seqno from the status page.  lazy_coherency
	 * selects between a cheap, possibly-stale read and a fully
	 * coherent (more expensive) one — exact mechanism is
	 * per-generation; see the implementations.
	 */
	u32		(*get_seqno)(struct intel_ring_buffer *ring,
				     bool lazy_coherency);
	void		(*set_seqno)(struct intel_ring_buffer *ring,
				     u32 seqno);
	int		(*dispatch_execbuffer)(struct intel_ring_buffer *ring,
					       u32 offset, u32 length,
					       unsigned flags);
#define I915_DISPATCH_SECURE 0x1
#define I915_DISPATCH_PINNED 0x2
	void		(*cleanup)(struct intel_ring_buffer *ring);
	/* Make this ring wait until @to has passed @seqno (semaphores). */
	int		(*sync_to)(struct intel_ring_buffer *ring,
				   struct intel_ring_buffer *to,
				   u32 seqno);

	/* semaphore mailbox registers: ours, written by the other rings */
	u32		semaphore_register[I915_NUM_RINGS];
	/* mailbox registers this ring signals to */
	u32		signal_mbox[I915_NUM_RINGS];

	/*
	 * List of objects currently involved in rendering from this
	 * ring.  A reference is held on each buffer while it is on
	 * this list.
	 */
	struct list_head active_list;

	/*
	 * List of breadcrumbs associated with GPU requests currently
	 * outstanding on this ring.
	 */
	struct list_head request_list;

	/*
	 * Seqno allocated for the next request but not yet emitted
	 * to the ring (0 = none outstanding).
	 */
	u32 outstanding_lazy_request;
	bool gpu_caches_dirty;	/* caches need flushing before next seqno emit */
	bool fbc_dirty;		/* FBC needs nuking after this ring's flush */

	wait_queue_head_t irq_queue;	/* waiters for this ring's user interrupt */

	/*
	 * Whether an explicit iTLB invalidate is required before a
	 * context switch (workaround on some generations — TODO confirm
	 * which ones in the .c file).
	 */
	bool itlb_before_ctx_switch;
	struct i915_hw_context *default_context;
	struct i915_hw_context *last_context; /* context last active on this ring */

	struct intel_ring_hangcheck hangcheck;

	void *private;	/* per-ring implementation-private data */
};
162
163static inline bool
164intel_ring_initialized(struct intel_ring_buffer *ring)
165{
166 return ring->obj != NULL;
167}
168
169static inline unsigned
170intel_ring_flag(struct intel_ring_buffer *ring)
171{
172 return 1 << ring->id;
173}
174
175static inline u32
176intel_ring_sync_index(struct intel_ring_buffer *ring,
177 struct intel_ring_buffer *other)
178{
179 int idx;
180
181
182
183
184
185
186
187 idx = (other - ring) - 1;
188 if (idx < 0)
189 idx += I915_NUM_RINGS;
190
191 return idx;
192}
193
/*
 * Read dword @reg from the ring's hardware status page.
 * The compiler barrier forces a fresh load from the (GPU-written) page
 * rather than a value cached in a register from an earlier read.
 */
static inline u32
intel_read_status_page(struct intel_ring_buffer *ring,
		       int reg)
{
	/* Ensure that the compiler doesn't elide or reorder this load. */
	barrier();
	return ring->status_page.page_addr[reg];
}
202
203static inline void
204intel_write_status_page(struct intel_ring_buffer *ring,
205 int reg, u32 value)
206{
207 ring->status_page.page_addr[reg] = value;
208}
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
/*
 * Well-known dword indices into the hardware status page used by the
 * driver: one for breadcrumb seqnos and one scratch slot.
 * I915_GEM_HWS_SCRATCH_ADDR converts the scratch index into the byte
 * offset form expected by MI_STORE_DWORD_INDEX.
 */
#define I915_GEM_HWS_INDEX 0x20
#define I915_GEM_HWS_SCRATCH_INDEX 0x30
#define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
228
/* Tear down a ring: release its backing object and mappings. */
void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring);

/* Reserve space for @n dwords in the ring; must precede intel_ring_emit(). */
int __must_check intel_ring_begin(struct intel_ring_buffer *ring, int n);
232static inline void intel_ring_emit(struct intel_ring_buffer *ring,
233 u32 data)
234{
235 iowrite32(data, ring->virtual_start + ring->tail);
236 ring->tail += 4;
237}
/* Publish the new tail to the hardware after emitting commands. */
void intel_ring_advance(struct intel_ring_buffer *ring);
/* Wait for the ring to drain completely. */
int __must_check intel_ring_idle(struct intel_ring_buffer *ring);
/* Reset the ring's seqno bookkeeping to @seqno (e.g. after wrap/reset). */
void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno);
int intel_ring_flush_all_caches(struct intel_ring_buffer *ring);
int intel_ring_invalidate_all_caches(struct intel_ring_buffer *ring);

/* Per-ring constructors, called from driver load. */
int intel_init_render_ring_buffer(struct drm_device *dev);
int intel_init_bsd_ring_buffer(struct drm_device *dev);
int intel_init_blt_ring_buffer(struct drm_device *dev);
int intel_init_vebox_ring_buffer(struct drm_device *dev);

/* Read the ring's ACTHD (active head) register. */
u32 intel_ring_get_active_head(struct intel_ring_buffer *ring);
/* Point the hardware at the ring's status page. */
void intel_ring_setup_status_page(struct intel_ring_buffer *ring);
251
252static inline u32 intel_ring_get_tail(struct intel_ring_buffer *ring)
253{
254 return ring->tail;
255}
256
257static inline u32 intel_ring_get_seqno(struct intel_ring_buffer *ring)
258{
259 BUG_ON(ring->outstanding_lazy_request == 0);
260 return ring->outstanding_lazy_request;
261}
262
263static inline void i915_trace_irq_get(struct intel_ring_buffer *ring, u32 seqno)
264{
265 if (ring->trace_irq_seqno == 0 && ring->irq_get(ring))
266 ring->trace_irq_seqno = seqno;
267}
268
269
/* Legacy (DRI1/UMS) render ring init at an explicit start/size. */
int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size);
271
272#endif
273