1#ifndef _INTEL_RINGBUFFER_H_
2#define _INTEL_RINGBUFFER_H_
3
4#include <linux/hashtable.h>
5
/* log2 of the bucket count for the per-engine command-parser hash table. */
#define I915_CMD_HASH_ORDER 9

/*
 * Cacheline size, in bytes, used for ring alignment/padding.
 * NOTE(review): hard-coded to 64 — presumably valid for all gens this
 * header supports; confirm before reusing for older hardware.
 */
#define CACHELINE_BYTES 64

/*
 * Minimum number of free bytes the ring code keeps between tail and head.
 * NOTE(review): presumably required by the hardware ringbuffer rules
 * (head must not run into the same cacheline as tail) — verify against
 * the programming spec (Bspec) before changing.
 */
#define I915_RING_FREE_SPACE 64
25
/*
 * Hardware status page: a page the GPU writes status dwords into
 * (seqnos, scratch values) and the CPU reads back.
 */
struct intel_hw_status_page {
	u32 *page_addr;			/* CPU (kernel) mapping of the page */
	unsigned int gfx_addr;		/* GPU address of the page -- NOTE(review): presumably a GGTT offset, confirm */
	struct drm_i915_gem_object *obj; /* backing GEM object */
};
31
/*
 * Accessors for the per-engine ring registers; each expands to an MMIO
 * read/write relative to the engine's mmio_base.
 * NOTE(review): I915_READ/I915_WRITE presumably pick up dev_priv from the
 * caller's scope (as elsewhere in i915) — callers must have it in scope.
 */
#define I915_READ_TAIL(ring) I915_READ(RING_TAIL((ring)->mmio_base))
#define I915_WRITE_TAIL(ring, val) I915_WRITE(RING_TAIL((ring)->mmio_base), val)

#define I915_READ_START(ring) I915_READ(RING_START((ring)->mmio_base))
#define I915_WRITE_START(ring, val) I915_WRITE(RING_START((ring)->mmio_base), val)

#define I915_READ_HEAD(ring) I915_READ(RING_HEAD((ring)->mmio_base))
#define I915_WRITE_HEAD(ring, val) I915_WRITE(RING_HEAD((ring)->mmio_base), val)

#define I915_READ_CTL(ring) I915_READ(RING_CTL((ring)->mmio_base))
#define I915_WRITE_CTL(ring, val) I915_WRITE(RING_CTL((ring)->mmio_base), val)

#define I915_READ_IMR(ring) I915_READ(RING_IMR((ring)->mmio_base))
#define I915_WRITE_IMR(ring, val) I915_WRITE(RING_IMR((ring)->mmio_base), val)

#define I915_READ_MODE(ring) I915_READ(RING_MI_MODE((ring)->mmio_base))
#define I915_WRITE_MODE(ring, val) I915_WRITE(RING_MI_MODE((ring)->mmio_base), val)
49
50
51
52
/*
 * Gen8+ semaphores are laid out in a single GEM object
 * (dev_priv->semaphore_obj) as a 2D table of 64-bit seqno slots indexed
 * by [signaller ring][waiter ring].
 * NOTE(review): both macros implicitly use 'dev_priv' from the caller's
 * scope; GEN8_RING_SEMAPHORE_INIT additionally uses 'ring'.
 */
#define i915_semaphore_seqno_size sizeof(uint64_t)

/* GGTT address of the slot __ring writes to signal ring 'to'. */
#define GEN8_SIGNAL_OFFSET(__ring, to) \
	(i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj) + \
	((__ring)->id * I915_NUM_RINGS * i915_semaphore_seqno_size) + \
	(i915_semaphore_seqno_size * (to)))

/* GGTT address of the slot __ring polls to wait on ring 'from'. */
#define GEN8_WAIT_OFFSET(__ring, from) \
	(i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj) + \
	((from) * I915_NUM_RINGS * i915_semaphore_seqno_size) + \
	(i915_semaphore_seqno_size * (__ring)->id))

/*
 * Populate ring->semaphore.signal_ggtt[] for every ring; a ring never
 * signals itself, so its own slot is marked invalid. Bails out early
 * (via break from the do/while(0)) if the semaphore object is absent.
 */
#define GEN8_RING_SEMAPHORE_INIT do { \
	if (!dev_priv->semaphore_obj) { \
		break; \
	} \
	ring->semaphore.signal_ggtt[RCS] = GEN8_SIGNAL_OFFSET(ring, RCS); \
	ring->semaphore.signal_ggtt[VCS] = GEN8_SIGNAL_OFFSET(ring, VCS); \
	ring->semaphore.signal_ggtt[BCS] = GEN8_SIGNAL_OFFSET(ring, BCS); \
	ring->semaphore.signal_ggtt[VECS] = GEN8_SIGNAL_OFFSET(ring, VECS); \
	ring->semaphore.signal_ggtt[VCS2] = GEN8_SIGNAL_OFFSET(ring, VCS2); \
	ring->semaphore.signal_ggtt[ring->id] = MI_SEMAPHORE_SYNC_INVALID; \
	} while(0)
75
/*
 * Hangcheck classification of what a ring appears to be doing,
 * ordered roughly from harmless to hung.
 */
enum intel_ring_hangcheck_action {
	HANGCHECK_IDLE = 0,	/* nothing outstanding */
	HANGCHECK_WAIT,		/* waiting (e.g. on a semaphore/event) */
	HANGCHECK_ACTIVE,	/* making progress */
	HANGCHECK_ACTIVE_LOOP,	/* active but ACTHD looping over the same range */
	HANGCHECK_KICK,		/* stuck; try kicking the ring */
	HANGCHECK_HUNG,		/* no progress; declare hung */
};

/* Score at/above which a ring is considered hung by hangcheck. */
#define HANGCHECK_SCORE_RING_HUNG 31
86
/* Per-ring state tracked by the periodic GPU hangcheck. */
struct intel_ring_hangcheck {
	u64 acthd;	/* last sampled ACTHD (active head) value */
	u64 max_acthd;	/* NOTE(review): presumably high-water mark used to detect loops — confirm in hangcheck code */
	u32 seqno;	/* last sampled seqno */
	int score;	/* accumulated badness; see HANGCHECK_SCORE_RING_HUNG */
	enum intel_ring_hangcheck_action action;
	int deadlock;	/* NOTE(review): presumably a counter for suspected semaphore deadlock — confirm */
};
95
/*
 * The software ringbuffer: a GEM-backed circular buffer of commands
 * consumed by one engine.
 */
struct intel_ringbuffer {
	struct drm_i915_gem_object *obj; /* backing GEM object */
	void __iomem *virtual_start;	 /* CPU mapping used by intel_ring_emit() */

	struct intel_engine_cs *ring;	 /* engine this buffer feeds */

	u32 head;		/* byte offset of next-to-be-consumed command */
	u32 tail;		/* byte offset where the next command is emitted */
	int space;		/* free bytes between tail and head */
	int size;		/* total size in bytes (power of two; see intel_ring_advance) */
	int effective_size;	/* NOTE(review): presumably size minus slack reserved for wraparound — confirm */

	/*
	 * Head position as of the most recently retired request.
	 * NOTE(review): presumably lets intel_ring_update_space() reclaim
	 * space without an MMIO read of the real head — confirm against
	 * intel_ringbuffer.c.
	 */
	u32 last_retired_head;
};
118
119struct intel_context;
120
/*
 * Per-engine (command streamer) state: identity, MMIO base, the
 * software ringbuffer, interrupt plumbing, and the vfunc table that
 * abstracts per-generation submission differences (legacy ring
 * submission vs. gen8+ execlists).
 */
struct intel_engine_cs {
	const char *name;
	enum intel_ring_id {
		RCS = 0x0,	/* render */
		VCS,		/* video (BSD) */
		BCS,		/* blitter */
		VECS,		/* video enhancement */
		VCS2		/* second video ring */
	} id;
#define I915_NUM_RINGS 5
#define LAST_USER_RING (VECS + 1)
	u32 mmio_base;			/* base of this engine's register block */
	struct drm_device *dev;
	struct intel_ringbuffer *buffer; /* commands are emitted here */

	struct intel_hw_status_page status_page;

	/* NOTE(review): irq_refcount is presumably protected by dev_priv->irq_lock — confirm */
	unsigned irq_refcount;
	u32 irq_enable_mask;		/* bit(s) to enable this ring's user interrupt */
	struct drm_i915_gem_request *trace_irq_req;
	bool __must_check (*irq_get)(struct intel_engine_cs *ring);
	void (*irq_put)(struct intel_engine_cs *ring);

	int (*init_hw)(struct intel_engine_cs *ring);

	/* one-time per-context ring setup (e.g. workarounds) */
	int (*init_context)(struct intel_engine_cs *ring,
			    struct intel_context *ctx);

	void (*write_tail)(struct intel_engine_cs *ring,
			   u32 value);
	int __must_check (*flush)(struct intel_engine_cs *ring,
				  u32 invalidate_domains,
				  u32 flush_domains);
	int (*add_request)(struct intel_engine_cs *ring);

	/*
	 * Return the last completed seqno for this ring.
	 * NOTE(review): lazy_coherency presumably allows returning a
	 * cached/possibly-stale value instead of forcing a coherent
	 * read — confirm against the gen-specific implementations.
	 */
	u32 (*get_seqno)(struct intel_engine_cs *ring,
			 bool lazy_coherency);
	void (*set_seqno)(struct intel_engine_cs *ring,
			  u32 seqno);
	/* emit a batchbuffer start for the given GPU offset/length */
	int (*dispatch_execbuffer)(struct intel_engine_cs *ring,
				   u64 offset, u32 length,
				   unsigned dispatch_flags);
#define I915_DISPATCH_SECURE 0x1
#define I915_DISPATCH_PINNED 0x2
	void (*cleanup)(struct intel_engine_cs *ring);

	/*
	 * Inter-ring synchronisation state and vfuncs.
	 *
	 * Pre-gen8 uses per-pair mailbox registers (mbox.wait/signal);
	 * gen8+ uses seqno slots in a shared GGTT object (signal_ggtt,
	 * see GEN8_SIGNAL_OFFSET/GEN8_WAIT_OFFSET above). The union
	 * reflects that only one scheme is in use at a time.
	 */
	struct {
		/* last seqno we synced to, per other ring (see intel_ring_sync_index) */
		u32 sync_seqno[I915_NUM_RINGS-1];

		union {
			struct {
				/* registers our ring waits on, per signaller */
				u32 wait[I915_NUM_RINGS];
				/* registers other rings poll when we signal them */
				u32 signal[I915_NUM_RINGS];
			} mbox;
			/* gen8+: GGTT addresses of our signal slots */
			u64 signal_ggtt[I915_NUM_RINGS];
		};

		/* make 'ring' wait until 'to' has passed 'seqno' */
		int (*sync_to)(struct intel_engine_cs *ring,
			       struct intel_engine_cs *to,
			       u32 seqno);
		int (*signal)(struct intel_engine_cs *signaller,
			      /* num_dwords the caller has already reserved */
			      unsigned int num_dwords);
	} semaphore;

	/* Execlist (gen8+) submission state */
	spinlock_t execlist_lock;		/* protects the two lists below */
	struct list_head execlist_queue;
	struct list_head execlist_retired_req_list;
	u8 next_context_status_buffer;
	u32 irq_keep_mask;	/* interrupt bits that must stay unmasked */
	int (*emit_request)(struct intel_ringbuffer *ringbuf,
			    struct drm_i915_gem_request *request);
	int (*emit_flush)(struct intel_ringbuffer *ringbuf,
			  struct intel_context *ctx,
			  u32 invalidate_domains,
			  u32 flush_domains);
	int (*emit_bb_start)(struct intel_ringbuffer *ringbuf,
			     struct intel_context *ctx,
			     u64 offset, unsigned dispatch_flags);

	/*
	 * Objects currently in use by the GPU on this ring.
	 * NOTE(review): presumably retired as requests complete — confirm
	 * the retirement path in i915_gem.c.
	 */
	struct list_head active_list;

	/* Outstanding requests (breadcrumbs) submitted on this ring. */
	struct list_head request_list;

	/*
	 * Request being built but not yet emitted/submitted; see
	 * intel_ring_get_request() below, which BUGs if this is NULL.
	 */
	struct drm_i915_gem_request *outstanding_lazy_request;
	bool gpu_caches_dirty;

	/* waitqueue woken from the user-interrupt handler */
	wait_queue_head_t irq_queue;

	struct intel_context *default_context;
	struct intel_context *last_context;	/* last context actually run on this ring */

	struct intel_ring_hangcheck hangcheck;

	/* small per-ring scratch page (e.g. for pipe-control writes) */
	struct {
		struct drm_i915_gem_object *obj;
		u32 gtt_offset;
		volatile u32 *cpu_page;
	} scratch;

	/* whether batches on this ring must go through the cmd parser */
	bool needs_cmd_parser;

	/*
	 * Hash table of commands the parser knows about for this ring,
	 * sized by I915_CMD_HASH_ORDER.
	 */
	DECLARE_HASHTABLE(cmd_hash, I915_CMD_HASH_ORDER);

	/*
	 * Registers the parser allows in register-access commands.
	 * NOTE(review): element format (plain offsets vs. flag-encoded
	 * entries) is defined by the cmd parser — confirm there.
	 */
	const u32 *reg_table;
	int reg_count;

	/* As reg_table, but only permitted for the DRM master. */
	const u32 *master_reg_table;
	int master_reg_count;

	/*
	 * Return the mask to extract the length field from a command
	 * header dword, for commands with engine-specific encodings.
	 */
	u32 (*get_cmd_length_mask)(u32 cmd_header);
};
317
/* True once the engine has a live ringbuffer attached. */
bool intel_ring_initialized(struct intel_engine_cs *ring);
319
320static inline unsigned
321intel_ring_flag(struct intel_engine_cs *ring)
322{
323 return 1 << ring->id;
324}
325
static inline u32
intel_ring_sync_index(struct intel_engine_cs *ring,
		      struct intel_engine_cs *other)
{
	int idx;

	/*
	 * Map 'other' into ring's sync_seqno[] slot (0..I915_NUM_RINGS-2),
	 * skipping ring itself:
	 *   rcs -> 0 = vcs, 1 = bcs, 2 = vecs, 3 = vcs2;
	 *   vcs -> 0 = bcs, 1 = vecs, 2 = vcs2, 3 = rcs;
	 *   and so on, cyclically.
	 * NOTE(review): the pointer subtraction assumes all engines live
	 * contiguously in one array (presumably dev_priv->ring[]), so the
	 * difference equals the id delta — confirm that layout holds.
	 */
	idx = (other - ring) - 1;
	if (idx < 0)
		idx += I915_NUM_RINGS;

	return idx;
}
346
/* Read one dword (by index 'reg') from the engine's hardware status page. */
static inline u32
intel_read_status_page(struct intel_engine_cs *ring,
		       int reg)
{
	/* Ensure the compiler re-reads the GPU-written page, not a cached value. */
	barrier();
	return ring->status_page.page_addr[reg];
}
355
356static inline void
357intel_write_status_page(struct intel_engine_cs *ring,
358 int reg, u32 value)
359{
360 ring->status_page.page_addr[reg] = value;
361}
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379#define I915_GEM_HWS_INDEX 0x30
380#define I915_GEM_HWS_SCRATCH_INDEX 0x40
381#define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
382
/* Ringbuffer backing-object lifecycle: allocate, pin+map, and teardown. */
void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf);
int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
				     struct intel_ringbuffer *ringbuf);
void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf);
int intel_alloc_ringbuffer_obj(struct drm_device *dev,
			       struct intel_ringbuffer *ringbuf);

void intel_stop_ring_buffer(struct intel_engine_cs *ring);
void intel_cleanup_ring_buffer(struct intel_engine_cs *ring);

/*
 * NOTE(review): callers presumably must reserve space with
 * intel_ring_begin(ring, n) (n dwords) before calling intel_ring_emit()
 * — confirm against intel_ringbuffer.c.
 */
int __must_check intel_ring_begin(struct intel_engine_cs *ring, int n);
int __must_check intel_ring_cacheline_align(struct intel_engine_cs *ring);
395static inline void intel_ring_emit(struct intel_engine_cs *ring,
396 u32 data)
397{
398 struct intel_ringbuffer *ringbuf = ring->buffer;
399 iowrite32(data, ringbuf->virtual_start + ringbuf->tail);
400 ringbuf->tail += 4;
401}
402static inline void intel_ring_advance(struct intel_engine_cs *ring)
403{
404 struct intel_ringbuffer *ringbuf = ring->buffer;
405 ringbuf->tail &= ringbuf->size - 1;
406}
/* Free-space accounting for the circular buffer. */
int __intel_ring_space(int head, int tail, int size);
void intel_ring_update_space(struct intel_ringbuffer *ringbuf);
int intel_ring_space(struct intel_ringbuffer *ringbuf);
bool intel_ring_stopped(struct intel_engine_cs *ring);
/* Commit emitted commands to the hardware (writes the real tail register). */
void __intel_ring_advance(struct intel_engine_cs *ring);

int __must_check intel_ring_idle(struct intel_engine_cs *ring);
void intel_ring_init_seqno(struct intel_engine_cs *ring, u32 seqno);
int intel_ring_flush_all_caches(struct intel_engine_cs *ring);
int intel_ring_invalidate_all_caches(struct intel_engine_cs *ring);

/* Scratch-page ("pipe control") setup/teardown for a ring. */
void intel_fini_pipe_control(struct intel_engine_cs *ring);
int intel_init_pipe_control(struct intel_engine_cs *ring);

/* Per-engine constructors, one for each intel_ring_id. */
int intel_init_render_ring_buffer(struct drm_device *dev);
int intel_init_bsd_ring_buffer(struct drm_device *dev);
int intel_init_bsd2_ring_buffer(struct drm_device *dev);
int intel_init_blt_ring_buffer(struct drm_device *dev);
int intel_init_vebox_ring_buffer(struct drm_device *dev);

/* Read the engine's ACTHD (active head) register. */
u64 intel_ring_get_active_head(struct intel_engine_cs *ring);

int init_workarounds_ring(struct intel_engine_cs *ring);
430
431static inline u32 intel_ring_get_tail(struct intel_ringbuffer *ringbuf)
432{
433 return ringbuf->tail;
434}
435
436static inline struct drm_i915_gem_request *
437intel_ring_get_request(struct intel_engine_cs *ring)
438{
439 BUG_ON(ring->outstanding_lazy_request == NULL);
440 return ring->outstanding_lazy_request;
441}
442
443#endif
444