1#ifndef _INTEL_RINGBUFFER_H_
2#define _INTEL_RINGBUFFER_H_
3
4#include <linux/hashtable.h>
5#include "i915_gem_batch_pool.h"
6
/* log2 of the bucket count for the per-engine command-parser hashtable
 * (see DECLARE_HASHTABLE(cmd_hash, ...) in struct intel_engine_cs below).
 */
#define I915_CMD_HASH_ORDER 9

/* Assumed CPU cacheline size, and the same expressed in dwords. */
#define CACHELINE_BYTES 64
#define CACHELINE_DWORDS (CACHELINE_BYTES / sizeof(uint32_t))

/*
 * Minimum amount of ring space always kept free.
 * NOTE(review): presumably this keeps HEAD and TAIL from landing in the
 * same cacheline, which the hardware forbids on older gens — confirm
 * against the "Ring Buffer Use" section of the relevant BSpec volume.
 */
#define I915_RING_FREE_SPACE 64
27
/*
 * Hardware status page: a page the GPU writes status dwords into
 * (seqnos, scratch values) and the CPU reads back.
 */
struct intel_hw_status_page {
	u32 *page_addr;		/* CPU (kernel) address of the page */
	unsigned int gfx_addr;	/* GPU (GGTT) address of the page */
	struct drm_i915_gem_object *obj;	/* backing GEM object */
};
33
/*
 * Per-engine ring register accessors: each engine's TAIL/HEAD/START/CTL/
 * IMR/MI_MODE registers live at fixed offsets from its mmio_base.
 */
#define I915_READ_TAIL(ring) I915_READ(RING_TAIL((ring)->mmio_base))
#define I915_WRITE_TAIL(ring, val) I915_WRITE(RING_TAIL((ring)->mmio_base), val)

#define I915_READ_START(ring) I915_READ(RING_START((ring)->mmio_base))
#define I915_WRITE_START(ring, val) I915_WRITE(RING_START((ring)->mmio_base), val)

#define I915_READ_HEAD(ring) I915_READ(RING_HEAD((ring)->mmio_base))
#define I915_WRITE_HEAD(ring, val) I915_WRITE(RING_HEAD((ring)->mmio_base), val)

#define I915_READ_CTL(ring) I915_READ(RING_CTL((ring)->mmio_base))
#define I915_WRITE_CTL(ring, val) I915_WRITE(RING_CTL((ring)->mmio_base), val)

#define I915_READ_IMR(ring) I915_READ(RING_IMR((ring)->mmio_base))
#define I915_WRITE_IMR(ring, val) I915_WRITE(RING_IMR((ring)->mmio_base), val)

#define I915_READ_MODE(ring) I915_READ(RING_MI_MODE((ring)->mmio_base))
#define I915_WRITE_MODE(ring, val) I915_WRITE(RING_MI_MODE((ring)->mmio_base), val)
51
52
53
54
/*
 * Gen8+ semaphores: dev_priv->semaphore_obj holds one u64 seqno slot per
 * ordered (from, to) engine pair. GEN8_SIGNAL_OFFSET gives the GGTT
 * address of the slot __ring writes when signalling @to; GEN8_WAIT_OFFSET
 * gives the slot __ring polls when waiting upon @from. Both macros expect
 * a dev_priv in scope at the expansion site.
 */
#define gen8_semaphore_seqno_size sizeof(uint64_t)
#define GEN8_SEMAPHORE_OFFSET(__from, __to) \
	(((__from) * I915_NUM_ENGINES + (__to)) * gen8_semaphore_seqno_size)
#define GEN8_SIGNAL_OFFSET(__ring, to) \
	(i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj) + \
	 GEN8_SEMAPHORE_OFFSET((__ring)->id, (to)))
#define GEN8_WAIT_OFFSET(__ring, from) \
	(i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj) + \
	 GEN8_SEMAPHORE_OFFSET(from, (__ring)->id))
64
/*
 * Coarse classification of an engine's progress, sampled periodically by
 * the hangcheck machinery; ordered roughly from "fine" to "hung".
 * NOTE(review): per-value meanings inferred from the names — confirm
 * against the hangcheck implementation.
 */
enum intel_ring_hangcheck_action {
	HANGCHECK_IDLE = 0,	/* engine has nothing to do */
	HANGCHECK_WAIT,		/* engine is waiting (e.g. on a semaphore/event) */
	HANGCHECK_ACTIVE,	/* engine is making forward progress */
	HANGCHECK_KICK,		/* stuck, but may recover if kicked */
	HANGCHECK_HUNG,		/* no forward progress; declare a hang */
};

/* Score at or above which an engine is considered hung outright. */
#define HANGCHECK_SCORE_RING_HUNG 31
74
/*
 * Per-engine state sampled on each hangcheck tick, used to decide whether
 * the engine is making forward progress between samples.
 */
struct intel_ring_hangcheck {
	u64 acthd;			/* last sampled active head (ACTHD) */
	unsigned long user_interrupts;	/* breadcrumb irq count at last sample */
	u32 seqno;			/* last sampled hw seqno */
	int score;			/* accumulated hang score; compare HANGCHECK_SCORE_RING_HUNG */
	enum intel_ring_hangcheck_action action;	/* verdict for the last sample */
	int deadlock;			/* NOTE(review): presumably counts repeated semaphore waits — confirm */
	u32 instdone[I915_NUM_INSTDONE_REG];	/* INSTDONE snapshot for subunit progress */
};
84
/*
 * A single ring of commands: the GEM object backing it, its CPU mapping,
 * and the software head/tail bookkeeping used while emitting.
 */
struct intel_ringbuffer {
	struct drm_i915_gem_object *obj;	/* backing storage */
	void __iomem *virtual_start;		/* CPU mapping of the ring */
	struct i915_vma *vma;

	struct intel_engine_cs *engine;		/* engine this ring belongs to */
	struct list_head link;			/* entry in engine->buffers */

	u32 head;		/* read offset, in bytes */
	u32 tail;		/* write offset for the next emit, in bytes */
	int space;		/* bytes free for emission */
	int size;		/* total size in bytes (power of two — see intel_ring_advance()) */
	int effective_size;	/* usable size; NOTE(review): presumably size minus end-of-ring slack — confirm */

	/*
	 * We track the position of the requests in the ring buffer, and
	 * when each is retired we advance last_retired_head: the GPU must
	 * have finished processing the request, so the ring can be reused
	 * up to that position.
	 *
	 * Presumably set to -1 once the value has been consumed so new
	 * retirements can be detected — confirm against the update sites.
	 */
	u32 last_retired_head;
};
109
110struct i915_gem_context;
111struct drm_i915_reg_table;
112
113
114
115
116
117
118
119
120
121
122
123
/*
 * Workaround batchbuffers stored inside @obj: one batch run via the
 * indirect context pointer and one run per context, each described by its
 * offset and size within the object.
 */
struct i915_ctx_workarounds {
	struct i915_wa_ctx_bb {
		u32 offset;	/* start of this batch within @obj */
		u32 size;	/* length of this batch */
	} indirect_ctx, per_ctx;
	struct drm_i915_gem_object *obj;	/* backing GEM object for both batches */
};
131
132struct drm_i915_gem_request;
133
/*
 * Per-engine command streamer state: identity, submission vfuncs (both the
 * legacy ringbuffer and execlists flavours), interrupt/breadcrumb
 * handling, semaphores, hangcheck and command-parser data.
 */
struct intel_engine_cs {
	struct drm_i915_private *i915;	/* backpointer to the device; NULL until initialised */
	const char *name;
	/* Engine id; also usable as an index — intel_ring_sync_index()
	 * relies on engines being laid out contiguously in id order.
	 */
	enum intel_engine_id {
		RCS = 0,	/* render */
		BCS,		/* blitter */
		VCS,		/* video */
		VCS2,		/* second video engine */
		VECS		/* video enhancement */
	} id;
#define I915_NUM_ENGINES 5
#define _VCS(n) (VCS + (n))	/* n-th video engine: _VCS(0) == VCS, _VCS(1) == VCS2 */
	/* Alternative views of this engine's identity — NOTE(review):
	 * presumably as seen by userspace (exec_id), the hardware (hw_id)
	 * and the GuC firmware (guc_id); confirm against their users.
	 */
	unsigned int exec_id;
	unsigned int hw_id;
	unsigned int guc_id;
	u32 mmio_base;		/* base of this engine's register range */
	struct intel_ringbuffer *buffer;	/* ring currently in use for submission */
	struct list_head buffers;	/* all rings created for this engine (intel_ringbuffer.link) */

	/* Rather than have every client wait upon all user interrupts,
	 * with the herd waking after every interrupt and each doing the
	 * heavyweight seqno dance, we delegate the task of being the
	 * bottom-half of the user interrupt to the first waiter. After
	 * every interrupt we wake up that one client, who performs the
	 * coherent seqno read and either goes back to sleep (if the
	 * request is incomplete) or wakes all completed waiters before
	 * handing the bottom-half role to the next in the queue.
	 */
	struct intel_breadcrumbs {
		struct task_struct *irq_seqno_bh;	/* bottom-half for user interrupts */
		unsigned long irq_wakeups;	/* count of irq wakeups (hangcheck samples this) */
		bool irq_posted;

		spinlock_t lock;		/* protects the waiter/signal trees */
		struct rb_root waiters;		/* sleeping clients, sorted by seqno */
		struct rb_root signals;		/* requests awaiting fence signalling */
		struct intel_wait *first_wait;	/* oldest waiter (owns the bottom-half role) */
		struct task_struct *signaler;	/* kthread driving the signals tree */
		struct drm_i915_gem_request *first_signal;
		struct timer_list fake_irq;	/* fallback tick if interrupts go missing */

		bool irq_enabled : 1;	/* user interrupt currently unmasked */
		bool rpm_wakelock : 1;	/* holding a runtime-pm reference for the irq */
	} breadcrumbs;

	/* Pool of shadow batchbuffer objects, used by the command parser
	 * so clients cannot modify a batch after it has been validated.
	 */
	struct i915_gem_batch_pool batch_pool;

	struct intel_hw_status_page status_page;	/* hw status page for this engine */
	struct i915_ctx_workarounds wa_ctx;	/* context workaround batchbuffers */

	u32 irq_keep_mask;	/* interrupts to keep enabled at all times */
	u32 irq_enable_mask;	/* interrupts gated by user-irq enable */
	void (*irq_enable)(struct intel_engine_cs *ring);
	void (*irq_disable)(struct intel_engine_cs *ring);

	int (*init_hw)(struct intel_engine_cs *ring);

	/* Run once on first use of a new context. */
	int (*init_context)(struct drm_i915_gem_request *req);

	/* Move the hardware TAIL, kicking off execution. */
	void (*write_tail)(struct intel_engine_cs *ring,
			   u32 value);
	int __must_check (*flush)(struct drm_i915_gem_request *req,
				  u32 invalidate_domains,
				  u32 flush_domains);
	/* Emit the breadcrumb (seqno write + interrupt) closing a request. */
	int (*add_request)(struct drm_i915_gem_request *req);

	/* Some chipsets are not quite as coherent as advertised and need
	 * an expensive kick to force a true read of the up-to-date seqno.
	 * However, the up-to-date seqno is not always required and the
	 * last-seen value is often good enough (it is monotonic even when
	 * not coherent).
	 */
	void (*irq_seqno_barrier)(struct intel_engine_cs *ring);
	/* Emit the batchbuffer-start for execution at @offset. */
	int (*dispatch_execbuffer)(struct drm_i915_gem_request *req,
				   u64 offset, u32 length,
				   unsigned dispatch_flags);
#define I915_DISPATCH_SECURE 0x1	/* run with elevated (secure) privileges */
#define I915_DISPATCH_PINNED 0x2	/* batch is pinned in GGTT, not ppGTT */
#define I915_DISPATCH_RS 0x4	/* NOTE(review): presumably resource streamer — confirm */
	void (*cleanup)(struct intel_engine_cs *ring);

	/* Inter-engine synchronisation via hardware semaphores. */
	struct {
		/* Last seqno from each other engine that this engine has
		 * already waited upon (one slot per remote engine, see
		 * intel_ring_sync_index()); avoids redundant waits.
		 */
		u32 sync_seqno[I915_NUM_ENGINES-1];

		union {
			/* pre-gen8: mmio mailbox registers */
			struct {
				/* our mboxes written by others */
				u32 wait[I915_NUM_ENGINES];
				/* mboxes this engine signals to */
				i915_reg_t signal[I915_NUM_ENGINES];
			} mbox;
			/* gen8+: GGTT addresses of the signal slots */
			u64 signal_ggtt[I915_NUM_ENGINES];
		};

		/* Emit a wait until @from reaches @seqno. */
		int (*sync_to)(struct drm_i915_gem_request *to_req,
			       struct intel_engine_cs *from,
			       u32 seqno);
		/* Signal all other engines after a request completes. */
		int (*signal)(struct drm_i915_gem_request *signaller_req,
			      unsigned int num_dwords);
	} semaphore;

	/* Execlists (gen8+ submission) state. */
	struct tasklet_struct irq_tasklet;
	spinlock_t execlist_lock;	/* taken from irq (tasklet) context */
	struct list_head execlist_queue;
	unsigned int fw_domains;	/* forcewake domains needed for ELSP writes */
	unsigned int next_context_status_buffer;
	unsigned int idle_lite_restore_wa;
	bool disable_lite_restore_wa;
	u32 ctx_desc_template;
	int (*emit_request)(struct drm_i915_gem_request *request);
	int (*emit_flush)(struct drm_i915_gem_request *request,
			  u32 invalidate_domains,
			  u32 flush_domains);
	int (*emit_bb_start)(struct drm_i915_gem_request *req,
			     u64 offset, unsigned dispatch_flags);

	/* List of objects currently involved in rendering from this
	 * engine. A reference is held on each buffer while on the list.
	 */
	struct list_head active_list;

	/* List of requests (breadcrumbs) currently outstanding. */
	struct list_head request_list;

	/* Seqno of the request most recently added to request_list.
	 * NOTE(review): presumably sampled locklessly by hangcheck —
	 * confirm before relying on ordering.
	 */
	u32 last_submitted_seqno;

	bool gpu_caches_dirty;	/* a flush is owed before the next breadcrumb */

	struct i915_gem_context *last_context;	/* last context actually submitted */

	struct intel_ring_hangcheck hangcheck;

	/* Scratch page (see intel_init_pipe_control()). */
	struct {
		struct drm_i915_gem_object *obj;
		u32 gtt_offset;
	} scratch;

	/* Command parser state. */
	bool needs_cmd_parser;	/* batches must be validated before execution */

	/*
	 * Table of commands the command parser needs to know about for
	 * this engine.
	 */
	DECLARE_HASHTABLE(cmd_hash, I915_CMD_HASH_ORDER);

	/*
	 * Table of registers allowed in commands that read/write registers.
	 */
	const struct drm_i915_reg_table *reg_tables;
	int reg_table_count;

	/*
	 * Returns the bitmask for the length field of the given command,
	 * so the parser can determine each command's length.
	 */
	u32 (*get_cmd_length_mask)(u32 cmd_header);
};
363
364static inline bool
365intel_engine_initialized(const struct intel_engine_cs *engine)
366{
367 return engine->i915 != NULL;
368}
369
370static inline unsigned
371intel_engine_flag(const struct intel_engine_cs *engine)
372{
373 return 1 << engine->id;
374}
375
376static inline u32
377intel_ring_sync_index(struct intel_engine_cs *engine,
378 struct intel_engine_cs *other)
379{
380 int idx;
381
382
383
384
385
386
387
388
389
390 idx = (other - engine) - 1;
391 if (idx < 0)
392 idx += I915_NUM_ENGINES;
393
394 return idx;
395}
396
/*
 * Flush one dword of the hw status page out of the CPU cache so a later
 * read observes the hardware's most recent write; the memory barriers
 * order the clflush against surrounding accesses.
 */
static inline void
intel_flush_status_page(struct intel_engine_cs *engine, int reg)
{
	mb();
	clflush(&engine->status_page.page_addr[reg]);
	mb();
}
404
/* Read one dword from the hw status page. */
static inline u32
intel_read_status_page(struct intel_engine_cs *engine, int reg)
{
	/* Ensure that the compiler doesn't optimize away the load. */
	return READ_ONCE(engine->status_page.page_addr[reg]);
}
411
412static inline void
413intel_write_status_page(struct intel_engine_cs *engine,
414 int reg, u32 value)
415{
416 engine->status_page.page_addr[reg] = value;
417}
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
/*
 * Reserved dword slots within the hardware status page, and their byte
 * addresses as used by MI_STORE_DWORD_INDEX. NOTE(review): presumably
 * dwords from 0x30 upward are free for driver use (earlier slots carry
 * fixed hardware meanings) — confirm against the HWS layout docs.
 */
#define I915_GEM_HWS_INDEX 0x30
#define I915_GEM_HWS_INDEX_ADDR (I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
#define I915_GEM_HWS_SCRATCH_INDEX 0x40
#define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
439
/* Ringbuffer creation, pin/map and teardown. */
struct intel_ringbuffer *
intel_engine_create_ringbuffer(struct intel_engine_cs *engine, int size);
int intel_pin_and_map_ringbuffer_obj(struct drm_i915_private *dev_priv,
				     struct intel_ringbuffer *ringbuf);
void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf);
void intel_ringbuffer_free(struct intel_ringbuffer *ring);

void intel_stop_engine(struct intel_engine_cs *engine);
void intel_cleanup_engine(struct intel_engine_cs *engine);

int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request);

/* Reserve ring space before emitting dwords with intel_ring_emit(). */
int __must_check intel_ring_begin(struct drm_i915_gem_request *req, int n);
int __must_check intel_ring_cacheline_align(struct drm_i915_gem_request *req);
454static inline void intel_ring_emit(struct intel_engine_cs *engine,
455 u32 data)
456{
457 struct intel_ringbuffer *ringbuf = engine->buffer;
458 iowrite32(data, ringbuf->virtual_start + ringbuf->tail);
459 ringbuf->tail += 4;
460}
461static inline void intel_ring_emit_reg(struct intel_engine_cs *engine,
462 i915_reg_t reg)
463{
464 intel_ring_emit(engine, i915_mmio_reg_offset(reg));
465}
466static inline void intel_ring_advance(struct intel_engine_cs *engine)
467{
468 struct intel_ringbuffer *ringbuf = engine->buffer;
469 ringbuf->tail &= ringbuf->size - 1;
470}
/* Free-space accounting for the ring. */
int __intel_ring_space(int head, int tail, int size);
void intel_ring_update_space(struct intel_ringbuffer *ringbuf);

int __must_check intel_engine_idle(struct intel_engine_cs *engine);
void intel_ring_init_seqno(struct intel_engine_cs *engine, u32 seqno);
int intel_ring_flush_all_caches(struct drm_i915_gem_request *req);
int intel_ring_invalidate_all_caches(struct drm_i915_gem_request *req);

/* Scratch page management (fills engine->scratch). */
int intel_init_pipe_control(struct intel_engine_cs *engine, int size);
void intel_fini_pipe_control(struct intel_engine_cs *engine);

/* Per-engine initialisation entry points. */
int intel_init_render_ring_buffer(struct drm_device *dev);
int intel_init_bsd_ring_buffer(struct drm_device *dev);
int intel_init_bsd2_ring_buffer(struct drm_device *dev);
int intel_init_blt_ring_buffer(struct drm_device *dev);
int intel_init_vebox_ring_buffer(struct drm_device *dev);
488u64 intel_ring_get_active_head(struct intel_engine_cs *engine);
489static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine)
490{
491 return intel_read_status_page(engine, I915_GEM_HWS_INDEX);
492}
493
494int init_workarounds_ring(struct intel_engine_cs *engine);
495
496static inline u32 intel_ring_get_tail(struct intel_ringbuffer *ringbuf)
497{
498 return ringbuf->tail;
499}
500
501
502
503
504
505
506
507
508#define MIN_SPACE_FOR_ADD_REQUEST 336
509
510static inline u32 intel_hws_seqno_address(struct intel_engine_cs *engine)
511{
512 return engine->status_page.gfx_addr + I915_GEM_HWS_INDEX_ADDR;
513}
514
515
/* A task sleeping until the engine reaches @seqno; linked into the
 * breadcrumbs.waiters rb-tree.
 */
struct intel_wait {
	struct rb_node node;		/* entry in breadcrumbs.waiters */
	struct task_struct *tsk;	/* task to wake on completion */
	u32 seqno;			/* seqno being waited upon */
};
521
/* A request queued for fence signalling; carries its own wait so the
 * signaler thread can sleep until the request completes.
 */
struct intel_signal_node {
	struct rb_node node;	/* entry in breadcrumbs.signals */
	struct intel_wait wait;
};
526
527int intel_engine_init_breadcrumbs(struct intel_engine_cs *engine);
528
529static inline void intel_wait_init(struct intel_wait *wait, u32 seqno)
530{
531 wait->tsk = current;
532 wait->seqno = seqno;
533}
534
/* True once the wait has been unlinked from the waiters rb-tree —
 * NOTE(review): presumes the node is RB_CLEAR_NODE'd on removal; confirm
 * against intel_engine_remove_wait().
 */
static inline bool intel_wait_complete(const struct intel_wait *wait)
{
	return RB_EMPTY_NODE(&wait->node);
}
539
/* Enqueue/dequeue a waiter; enable_signaling queues a request for the
 * signaler thread to fence-signal on completion.
 */
bool intel_engine_add_wait(struct intel_engine_cs *engine,
			   struct intel_wait *wait);
void intel_engine_remove_wait(struct intel_engine_cs *engine,
			      struct intel_wait *wait);
void intel_engine_enable_signaling(struct drm_i915_gem_request *request);
545
/* True when some task currently acts as the seqno-interrupt bottom-half
 * for this engine, i.e. at least one waiter is queued.
 */
static inline bool intel_engine_has_waiter(struct intel_engine_cs *engine)
{
	return READ_ONCE(engine->breadcrumbs.irq_seqno_bh);
}
550
551static inline bool intel_engine_wakeup(struct intel_engine_cs *engine)
552{
553 bool wakeup = false;
554 struct task_struct *tsk = READ_ONCE(engine->breadcrumbs.irq_seqno_bh);
555
556
557
558
559
560
561
562
563 if (tsk)
564 wakeup = wake_up_process(tsk);
565 return wakeup;
566}
567
/* Breadcrumb maintenance: fake-irq fallback timer, teardown, and mass
 * wakeups of stuck waiters/signalers (used by error-handling paths).
 */
void intel_engine_enable_fake_irq(struct intel_engine_cs *engine);
void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine);
unsigned int intel_kick_waiters(struct drm_i915_private *i915);
unsigned int intel_kick_signalers(struct drm_i915_private *i915);
572
573#endif
574