#ifndef _INTEL_RINGBUFFER_H_
#define _INTEL_RINGBUFFER_H_

#include <linux/hashtable.h>
#include "i915_gem_batch_pool.h"
#include "i915_gem_request.h"
#include "i915_gem_timeline.h"

/* log2 of the number of buckets in each engine's command parser hashtable */
#define I915_CMD_HASH_ORDER 9

#define CACHELINE_BYTES 64
#define CACHELINE_DWORDS (CACHELINE_BYTES / sizeof(uint32_t))

/*
 * The hardware forbids the ring's head pointer from being greater than
 * its tail pointer when both fall within the same cacheline, so always
 * keep at least one cacheline of the ring free: a ring with fewer than
 * this many bytes of space is treated as full.
 */
#define I915_RING_FREE_SPACE 64

struct intel_hw_status_page {
	struct i915_vma *vma;
	u32 *page_addr;
	u32 ggtt_offset;
};

#define I915_READ_TAIL(engine) I915_READ(RING_TAIL((engine)->mmio_base))
#define I915_WRITE_TAIL(engine, val) I915_WRITE(RING_TAIL((engine)->mmio_base), val)

#define I915_READ_START(engine) I915_READ(RING_START((engine)->mmio_base))
#define I915_WRITE_START(engine, val) I915_WRITE(RING_START((engine)->mmio_base), val)

#define I915_READ_HEAD(engine) I915_READ(RING_HEAD((engine)->mmio_base))
#define I915_WRITE_HEAD(engine, val) I915_WRITE(RING_HEAD((engine)->mmio_base), val)

#define I915_READ_CTL(engine) I915_READ(RING_CTL((engine)->mmio_base))
#define I915_WRITE_CTL(engine, val) I915_WRITE(RING_CTL((engine)->mmio_base), val)

#define I915_READ_IMR(engine) I915_READ(RING_IMR((engine)->mmio_base))
#define I915_WRITE_IMR(engine, val) I915_WRITE(RING_IMR((engine)->mmio_base), val)

#define I915_READ_MODE(engine) I915_READ(RING_MI_MODE((engine)->mmio_base))
#define I915_WRITE_MODE(engine, val) I915_WRITE(RING_MI_MODE((engine)->mmio_base), val)
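
/*
 * Usage sketch (illustrative, not part of this header): like I915_READ and
 * I915_WRITE themselves, these helpers expect an implicit dev_priv in
 * scope, e.g. when quiescing a ring before (re)programming it:
 *
 *	struct drm_i915_private *dev_priv = engine->i915;
 *
 *	I915_WRITE_CTL(engine, 0);
 *	I915_WRITE_HEAD(engine, 0);
 *	I915_WRITE_TAIL(engine, 0);
 */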

/*
 * Gen8+ legacy-ring semaphores keep one u64 seqno slot per (signaller,
 * waiter) engine pair inside a single global object (dev_priv->semaphore);
 * these macros compute each slot's offset within that object.
 */
#define gen8_semaphore_seqno_size sizeof(uint64_t)
#define GEN8_SEMAPHORE_OFFSET(__from, __to) \
	(((__from) * I915_NUM_ENGINES + (__to)) * gen8_semaphore_seqno_size)
#define GEN8_SIGNAL_OFFSET(__ring, to) \
	(dev_priv->semaphore->node.start + \
	 GEN8_SEMAPHORE_OFFSET((__ring)->id, (to)))
#define GEN8_WAIT_OFFSET(__ring, from) \
	(dev_priv->semaphore->node.start + \
	 GEN8_SEMAPHORE_OFFSET(from, (__ring)->id))
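
/*
 * Worked example (illustrative): the slot through which the render engine
 * signals a seqno for the blitter to wait upon is indexed by the pair
 * (signaller = RCS, waiter = BCS):
 *
 *	GEN8_SEMAPHORE_OFFSET(RCS, BCS)
 *		== (RCS * I915_NUM_ENGINES + BCS) * sizeof(u64)
 *
 * so the signaller writes at GEN8_SIGNAL_OFFSET(render, BCS) and the
 * waiter polls the very same slot via GEN8_WAIT_OFFSET(blitter, RCS).
 */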
66
67enum intel_engine_hangcheck_action {
68 HANGCHECK_IDLE = 0,
69 HANGCHECK_WAIT,
70 HANGCHECK_ACTIVE,
71 HANGCHECK_KICK,
72 HANGCHECK_HUNG,
73};
74
75#define HANGCHECK_SCORE_RING_HUNG 31
76
77#define I915_MAX_SLICES 3
78#define I915_MAX_SUBSLICES 3
79
80#define instdone_slice_mask(dev_priv__) \
81 (INTEL_GEN(dev_priv__) == 7 ? \
82 1 : INTEL_INFO(dev_priv__)->sseu.slice_mask)
83
84#define instdone_subslice_mask(dev_priv__) \
85 (INTEL_GEN(dev_priv__) == 7 ? \
86 1 : INTEL_INFO(dev_priv__)->sseu.subslice_mask)
87
88#define for_each_instdone_slice_subslice(dev_priv__, slice__, subslice__) \
89 for ((slice__) = 0, (subslice__) = 0; \
90 (slice__) < I915_MAX_SLICES; \
91 (subslice__) = ((subslice__) + 1) < I915_MAX_SUBSLICES ? (subslice__) + 1 : 0, \
92 (slice__) += ((subslice__) == 0)) \
93 for_each_if((BIT(slice__) & instdone_slice_mask(dev_priv__)) && \
94 (BIT(subslice__) & instdone_subslice_mask(dev_priv__)))
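
/*
 * Usage sketch (illustrative): walking only the populated slice/subslice
 * pairs when dumping the per-unit instdone registers, e.g. from a debugfs
 * helper (struct seq_file *m assumed):
 *
 *	struct intel_instdone instdone;
 *	int slice, subslice;
 *
 *	intel_engine_get_instdone(engine, &instdone);
 *	for_each_instdone_slice_subslice(engine->i915, slice, subslice)
 *		seq_printf(m, "SAMPLER_INSTDONE[%d][%d]: 0x%08x\n",
 *			   slice, subslice,
 *			   instdone.sampler[slice][subslice]);
 */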

struct intel_instdone {
	u32 instdone;
	/* The following exist only in the RCS engine */
	u32 slice_common;
	u32 sampler[I915_MAX_SLICES][I915_MAX_SUBSLICES];
	u32 row[I915_MAX_SLICES][I915_MAX_SUBSLICES];
};

struct intel_engine_hangcheck {
	u64 acthd;
	u32 seqno;
	int score;
	enum intel_engine_hangcheck_action action;
	int deadlock;
	struct intel_instdone instdone;
};

struct intel_ring {
	struct i915_vma *vma;
	void *vaddr;

	struct intel_engine_cs *engine;

	struct list_head request_list;

	u32 head;
	u32 tail;
	int space;
	int size;
	int effective_size;

	/* We track the position of each request in the ring buffer; once a
	 * request is retired the GPU must have finished processing it, so
	 * we may advance the ring up to that position. last_retired_head
	 * records it, and is set to -1 after the value is consumed so that
	 * new retirements can be detected.
	 */
	u32 last_retired_head;
};

struct i915_gem_context;
struct drm_i915_reg_table;

/*
 * A single page is used to load the per-context workarounds, so all of
 * these values are expressed in terms of dwords.
 *
 * struct i915_wa_ctx_bb:
 *  offset: starting position of the batch within the page, which also
 *    leaves room for multiple batches at different offsets should the
 *    need arise.
 *  size: size of the batch in dwords.
 */
struct i915_ctx_workarounds {
	struct i915_wa_ctx_bb {
		u32 offset;
		u32 size;
	} indirect_ctx, per_ctx;
	struct i915_vma *vma;
};

struct drm_i915_gem_request;
struct intel_render_state;

struct intel_engine_cs {
	struct drm_i915_private *i915;
	const char *name;
	enum intel_engine_id {
		RCS = 0,
		BCS,
		VCS,
		VCS2,	/* Keep instances of the same type engine together. */
		VECS
	} id;
#define _VCS(n) (VCS + (n))
	unsigned int exec_id;
	enum intel_engine_hw_id {
		RCS_HW = 0,
		VCS_HW,
		BCS_HW,
		VECS_HW,
		VCS2_HW
	} hw_id;
	enum intel_engine_hw_id guc_id; /* XXX same as hw_id? */
	u32 mmio_base;
	unsigned int irq_shift;
	struct intel_ring *buffer;
	struct intel_timeline *timeline;

	struct intel_render_state *render_state;

	/* Rather than have every client wait upon all user interrupts,
	 * with the herd waking after every interrupt and each doing the
	 * heavyweight seqno dance, we delegate the task (of being the
	 * bottom-half of the user interrupt) to the first client. After
	 * every interrupt, we wake up one client, who does the heavyweight
	 * coherent seqno read and either goes back to sleep (if the request
	 * is not completed), or wakes up all the completed clients in
	 * parallel, before then transferring the bottom-half status to the
	 * next client in the queue.
	 */
	struct intel_breadcrumbs {
		struct task_struct __rcu *irq_seqno_bh; /* bh for user interrupts */
		bool irq_posted;

		spinlock_t lock; /* protects the waiter/signal trees */
		struct rb_root waiters; /* sorted by seqno */
		struct rb_root signals; /* sorted by seqno */
		struct intel_wait *first_wait; /* oldest waiter */
		struct task_struct *signaler; /* kthread for fence signaling */
		struct drm_i915_gem_request *first_signal;
		struct timer_list fake_irq; /* used after a missed interrupt */
		struct timer_list hangcheck; /* detect missed interrupts */

		unsigned long timeout;

		bool irq_enabled : 1;
		bool rpm_wakelock : 1;
	} breadcrumbs;

	/*
	 * A pool of objects to use as shadow copies of client batch buffers
	 * when the command parser is enabled. Prevents the client from
	 * modifying the batch contents after software parsing.
	 */
	struct i915_gem_batch_pool batch_pool;

	struct intel_hw_status_page status_page;
	struct i915_ctx_workarounds wa_ctx;
	struct i915_vma *scratch;

	u32 irq_keep_mask; /* always keep these interrupts */
	u32 irq_enable_mask; /* bitmask to enable ring interrupt */
	void		(*irq_enable)(struct intel_engine_cs *engine);
	void		(*irq_disable)(struct intel_engine_cs *engine);

	int		(*init_hw)(struct intel_engine_cs *engine);
	void		(*reset_hw)(struct intel_engine_cs *engine,
				    struct drm_i915_gem_request *req);

	int		(*init_context)(struct drm_i915_gem_request *req);

	int		(*emit_flush)(struct drm_i915_gem_request *request,
				      u32 mode);
#define EMIT_INVALIDATE	BIT(0)
#define EMIT_FLUSH	BIT(1)
#define EMIT_BARRIER	(EMIT_INVALIDATE | EMIT_FLUSH)
	int		(*emit_bb_start)(struct drm_i915_gem_request *req,
					 u64 offset, u32 length,
					 unsigned int dispatch_flags);
#define I915_DISPATCH_SECURE BIT(0)
#define I915_DISPATCH_PINNED BIT(1)
#define I915_DISPATCH_RS     BIT(2)
	void		(*emit_breadcrumb)(struct drm_i915_gem_request *req,
					   u32 *out);
	int		emit_breadcrumb_sz;

	/* Pass the request to the hardware queue (e.g. directly into
	 * the legacy ringbuffer or to the GuC).
	 *
	 * This is called from an atomic context with irqs disabled; must
	 * be irq safe.
	 */
	void		(*submit_request)(struct drm_i915_gem_request *req);

	/* Call when the priority on a request has changed and it and its
	 * dependencies may need rescheduling. Note the request itself may
	 * not be ready to run!
	 */
	void		(*schedule)(struct drm_i915_gem_request *request,
				    int priority);

	/* Some chipsets are not quite as coherent as advertised and need
	 * an expensive kick to force a true read of the up-to-date seqno.
	 * However, the up-to-date seqno is not always required and the last
	 * seen value is good enough. Note that the seqno will always be
	 * monotonic, even if not coherent.
	 */
	void		(*irq_seqno_barrier)(struct intel_engine_cs *engine);
	void		(*cleanup)(struct intel_engine_cs *engine);

	/* Inter-engine semaphores: on gen6/7 each engine pair communicates
	 * through dedicated mbox registers, while on gen8+ every pair has
	 * a seqno slot in a shared GGTT page (see GEN8_SEMAPHORE_OFFSET).
	 */
	struct {
		union {
#define GEN6_SEMAPHORE_LAST	VECS_HW
#define GEN6_NUM_SEMAPHORES	(GEN6_SEMAPHORE_LAST + 1)
#define GEN6_SEMAPHORES_MASK	GENMASK(GEN6_SEMAPHORE_LAST, 0)
			struct {
				/* our mbox written by others */
				u32		wait[GEN6_NUM_SEMAPHORES];
				/* mboxes this ring signals to */
				i915_reg_t	signal[GEN6_NUM_SEMAPHORES];
			} mbox;
			u64		signal_ggtt[I915_NUM_ENGINES];
		};

		/* AKA wait() */
		int	(*sync_to)(struct drm_i915_gem_request *req,
				   struct drm_i915_gem_request *signal);
		u32	*(*signal)(struct drm_i915_gem_request *req, u32 *out);
	} semaphore;

	/* Execlists */
	struct tasklet_struct irq_tasklet;
	struct execlist_port {
		struct drm_i915_gem_request *request;
		unsigned int count;
	} execlist_port[2];
	struct rb_root execlist_queue;
	struct rb_node *execlist_first;
	unsigned int fw_domains;
	bool disable_lite_restore_wa;
	bool preempt_wa;
	u32 ctx_desc_template;

	struct i915_gem_context *last_context;

	struct intel_engine_hangcheck hangcheck;

	bool needs_cmd_parser;

	/*
	 * Table of commands the command parser needs to know about
	 * for this engine.
	 */
	DECLARE_HASHTABLE(cmd_hash, I915_CMD_HASH_ORDER);

	/*
	 * Table of registers allowed in commands that read/write registers.
	 */
	const struct drm_i915_reg_table *reg_tables;
	int reg_table_count;

	/*
	 * Returns the bitmask for the length field of the specified command.
	 * Return 0 for an unrecognized/invalid command.
	 *
	 * If the command parser finds an entry for a command in the engine's
	 * cmd_hash, it gets the command's length based on the table entry.
	 * If not, it calls this function to determine the per-engine length
	 * field encoding for the command (i.e. different opcode ranges use
	 * certain bits to encode the command length in the header).
	 */
	u32 (*get_cmd_length_mask)(u32 cmd_header);
};

static inline unsigned
intel_engine_flag(const struct intel_engine_cs *engine)
{
	return 1 << engine->id;
}

static inline void
intel_flush_status_page(struct intel_engine_cs *engine, int reg)
{
	mb();
	clflush(&engine->status_page.page_addr[reg]);
	mb();
}

static inline u32
intel_read_status_page(struct intel_engine_cs *engine, int reg)
{
	/* Ensure that the compiler doesn't optimize away the load. */
	return READ_ONCE(engine->status_page.page_addr[reg]);
}

static inline void
intel_write_status_page(struct intel_engine_cs *engine,
			int reg, u32 value)
{
	engine->status_page.page_addr[reg] = value;
}
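
/*
 * Usage sketch (illustrative): sampling the seqno the GPU last wrote into
 * the status page, and forcing a scratch dword out to memory so the GPU
 * observes it:
 *
 *	u32 seqno = intel_read_status_page(engine, I915_GEM_HWS_INDEX);
 *
 *	intel_write_status_page(engine, I915_GEM_HWS_SCRATCH_INDEX, 0);
 *	intel_flush_status_page(engine, I915_GEM_HWS_SCRATCH_INDEX);
 */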

/*
 * Reads a dword out of the status page, which is written to from the command
 * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
 * MI_STORE_DATA_IMM.
 *
 * The following dwords have a reserved meaning:
 * 0x00: ISR copy, updated when an ISR bit not set in the HWSTAM changes.
 * 0x04: ring 0 head pointer
 * 0x05: ring 1 head pointer (915-class)
 * 0x06: ring 2 head pointer (915-class)
 * 0x10-0x1b: Context status DWords (GM45)
 * 0x1f: Last written status offset. (GM45)
 * 0x20-0x2f: Reserved (Gen6+)
 *
 * The area from dword 0x30 to 0x3ff is available for driver usage.
 */
#define I915_GEM_HWS_INDEX		0x30
#define I915_GEM_HWS_INDEX_ADDR (I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
#define I915_GEM_HWS_SCRATCH_INDEX	0x40
#define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT)

struct intel_ring *
intel_engine_create_ring(struct intel_engine_cs *engine, int size);
int intel_ring_pin(struct intel_ring *ring);
void intel_ring_unpin(struct intel_ring *ring);
void intel_ring_free(struct intel_ring *ring);

void intel_engine_stop(struct intel_engine_cs *engine);
void intel_engine_cleanup(struct intel_engine_cs *engine);

void intel_legacy_submission_resume(struct drm_i915_private *dev_priv);

int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request);

int __must_check intel_ring_begin(struct drm_i915_gem_request *req, int n);
int __must_check intel_ring_cacheline_align(struct drm_i915_gem_request *req);

static inline void intel_ring_emit(struct intel_ring *ring, u32 data)
{
	*(uint32_t *)(ring->vaddr + ring->tail) = data;
	ring->tail += 4;
}

static inline void intel_ring_emit_reg(struct intel_ring *ring, i915_reg_t reg)
{
	intel_ring_emit(ring, i915_mmio_reg_offset(reg));
}

static inline void intel_ring_advance(struct intel_ring *ring)
{
	/* Intentionally a no-op: ring->tail is advanced by each
	 * intel_ring_emit(). This stub remains as the bookend pairing
	 * intel_ring_begin(), marking the point at which command emission
	 * is complete; the final tail is only written to the hardware
	 * when the request is submitted.
	 */
}
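
/*
 * Usage sketch (illustrative): a typical emitter reserves space for its
 * commands, writes them dword by dword and closes the block:
 *
 *	struct intel_ring *ring = req->ring;
 *	int ret;
 *
 *	ret = intel_ring_begin(req, 2);
 *	if (ret)
 *		return ret;
 *
 *	intel_ring_emit(ring, MI_NOOP);
 *	intel_ring_emit(ring, MI_NOOP);
 *	intel_ring_advance(ring);
 */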

static inline u32 intel_ring_offset(struct intel_ring *ring, void *addr)
{
	/* The ring size is a power of two, so offsets simply wrap around */
	u32 offset = addr - ring->vaddr;
	return offset & (ring->size - 1);
}

int __intel_ring_space(int head, int tail, int size);
void intel_ring_update_space(struct intel_ring *ring);
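
/*
 * Sketch of the wraparound rule (assuming the implementation in
 * intel_ringbuffer.c): the free space runs forwards from tail to head,
 * modulo the ring size, minus the mandatory cacheline of reserve:
 *
 *	space = head - tail;
 *	if (space <= 0)
 *		space += size;
 *	space -= I915_RING_FREE_SPACE;
 *
 * e.g. head == 256, tail == 4096, size == 8192 gives 4352 - 64 bytes free.
 */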

void intel_engine_init_global_seqno(struct intel_engine_cs *engine, u32 seqno);

void intel_engine_setup_common(struct intel_engine_cs *engine);
int intel_engine_init_common(struct intel_engine_cs *engine);
int intel_engine_create_scratch(struct intel_engine_cs *engine, int size);
void intel_engine_cleanup_common(struct intel_engine_cs *engine);

int intel_init_render_ring_buffer(struct intel_engine_cs *engine);
int intel_init_bsd_ring_buffer(struct intel_engine_cs *engine);
int intel_init_bsd2_ring_buffer(struct intel_engine_cs *engine);
int intel_init_blt_ring_buffer(struct intel_engine_cs *engine);
int intel_init_vebox_ring_buffer(struct intel_engine_cs *engine);

u64 intel_engine_get_active_head(struct intel_engine_cs *engine);
u64 intel_engine_get_last_batch_head(struct intel_engine_cs *engine);

static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine)
{
	return intel_read_status_page(engine, I915_GEM_HWS_INDEX);
}

static inline u32 intel_engine_last_submit(struct intel_engine_cs *engine)
{
	/* We are only peeking at the tail of the submit queue (and not the
	 * queue itself) in order to gain a hint as to the current active
	 * state of the engine. Callers are not expected to be taking
	 * engine->timeline->lock, nor are they expected to be concerned
	 * with serialisation, so we may perform the unlocked read.
	 */
	return READ_ONCE(engine->timeline->last_submitted_seqno);
}

int init_workarounds_ring(struct intel_engine_cs *engine);

void intel_engine_get_instdone(struct intel_engine_cs *engine,
			       struct intel_instdone *instdone);

/*
 * Arbitrary size for the largest possible 'add request' sequence. The code
 * paths are complex and variable; empirical measurement of the worst case
 * determines this value.
 */
#define MIN_SPACE_FOR_ADD_REQUEST 336

static inline u32 intel_hws_seqno_address(struct intel_engine_cs *engine)
{
	return engine->status_page.ggtt_offset + I915_GEM_HWS_INDEX_ADDR;
}

/* intel_breadcrumbs.c -- user interrupt bottom-half for waiters */
int intel_engine_init_breadcrumbs(struct intel_engine_cs *engine);

static inline void intel_wait_init(struct intel_wait *wait, u32 seqno)
{
	wait->tsk = current;
	wait->seqno = seqno;
}

static inline bool intel_wait_complete(const struct intel_wait *wait)
{
	return RB_EMPTY_NODE(&wait->node);
}

bool intel_engine_add_wait(struct intel_engine_cs *engine,
			   struct intel_wait *wait);
void intel_engine_remove_wait(struct intel_engine_cs *engine,
			      struct intel_wait *wait);
void intel_engine_enable_signaling(struct drm_i915_gem_request *request);
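
/*
 * Usage sketch (illustrative, condensed from the wait loop in
 * i915_gem_request.c): a waiter registers itself with the engine, sleeps
 * until its seqno has been reached, then unregisters:
 *
 *	struct intel_wait wait;
 *
 *	intel_wait_init(&wait, seqno);
 *	intel_engine_add_wait(engine, &wait);
 *	for (;;) {
 *		set_current_state(TASK_UNINTERRUPTIBLE);
 *		if (i915_seqno_passed(intel_engine_get_seqno(engine), seqno))
 *			break;
 *		schedule();	(woken by the breadcrumbs bottom-half)
 *	}
 *	__set_current_state(TASK_RUNNING);
 *	intel_engine_remove_wait(engine, &wait);
 */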

static inline bool intel_engine_has_waiter(const struct intel_engine_cs *engine)
{
	return rcu_access_pointer(engine->breadcrumbs.irq_seqno_bh);
}

static inline bool intel_engine_wakeup(const struct intel_engine_cs *engine)
{
	bool wakeup = false;

	/* Note that for this not to dangerously chase a dangling pointer,
	 * we must hold the rcu_read_lock here.
	 *
	 * Also note that tsk is likely to be in !TASK_RUNNING state so an
	 * early test for tsk->state != TASK_RUNNING before wake_up_process()
	 * is unlikely to be beneficial.
	 */
	if (intel_engine_has_waiter(engine)) {
		struct task_struct *tsk;

		rcu_read_lock();
		tsk = rcu_dereference(engine->breadcrumbs.irq_seqno_bh);
		if (tsk)
			wakeup = wake_up_process(tsk);
		rcu_read_unlock();
	}

	return wakeup;
}

void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine);
void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine);
unsigned int intel_breadcrumbs_busy(struct drm_i915_private *i915);
#endif /* _INTEL_RINGBUFFER_H_ */