#ifndef _INTEL_RINGBUFFER_H_
#define _INTEL_RINGBUFFER_H_

#include <linux/hashtable.h>
#include "i915_gem_batch_pool.h"
#include "i915_gem_request.h"
#include "i915_gem_timeline.h"
#include "i915_pmu.h"
#include "i915_selftest.h"

struct drm_printer;

#define I915_CMD_HASH_ORDER 9

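/*
 * Pad/align ring state to cacheline boundaries; 64 bytes matches the
 * cacheline size of the CPUs this driver runs on, and keeps read-mostly
 * and write-mostly data on separate cachelines.
 */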
#define CACHELINE_BYTES 64
#define CACHELINE_DWORDS (CACHELINE_BYTES / sizeof(uint32_t))

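/*
 * The hardware status page (HWSP): a page of GGTT-mapped memory that the GPU
 * writes into (seqno breadcrumbs, context-status events) and the CPU reads.
 * We track its backing vma, its CPU mapping and its offset in the global GTT.
 */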
struct intel_hw_status_page {
	struct i915_vma *vma;
	u32 *page_addr;
	u32 ggtt_offset;
};

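/* Engine register accessors; every engine register is offset from mmio_base. */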
#define I915_READ_TAIL(engine) I915_READ(RING_TAIL((engine)->mmio_base))
#define I915_WRITE_TAIL(engine, val) I915_WRITE(RING_TAIL((engine)->mmio_base), val)

#define I915_READ_START(engine) I915_READ(RING_START((engine)->mmio_base))
#define I915_WRITE_START(engine, val) I915_WRITE(RING_START((engine)->mmio_base), val)

#define I915_READ_HEAD(engine) I915_READ(RING_HEAD((engine)->mmio_base))
#define I915_WRITE_HEAD(engine, val) I915_WRITE(RING_HEAD((engine)->mmio_base), val)

#define I915_READ_CTL(engine) I915_READ(RING_CTL((engine)->mmio_base))
#define I915_WRITE_CTL(engine, val) I915_WRITE(RING_CTL((engine)->mmio_base), val)

#define I915_READ_IMR(engine) I915_READ(RING_IMR((engine)->mmio_base))
#define I915_WRITE_IMR(engine, val) I915_WRITE(RING_IMR((engine)->mmio_base), val)

#define I915_READ_MODE(engine) I915_READ(RING_MI_MODE((engine)->mmio_base))
#define I915_WRITE_MODE(engine, val) I915_WRITE(RING_MI_MODE((engine)->mmio_base), val)

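/*
 * Coarse classification of what an engine appears to be doing when hangcheck
 * samples it; repeated samples in the same "active" state without progress
 * escalate towards ENGINE_DEAD and a GPU reset.
 */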
enum intel_engine_hangcheck_action {
	ENGINE_IDLE = 0,
	ENGINE_WAIT,
	ENGINE_ACTIVE_SEQNO,
	ENGINE_ACTIVE_HEAD,
	ENGINE_ACTIVE_SUBUNITS,
	ENGINE_WAIT_KICK,
	ENGINE_DEAD,
};

static inline const char *
hangcheck_action_to_str(const enum intel_engine_hangcheck_action a)
{
	switch (a) {
	case ENGINE_IDLE:
		return "idle";
	case ENGINE_WAIT:
		return "wait";
	case ENGINE_ACTIVE_SEQNO:
		return "active seqno";
	case ENGINE_ACTIVE_HEAD:
		return "active head";
	case ENGINE_ACTIVE_SUBUNITS:
		return "active subunits";
	case ENGINE_WAIT_KICK:
		return "wait kick";
	case ENGINE_DEAD:
		return "dead";
	}

	return "unknown";
}

#define I915_MAX_SLICES 3
#define I915_MAX_SUBSLICES 3

#define instdone_slice_mask(dev_priv__) \
	(INTEL_GEN(dev_priv__) == 7 ? \
	 1 : INTEL_INFO(dev_priv__)->sseu.slice_mask)

#define instdone_subslice_mask(dev_priv__) \
	(INTEL_GEN(dev_priv__) == 7 ? \
	 1 : INTEL_INFO(dev_priv__)->sseu.subslice_mask)

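/*
 * Walk every (slice, subslice) pair that has INSTDONE state on this device
 * (gen7 exposes a single slice/subslice). A typical use, sketched with an
 * illustrative helper (read_subslice_reg() is not a name this header
 * provides):
 *
 *	for_each_instdone_slice_subslice(dev_priv, slice, subslice)
 *		instdone->sampler[slice][subslice] =
 *			read_subslice_reg(dev_priv, slice, subslice, ...);
 */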
#define for_each_instdone_slice_subslice(dev_priv__, slice__, subslice__) \
	for ((slice__) = 0, (subslice__) = 0; \
	     (slice__) < I915_MAX_SLICES; \
	     (subslice__) = ((subslice__) + 1) < I915_MAX_SUBSLICES ? (subslice__) + 1 : 0, \
	       (slice__) += ((subslice__) == 0)) \
		for_each_if((BIT(slice__) & instdone_slice_mask(dev_priv__)) && \
			    (BIT(subslice__) & instdone_subslice_mask(dev_priv__)))

struct intel_instdone {
	u32 instdone;

	u32 slice_common;
	u32 sampler[I915_MAX_SLICES][I915_MAX_SUBSLICES];
	u32 row[I915_MAX_SLICES][I915_MAX_SUBSLICES];
};

struct intel_engine_hangcheck {
	u64 acthd;
	u32 seqno;
	enum intel_engine_hangcheck_action action;
	unsigned long action_timestamp;
	int deadlock;
	struct intel_instdone instdone;
	struct drm_i915_gem_request *active_request;
	bool stalled;
};

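/*
 * The ringbuffer itself: a vma plus its CPU mapping, and bookkeeping offsets
 * into it. head/tail track the hardware consumer/producer positions, emit is
 * where request construction is currently writing, space is the bytes known
 * to be free, and effective_size is the usable size, less any dead space
 * some platforms require at the tail.
 */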
struct intel_ring {
	struct i915_vma *vma;
	void *vaddr;

	struct list_head request_list;

	u32 head;
	u32 tail;
	u32 emit;

	u32 space;
	u32 size;
	u32 effective_size;
};

struct i915_gem_context;
struct drm_i915_reg_table;

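/*
 * Per-context workaround batch buffers. Both batches live in a single page
 * (the vma below); offset is the start of each batch within that page and
 * size its length, both in dwords.
 */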
struct i915_ctx_workarounds {
	struct i915_wa_ctx_bb {
		u32 offset;
		u32 size;
	} indirect_ctx, per_ctx;
	struct i915_vma *vma;
};

struct drm_i915_gem_request;

enum intel_engine_id {
	RCS = 0,
	BCS,
	VCS,
	VCS2,
#define _VCS(n) (VCS + (n))
	VECS
};

struct i915_priolist {
	struct rb_node node;
	struct list_head requests;
	int priority;
};

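/**
 * struct intel_engine_execlists - execlist submission queue and port state
 *
 * Combined driver and hardware state for execlist-mode submission.
 *
 * @tasklet: softirq tasklet running the submission bottom half.
 * @default_priolist: priority list for I915_PRIORITY_NORMAL requests.
 * @no_priolist: priority lists are disabled (single-list fallback).
 * @elsp: the ExecList Submission Port register for this engine.
 * @port: requests currently submitted to hardware, oldest first; the request
 *	pointer and a submission count are packed into its low bits.
 * @active: bitmask of EXECLISTS_ACTIVE_* states (user batch in flight,
 *	preemption outstanding, awaiting hardware ack).
 * @port_mask: number of execlist ports - 1.
 * @queue: rbtree of pending requests, keyed by priority.
 * @first: leftmost (highest-priority) node in @queue.
 * @fw_domains: forcewake domains that must be held while the tasklet
 *	touches the ELSP/CSB registers.
 * @csb_head: our cached context-status-buffer read pointer.
 * @csb_use_mmio: read the CSB via MMIO rather than the HWSP mirror.
 */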
struct intel_engine_execlists {
	struct tasklet_struct tasklet;

	struct i915_priolist default_priolist;

	bool no_priolist;

	u32 __iomem *elsp;

	struct execlist_port {
		struct drm_i915_gem_request *request_count;
#define EXECLIST_COUNT_BITS 2
#define port_request(p) ptr_mask_bits((p)->request_count, EXECLIST_COUNT_BITS)
#define port_count(p) ptr_unmask_bits((p)->request_count, EXECLIST_COUNT_BITS)
#define port_pack(rq, count) ptr_pack_bits(rq, count, EXECLIST_COUNT_BITS)
#define port_unpack(p, count) ptr_unpack_bits((p)->request_count, count, EXECLIST_COUNT_BITS)
#define port_set(p, packed) ((p)->request_count = (packed))
#define port_isset(p) ((p)->request_count)
#define port_index(p, execlists) ((p) - (execlists)->port)

		GEM_DEBUG_DECL(u32 context_id);

#define EXECLIST_MAX_PORTS 2
	} port[EXECLIST_MAX_PORTS];

	unsigned int active;
#define EXECLISTS_ACTIVE_USER 0
#define EXECLISTS_ACTIVE_PREEMPT 1
#define EXECLISTS_ACTIVE_HWACK 2

	unsigned int port_mask;

	struct rb_root queue;

	struct rb_node *first;

	unsigned int fw_domains;

	unsigned int csb_head;

	bool csb_use_mmio;
};

#define INTEL_ENGINE_CS_MAX_NAME 8

struct intel_engine_cs {
	struct drm_i915_private *i915;
	char name[INTEL_ENGINE_CS_MAX_NAME];

	enum intel_engine_id id;
	unsigned int hw_id;
	unsigned int guc_id;

	u8 uabi_id;
	u8 uabi_class;

	u8 class;
	u8 instance;
	u32 context_size;
	u32 mmio_base;
	unsigned int irq_shift;

	struct intel_ring *buffer;
	struct intel_timeline *timeline;

	struct drm_i915_gem_object *default_state;

	atomic_t irq_count;
	unsigned long irq_posted;
#define ENGINE_IRQ_BREADCRUMB 0
#define ENGINE_IRQ_EXECLIST 1

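	/*
	 * Rather than have every waiting client sit in the interrupt handler,
	 * one task (@signaler) acts as the bottom half for user interrupts:
	 * the first waiter in the rbtree (@irq_wait) is woken on each
	 * breadcrumb interrupt and in turn wakes any later waiters and
	 * signals completed fences. @fake_irq and @hangcheck paper over
	 * missed interrupts.
	 */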
	struct intel_breadcrumbs {
		spinlock_t irq_lock;
		struct intel_wait *irq_wait;

		spinlock_t rb_lock;
		struct rb_root waiters;
		struct rb_root signals;
		struct task_struct *signaler;
		struct drm_i915_gem_request __rcu *first_signal;
		struct timer_list fake_irq;
		struct timer_list hangcheck;

		unsigned int hangcheck_interrupts;
		unsigned int irq_enabled;

		bool irq_armed : 1;
		I915_SELFTEST_DECLARE(bool mock : 1);
	} breadcrumbs;

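	/*
	 * PMU bookkeeping: @enable is the bitmask of sampling events the
	 * sampling timer needs for this engine, @enable_count reference
	 * counts each sample type, and @sample holds the most recent
	 * counter values.
	 */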
	struct {
		u32 enable;

		unsigned int enable_count[I915_PMU_SAMPLE_BITS];

#define I915_ENGINE_SAMPLE_MAX (I915_SAMPLE_SEMA + 1)
		struct i915_pmu_sample sample[I915_ENGINE_SAMPLE_MAX];
	} pmu;

	struct i915_gem_batch_pool batch_pool;

	struct intel_hw_status_page status_page;
	struct i915_ctx_workarounds wa_ctx;
	struct i915_vma *scratch;

	u32 irq_keep_mask;
	u32 irq_enable_mask;
	void (*irq_enable)(struct intel_engine_cs *engine);
	void (*irq_disable)(struct intel_engine_cs *engine);

	int (*init_hw)(struct intel_engine_cs *engine);
	void (*reset_hw)(struct intel_engine_cs *engine,
			 struct drm_i915_gem_request *req);

	void (*park)(struct intel_engine_cs *engine);
	void (*unpark)(struct intel_engine_cs *engine);

	void (*set_default_submission)(struct intel_engine_cs *engine);

	struct intel_ring *(*context_pin)(struct intel_engine_cs *engine,
					  struct i915_gem_context *ctx);
	void (*context_unpin)(struct intel_engine_cs *engine,
			      struct i915_gem_context *ctx);
	int (*request_alloc)(struct drm_i915_gem_request *req);
	int (*init_context)(struct drm_i915_gem_request *req);

	int (*emit_flush)(struct drm_i915_gem_request *request,
			  u32 mode);
#define EMIT_INVALIDATE BIT(0)
#define EMIT_FLUSH BIT(1)
#define EMIT_BARRIER (EMIT_INVALIDATE | EMIT_FLUSH)
	int (*emit_bb_start)(struct drm_i915_gem_request *req,
			     u64 offset, u32 length,
			     unsigned int dispatch_flags);
#define I915_DISPATCH_SECURE BIT(0)
#define I915_DISPATCH_PINNED BIT(1)
#define I915_DISPATCH_RS BIT(2)
	void (*emit_breadcrumb)(struct drm_i915_gem_request *req,
				u32 *cs);
	int emit_breadcrumb_sz;

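	/*
	 * Pass the request to the hardware queue (e.g. the execlist ports,
	 * or the legacy RING_TAIL write). Called from atomic context with
	 * irqs disabled, so it must be irq safe.
	 */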
	void (*submit_request)(struct drm_i915_gem_request *req);

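	/*
	 * Called when the priority on a request has changed so that it and
	 * its (possibly not yet ready) dependents can be rescheduled. May
	 * be NULL if the engine does not support priority scheduling.
	 */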
	void (*schedule)(struct drm_i915_gem_request *request,
			 int priority);

	void (*cancel_requests)(struct intel_engine_cs *engine);

	void (*irq_seqno_barrier)(struct intel_engine_cs *engine);
	void (*cleanup)(struct intel_engine_cs *engine);

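	/*
	 * Pre-gen8 inter-engine semaphores: mbox.wait are our mailboxes,
	 * written by the other engines, and mbox.signal the registers we
	 * write to signal them. sync_to() makes @req wait upon @signal's
	 * completion; signal() emits the semaphore updates into @req's
	 * breadcrumb.
	 */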
	struct {
#define GEN6_SEMAPHORE_LAST VECS_HW
#define GEN6_NUM_SEMAPHORES (GEN6_SEMAPHORE_LAST + 1)
#define GEN6_SEMAPHORES_MASK GENMASK(GEN6_SEMAPHORE_LAST, 0)
		struct {
			u32 wait[GEN6_NUM_SEMAPHORES];

			i915_reg_t signal[GEN6_NUM_SEMAPHORES];
		} mbox;

		int (*sync_to)(struct drm_i915_gem_request *req,
			       struct drm_i915_gem_request *signal);
		u32 *(*signal)(struct drm_i915_gem_request *req, u32 *cs);
	} semaphore;

	struct intel_engine_execlists execlists;

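	/*
	 * Contexts remain pinned while active on the GPU, and the last
	 * context stays "active" even when the engine idles (the switch
	 * away only happens on the next execution), so a context is only
	 * unpinned on retirement of the following request. This tracks
	 * that still-pinned context.
	 */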
	struct i915_gem_context *last_retired_context;

	struct i915_gem_context *legacy_active_context;
	struct i915_hw_ppgtt *legacy_active_ppgtt;

	struct atomic_notifier_head context_status_notifier;

	struct intel_engine_hangcheck hangcheck;

#define I915_ENGINE_NEEDS_CMD_PARSER BIT(0)
#define I915_ENGINE_SUPPORTS_STATS BIT(1)
	unsigned int flags;

	DECLARE_HASHTABLE(cmd_hash, I915_CMD_HASH_ORDER);

	const struct drm_i915_reg_table *reg_tables;
	int reg_table_count;

	u32 (*get_cmd_length_mask)(u32 cmd_header);

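	/*
	 * Busyness accounting, guarded by @lock: @enabled counts listeners,
	 * @active counts contexts currently on the hardware, @enabled_at is
	 * when accounting was switched on, @start when the engine last went
	 * from idle to busy, and @total accumulates time spent executing.
	 */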
	struct {
		spinlock_t lock;

		unsigned int enabled;

		unsigned int active;

		ktime_t enabled_at;

		ktime_t start;

		ktime_t total;
	} stats;
};

static inline bool intel_engine_needs_cmd_parser(struct intel_engine_cs *engine)
{
	return engine->flags & I915_ENGINE_NEEDS_CMD_PARSER;
}

static inline bool intel_engine_supports_stats(struct intel_engine_cs *engine)
{
	return engine->flags & I915_ENGINE_SUPPORTS_STATS;
}

static inline void
execlists_set_active(struct intel_engine_execlists *execlists,
		     unsigned int bit)
{
	__set_bit(bit, (unsigned long *)&execlists->active);
}

static inline void
execlists_clear_active(struct intel_engine_execlists *execlists,
		       unsigned int bit)
{
	__clear_bit(bit, (unsigned long *)&execlists->active);
}

static inline bool
execlists_is_active(const struct intel_engine_execlists *execlists,
		    unsigned int bit)
{
	return test_bit(bit, (unsigned long *)&execlists->active);
}

void
execlists_cancel_port_requests(struct intel_engine_execlists * const execlists);

void
execlists_unwind_incomplete_requests(struct intel_engine_execlists *execlists);

static inline unsigned int
execlists_num_ports(const struct intel_engine_execlists * const execlists)
{
	return execlists->port_mask + 1;
}

static inline void
execlists_port_complete(struct intel_engine_execlists * const execlists,
			struct execlist_port * const port)
{
	const unsigned int m = execlists->port_mask;

	GEM_BUG_ON(port_index(port, execlists) != 0);
	GEM_BUG_ON(!execlists_is_active(execlists, EXECLISTS_ACTIVE_USER));

	memmove(port, port + 1, m * sizeof(struct execlist_port));
	memset(port + m, 0, sizeof(struct execlist_port));
}

static inline unsigned int
intel_engine_flag(const struct intel_engine_cs *engine)
{
	return BIT(engine->id);
}

static inline u32
intel_read_status_page(struct intel_engine_cs *engine, int reg)
{
	return READ_ONCE(engine->status_page.page_addr[reg]);
}

static inline void
intel_write_status_page(struct intel_engine_cs *engine, int reg, u32 value)
{
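	/*
	 * A plain store can leave the new value in the CPU cache while the
	 * GPU snoops the status page, so where CLFLUSH is available we
	 * flush the line on either side of the write to keep it coherent
	 * with the GPU.
	 */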
	if (static_cpu_has(X86_FEATURE_CLFLUSH)) {
		mb();
		clflush(&engine->status_page.page_addr[reg]);
		engine->status_page.page_addr[reg] = value;
		clflush(&engine->status_page.page_addr[reg]);
		mb();
	} else {
		WRITE_ONCE(engine->status_page.page_addr[reg], value);
	}
}

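/*
 * Reserved dwords in the hardware status page. The page is written from the
 * command stream by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX
 * or MI_STORE_DATA_IMM; the area from dword 0x30 to 0x3ff is available for
 * driver usage. The indices below carve out the dwords we use (seqno
 * breadcrumb, preemption acknowledgement, scratch) plus where the hardware
 * mirrors its context-status buffer (CSB) and the CSB write pointer.
 */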
#define I915_GEM_HWS_INDEX 0x30
#define I915_GEM_HWS_INDEX_ADDR (I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
#define I915_GEM_HWS_PREEMPT_INDEX 0x32
#define I915_GEM_HWS_PREEMPT_ADDR (I915_GEM_HWS_PREEMPT_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
#define I915_GEM_HWS_SCRATCH_INDEX 0x40
#define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT)

#define I915_HWS_CSB_BUF0_INDEX 0x10
#define I915_HWS_CSB_WRITE_INDEX 0x1f
#define CNL_HWS_CSB_WRITE_INDEX 0x2f

struct intel_ring *
intel_engine_create_ring(struct intel_engine_cs *engine, int size);
int intel_ring_pin(struct intel_ring *ring,
		   struct drm_i915_private *i915,
		   unsigned int offset_bias);
void intel_ring_reset(struct intel_ring *ring, u32 tail);
unsigned int intel_ring_update_space(struct intel_ring *ring);
void intel_ring_unpin(struct intel_ring *ring);
void intel_ring_free(struct intel_ring *ring);

void intel_engine_stop(struct intel_engine_cs *engine);
void intel_engine_cleanup(struct intel_engine_cs *engine);

void intel_legacy_submission_resume(struct drm_i915_private *dev_priv);

int __must_check intel_ring_cacheline_align(struct drm_i915_gem_request *req);

int intel_ring_wait_for_space(struct intel_ring *ring, unsigned int bytes);
u32 __must_check *intel_ring_begin(struct drm_i915_gem_request *req,
				   unsigned int n);

static inline void
intel_ring_advance(struct drm_i915_gem_request *req, u32 *cs)
{
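	/*
	 * "Advancing" is a no-op at runtime: it marks the point where the
	 * caller has emitted exactly the dwords it reserved with the
	 * preceding intel_ring_begin(). The usual pattern is:
	 *
	 *	cs = intel_ring_begin(req, 2);
	 *	if (IS_ERR(cs))
	 *		return PTR_ERR(cs);
	 *	*cs++ = MI_NOOP;
	 *	*cs++ = MI_NOOP;
	 *	intel_ring_advance(req, cs);
	 *
	 * The assert below checks that contract.
	 */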
	GEM_BUG_ON((req->ring->vaddr + req->ring->emit) != cs);
}

static inline u32
intel_ring_wrap(const struct intel_ring *ring, u32 pos)
{
	return pos & (ring->size - 1);
}

static inline u32
intel_ring_offset(const struct drm_i915_gem_request *req, void *addr)
{
	u32 offset = addr - req->ring->vaddr;
	GEM_BUG_ON(offset > req->ring->size);
	return intel_ring_wrap(req->ring, offset);
}

static inline void
assert_ring_tail_valid(const struct intel_ring *ring, unsigned int tail)
{
	GEM_BUG_ON(!IS_ALIGNED(tail, 8));
	GEM_BUG_ON(tail >= ring->size);

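	/*
	 * The hardware's "Ring Buffer Use" note forbids the head and tail
	 * pointers sharing a cacheline with the tail numerically below the
	 * head. ring->head is only our last known position of the real
	 * RING_HEAD, so never program a RING_TAIL that would advance into
	 * the same cacheline while behind it.
	 */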
#define cacheline(a) round_down(a, CACHELINE_BYTES)
	GEM_BUG_ON(cacheline(tail) == cacheline(ring->head) &&
		   tail < ring->head);
#undef cacheline
}

static inline unsigned int
intel_ring_set_tail(struct intel_ring *ring, unsigned int tail)
{
	assert_ring_tail_valid(ring, tail);
	ring->tail = tail;
	return tail;
}

void intel_engine_init_global_seqno(struct intel_engine_cs *engine, u32 seqno);

void intel_engine_setup_common(struct intel_engine_cs *engine);
int intel_engine_init_common(struct intel_engine_cs *engine);
int intel_engine_create_scratch(struct intel_engine_cs *engine, int size);
void intel_engine_cleanup_common(struct intel_engine_cs *engine);

int intel_init_render_ring_buffer(struct intel_engine_cs *engine);
int intel_init_bsd_ring_buffer(struct intel_engine_cs *engine);
int intel_init_blt_ring_buffer(struct intel_engine_cs *engine);
int intel_init_vebox_ring_buffer(struct intel_engine_cs *engine);

u64 intel_engine_get_active_head(struct intel_engine_cs *engine);
u64 intel_engine_get_last_batch_head(struct intel_engine_cs *engine);

static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine)
{
	return intel_read_status_page(engine, I915_GEM_HWS_INDEX);
}

static inline u32 intel_engine_last_submit(struct intel_engine_cs *engine)
{
	return READ_ONCE(engine->timeline->seqno);
}

int init_workarounds_ring(struct intel_engine_cs *engine);
int intel_ring_workarounds_emit(struct drm_i915_gem_request *req);

void intel_engine_get_instdone(struct intel_engine_cs *engine,
			       struct intel_instdone *instdone);

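/*
 * Arbitrary size for the largest possible "add request" sequence; the code
 * paths are complex and variable, so leave generous headroom, including room
 * for the largest single packet to be emitted twice around a tail wrap.
 */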
#define MIN_SPACE_FOR_ADD_REQUEST 336

static inline u32 intel_hws_seqno_address(struct intel_engine_cs *engine)
{
	return engine->status_page.ggtt_offset + I915_GEM_HWS_INDEX_ADDR;
}

static inline u32 intel_hws_preempt_done_address(struct intel_engine_cs *engine)
{
	return engine->status_page.ggtt_offset + I915_GEM_HWS_PREEMPT_ADDR;
}

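/* intel_breadcrumbs.c -- user interrupt bottom half for waiters */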
int intel_engine_init_breadcrumbs(struct intel_engine_cs *engine);

static inline void intel_wait_init(struct intel_wait *wait,
				   struct drm_i915_gem_request *rq)
{
	wait->tsk = current;
	wait->request = rq;
}

static inline void intel_wait_init_for_seqno(struct intel_wait *wait, u32 seqno)
{
	wait->tsk = current;
	wait->seqno = seqno;
}

static inline bool intel_wait_has_seqno(const struct intel_wait *wait)
{
	return wait->seqno;
}

static inline bool
intel_wait_update_seqno(struct intel_wait *wait, u32 seqno)
{
	wait->seqno = seqno;
	return intel_wait_has_seqno(wait);
}

static inline bool
intel_wait_update_request(struct intel_wait *wait,
			  const struct drm_i915_gem_request *rq)
{
	return intel_wait_update_seqno(wait, i915_gem_request_global_seqno(rq));
}

static inline bool
intel_wait_check_seqno(const struct intel_wait *wait, u32 seqno)
{
	return wait->seqno == seqno;
}

static inline bool
intel_wait_check_request(const struct intel_wait *wait,
			 const struct drm_i915_gem_request *rq)
{
	return intel_wait_check_seqno(wait, i915_gem_request_global_seqno(rq));
}

static inline bool intel_wait_complete(const struct intel_wait *wait)
{
	return RB_EMPTY_NODE(&wait->node);
}

bool intel_engine_add_wait(struct intel_engine_cs *engine,
			   struct intel_wait *wait);
void intel_engine_remove_wait(struct intel_engine_cs *engine,
			      struct intel_wait *wait);
void intel_engine_enable_signaling(struct drm_i915_gem_request *request,
				   bool wakeup);
void intel_engine_cancel_signaling(struct drm_i915_gem_request *request);

static inline bool intel_engine_has_waiter(const struct intel_engine_cs *engine)
{
	return READ_ONCE(engine->breadcrumbs.irq_wait);
}

unsigned int intel_engine_wakeup(struct intel_engine_cs *engine);
#define ENGINE_WAKEUP_WAITER BIT(0)
#define ENGINE_WAKEUP_ASLEEP BIT(1)

void intel_engine_pin_breadcrumbs_irq(struct intel_engine_cs *engine);
void intel_engine_unpin_breadcrumbs_irq(struct intel_engine_cs *engine);

void __intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine);
void intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine);

void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine);
void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine);
bool intel_breadcrumbs_busy(struct intel_engine_cs *engine);

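/*
 * Helpers for building gen8+ command packets. gen8_emit_pipe_control()
 * writes a 6-dword PIPE_CONTROL into @batch; the *_ggtt_write helpers emit
 * a qword-aligned post-sync write of @value to @gtt_offset, via PIPE_CONTROL
 * on the render engine and MI_FLUSH_DW elsewhere.
 */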
static inline u32 *gen8_emit_pipe_control(u32 *batch, u32 flags, u32 offset)
{
	memset(batch, 0, 6 * sizeof(u32));

	batch[0] = GFX_OP_PIPE_CONTROL(6);
	batch[1] = flags;
	batch[2] = offset;

	return batch + 6;
}

static inline u32 *
gen8_emit_ggtt_write_rcs(u32 *cs, u32 value, u32 gtt_offset)
{
	GEM_BUG_ON(!IS_ALIGNED(gtt_offset, 8));

	*cs++ = GFX_OP_PIPE_CONTROL(6);
	*cs++ = PIPE_CONTROL_GLOBAL_GTT_IVB | PIPE_CONTROL_CS_STALL |
		PIPE_CONTROL_QW_WRITE;
	*cs++ = gtt_offset;
	*cs++ = 0;
	*cs++ = value;

	*cs++ = 0;

	return cs;
}

static inline u32 *
gen8_emit_ggtt_write(u32 *cs, u32 value, u32 gtt_offset)
{
	GEM_BUG_ON(gtt_offset & (1 << 5));

	GEM_BUG_ON(!IS_ALIGNED(gtt_offset, 8));

	*cs++ = (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW;
	*cs++ = gtt_offset | MI_FLUSH_DW_USE_GTT;
	*cs++ = 0;
	*cs++ = value;

	return cs;
}

bool intel_engine_is_idle(struct intel_engine_cs *engine);
bool intel_engines_are_idle(struct drm_i915_private *dev_priv);

bool intel_engine_has_kernel_context(const struct intel_engine_cs *engine);

void intel_engines_park(struct drm_i915_private *i915);
void intel_engines_unpark(struct drm_i915_private *i915);

void intel_engines_reset_default_submission(struct drm_i915_private *i915);
unsigned int intel_engines_has_context_isolation(struct drm_i915_private *i915);

bool intel_engine_can_store_dword(struct intel_engine_cs *engine);

__printf(3, 4)
void intel_engine_dump(struct intel_engine_cs *engine,
		       struct drm_printer *m,
		       const char *header, ...);

struct intel_engine_cs *
intel_engine_lookup_user(struct drm_i915_private *i915, u8 class, u8 instance);

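/*
 * Busyness accounting hooks, called on context schedule-in/out. They take
 * stats.lock only when accounting is enabled, and cope with accounting
 * having been enabled or disabled while contexts were already in flight.
 */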
static inline void intel_engine_context_in(struct intel_engine_cs *engine)
{
	unsigned long flags;

	if (READ_ONCE(engine->stats.enabled) == 0)
		return;

	spin_lock_irqsave(&engine->stats.lock, flags);

	if (engine->stats.enabled > 0) {
		if (engine->stats.active++ == 0)
			engine->stats.start = ktime_get();
		GEM_BUG_ON(engine->stats.active == 0);
	}

	spin_unlock_irqrestore(&engine->stats.lock, flags);
}

static inline void intel_engine_context_out(struct intel_engine_cs *engine)
{
	unsigned long flags;

	if (READ_ONCE(engine->stats.enabled) == 0)
		return;

	spin_lock_irqsave(&engine->stats.lock, flags);

	if (engine->stats.enabled > 0) {
		ktime_t last;

		if (engine->stats.active && --engine->stats.active == 0) {
			last = ktime_sub(ktime_get(), engine->stats.start);

			engine->stats.total = ktime_add(engine->stats.total,
							last);
		} else if (engine->stats.active == 0) {
			last = ktime_sub(ktime_get(), engine->stats.enabled_at);

			engine->stats.total = ktime_add(engine->stats.total,
							last);
		}
	}

	spin_unlock_irqrestore(&engine->stats.lock, flags);
}

int intel_enable_engine_stats(struct intel_engine_cs *engine);
void intel_disable_engine_stats(struct intel_engine_cs *engine);

ktime_t intel_engine_get_busy_time(struct intel_engine_cs *engine);

#endif /* _INTEL_RINGBUFFER_H_ */