#ifndef _INTEL_RINGBUFFER_H_
#define _INTEL_RINGBUFFER_H_

#include <linux/hashtable.h>
#include <linux/seqlock.h>

#include "i915_gem_batch_pool.h"

#include "i915_reg.h"
#include "i915_pmu.h"
#include "i915_request.h"
#include "i915_selftest.h"
#include "i915_timeline.h"
#include "intel_gpu_commands.h"

struct drm_printer;
struct i915_sched_attr;

#define I915_CMD_HASH_ORDER 9

/*
 * Early gen2 devices have a cacheline of just 32 bytes, using 64 is overkill,
 * but keeps the logic simple.
 */
#define CACHELINE_BYTES 64
#define CACHELINE_DWORDS (CACHELINE_BYTES / sizeof(uint32_t))

struct intel_hw_status_page {
	struct i915_vma *vma;
	u32 *page_addr;
	u32 ggtt_offset;
};

#define I915_READ_TAIL(engine) I915_READ(RING_TAIL((engine)->mmio_base))
#define I915_WRITE_TAIL(engine, val) I915_WRITE(RING_TAIL((engine)->mmio_base), val)

#define I915_READ_START(engine) I915_READ(RING_START((engine)->mmio_base))
#define I915_WRITE_START(engine, val) I915_WRITE(RING_START((engine)->mmio_base), val)

#define I915_READ_HEAD(engine) I915_READ(RING_HEAD((engine)->mmio_base))
#define I915_WRITE_HEAD(engine, val) I915_WRITE(RING_HEAD((engine)->mmio_base), val)

#define I915_READ_CTL(engine) I915_READ(RING_CTL((engine)->mmio_base))
#define I915_WRITE_CTL(engine, val) I915_WRITE(RING_CTL((engine)->mmio_base), val)

#define I915_READ_IMR(engine) I915_READ(RING_IMR((engine)->mmio_base))
#define I915_WRITE_IMR(engine, val) I915_WRITE(RING_IMR((engine)->mmio_base), val)

#define I915_READ_MODE(engine) I915_READ(RING_MI_MODE((engine)->mmio_base))
#define I915_WRITE_MODE(engine, val) I915_WRITE(RING_MI_MODE((engine)->mmio_base), val)

/*
 * Hangcheck classifies the activity of each engine so that the hangcheck
 * timer can decide whether the engine is making forward progress; escalation
 * ends in ENGINE_DEAD, at which point a reset is requested.
 */
enum intel_engine_hangcheck_action {
	ENGINE_IDLE = 0,
	ENGINE_WAIT,
	ENGINE_ACTIVE_SEQNO,
	ENGINE_ACTIVE_HEAD,
	ENGINE_ACTIVE_SUBUNITS,
	ENGINE_WAIT_KICK,
	ENGINE_DEAD,
};

static inline const char *
hangcheck_action_to_str(const enum intel_engine_hangcheck_action a)
{
	switch (a) {
	case ENGINE_IDLE:
		return "idle";
	case ENGINE_WAIT:
		return "wait";
	case ENGINE_ACTIVE_SEQNO:
		return "active seqno";
	case ENGINE_ACTIVE_HEAD:
		return "active head";
	case ENGINE_ACTIVE_SUBUNITS:
		return "active subunits";
	case ENGINE_WAIT_KICK:
		return "wait kick";
	case ENGINE_DEAD:
		return "dead";
	}

	return "unknown";
}

#define I915_MAX_SLICES 3
#define I915_MAX_SUBSLICES 8

#define instdone_slice_mask(dev_priv__) \
	(INTEL_GEN(dev_priv__) == 7 ? \
	 1 : INTEL_INFO(dev_priv__)->sseu.slice_mask)

#define instdone_subslice_mask(dev_priv__) \
	(INTEL_GEN(dev_priv__) == 7 ? \
	 1 : INTEL_INFO(dev_priv__)->sseu.subslice_mask[0])

#define for_each_instdone_slice_subslice(dev_priv__, slice__, subslice__) \
	for ((slice__) = 0, (subslice__) = 0; \
	     (slice__) < I915_MAX_SLICES; \
	     (subslice__) = ((subslice__) + 1) < I915_MAX_SUBSLICES ? (subslice__) + 1 : 0, \
	       (slice__) += ((subslice__) == 0)) \
		for_each_if((BIT(slice__) & instdone_slice_mask(dev_priv__)) && \
			    (BIT(subslice__) & instdone_subslice_mask(dev_priv__)))
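
/*
 * Illustrative sketch (not part of the original header): walking the
 * per-slice/subslice INSTDONE values gathered in struct intel_instdone.
 * The "dev_priv" and "instdone" variables are assumed to come from the
 * caller, e.g. filled in via intel_engine_get_instdone():
 *
 *	int slice, subslice;
 *
 *	for_each_instdone_slice_subslice(dev_priv, slice, subslice)
 *		pr_info("sampler[%d][%d] = 0x%08x\n", slice, subslice,
 *			instdone.sampler[slice][subslice]);
 */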

struct intel_instdone {
	u32 instdone;
	/* The following exist only in the RCS unit */
	u32 slice_common;
	u32 sampler[I915_MAX_SLICES][I915_MAX_SUBSLICES];
	u32 row[I915_MAX_SLICES][I915_MAX_SUBSLICES];
};

struct intel_engine_hangcheck {
	u64 acthd;
	u32 seqno;
	enum intel_engine_hangcheck_action action;
	unsigned long action_timestamp;
	int deadlock;
	struct intel_instdone instdone;
	struct i915_request *active_request;
	bool stalled;
};

struct intel_ring {
	struct i915_vma *vma;
	void *vaddr;

	struct i915_timeline *timeline;
	struct list_head request_list;
	struct list_head active_link;

	u32 head;
	u32 tail;
	u32 emit;

	u32 space;
	u32 size;
	u32 effective_size;
};

struct i915_gem_context;
struct drm_i915_reg_table;

/*
 * we use a single page to load ctx workarounds so all of these
 * values are referred in terms of dwords
 *
 * struct i915_wa_ctx_bb:
 *  offset: specifies batch starting position, also helpful in case
 *    if we want to have multiple batches at different offsets based on
 *    some criteria. It is not a requirement at the moment but provides
 *    an option for future use.
 *  size: size of the batch in DWORDS
 */
struct i915_ctx_workarounds {
	struct i915_wa_ctx_bb {
		u32 offset;
		u32 size;
	} indirect_ctx, per_ctx;
	struct i915_vma *vma;
};

struct i915_request;

#define I915_MAX_VCS 4
#define I915_MAX_VECS 2

/*
 * Engine IDs definitions.
 * Keep instances of the same type engine together.
 */
enum intel_engine_id {
	RCS = 0,
	BCS,
	VCS,
	VCS2,
	VCS3,
	VCS4,
#define _VCS(n) (VCS + (n))
	VECS,
	VECS2
#define _VECS(n) (VECS + (n))
};

struct i915_priolist {
	struct rb_node node;
	struct list_head requests;
	int priority;
};

/**
 * struct intel_engine_execlists - execlist submission queue and port state
 *
 * The struct intel_engine_execlists represents the combined logical state of
 * the driver and the hardware state for execlist mode of submission.
 */
struct intel_engine_execlists {
	/**
	 * @tasklet: softirq tasklet for bottom half processing of the
	 * context status buffer
	 */
	struct tasklet_struct tasklet;

	/**
	 * @default_priolist: priority list for I915_PRIORITY_NORMAL
	 */
	struct i915_priolist default_priolist;

	/**
	 * @no_priolist: priority lists disabled
	 */
	bool no_priolist;

	/**
	 * @submit_reg: gen-specific execlist submission register
	 * set to the ExecList Submission Port (elsp) register pre-Gen11 and to
	 * the ExecList Submission Queue Contents register array for Gen11+
	 */
	u32 __iomem *submit_reg;

	/**
	 * @ctrl_reg: the enhanced execlists control register, used to load the
	 * submit queue on the HW and to request preemptions to idle
	 */
	u32 __iomem *ctrl_reg;

	/**
	 * @port: execlist port states
	 *
	 * For each hardware ELSP (ExecList Submission Port) we keep
	 * track of the last request and the number of times we submitted
	 * that port to hw. We then count the number of times the hw reports
	 * a context completion or preemption. As only one context can
	 * be active on hw, we limit resubmission of context to port[0]. This
	 * is called Lite Restore, of the context.
	 */
	struct execlist_port {
		/**
		 * @request_count: combined request and submission count
		 */
		struct i915_request *request_count;
#define EXECLIST_COUNT_BITS 2
#define port_request(p) ptr_mask_bits((p)->request_count, EXECLIST_COUNT_BITS)
#define port_count(p) ptr_unmask_bits((p)->request_count, EXECLIST_COUNT_BITS)
#define port_pack(rq, count) ptr_pack_bits(rq, count, EXECLIST_COUNT_BITS)
#define port_unpack(p, count) ptr_unpack_bits((p)->request_count, count, EXECLIST_COUNT_BITS)
#define port_set(p, packed) ((p)->request_count = (packed))
#define port_isset(p) ((p)->request_count)
#define port_index(p, execlists) ((p) - (execlists)->port)

		/**
		 * @context_id: context ID for port
		 */
		GEM_DEBUG_DECL(u32 context_id);

#define EXECLIST_MAX_PORTS 2
	} port[EXECLIST_MAX_PORTS];

	/**
	 * @active: is the HW active? We consider the HW as active after
	 * submitting any context for execution and until we have seen the
	 * last context completion event. After that, we do not expect any
	 * more events until we submit, and so can park the HW.
	 *
	 * As we have a small number of different sources from which we feed
	 * the HW, we track the state of each inside a single bitfield.
	 */
	unsigned int active;
#define EXECLISTS_ACTIVE_USER 0
#define EXECLISTS_ACTIVE_PREEMPT 1
#define EXECLISTS_ACTIVE_HWACK 2

	/**
	 * @port_mask: number of execlist ports - 1
	 */
	unsigned int port_mask;

	/**
	 * @queue_priority: Highest pending priority.
	 *
	 * When we add requests into the queue, or adjust the priority of
	 * executing requests, we compute the maximum priority of those
	 * pending requests. We can then use this value to determine if
	 * we need to preempt the executing requests to service the queue.
	 */
	int queue_priority;

	/**
	 * @queue: queue of requests, in priority lists
	 */
	struct rb_root queue;

	/**
	 * @first: leftmost level in priority @queue
	 */
	struct rb_node *first;

	/**
	 * @fw_domains: forcewake domains for irq tasklet
	 */
	unsigned int fw_domains;

	/**
	 * @csb_head: context status buffer head
	 */
	unsigned int csb_head;

	/**
	 * @csb_use_mmio: access csb through mmio, instead of hwsp
	 */
	bool csb_use_mmio;

	/**
	 * @preempt_complete_status: expected CSB upon completing preemption
	 */
	u32 preempt_complete_status;
};
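
/*
 * Illustrative sketch (not from the original header): peeking at the
 * request occupying the first execlist port together with its submission
 * count, using the port_*() helpers above. "execlists" is assumed to be a
 * pointer to a struct intel_engine_execlists owned by the caller:
 *
 *	struct execlist_port *port = execlists->port;
 *	struct i915_request *rq;
 *	unsigned int count;
 *
 *	rq = port_unpack(port, &count);
 *	if (rq)
 *		pr_info("port0 submitted %u time(s)\n", count);
 */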

#define INTEL_ENGINE_CS_MAX_NAME 8

struct intel_engine_cs {
	struct drm_i915_private *i915;
	char name[INTEL_ENGINE_CS_MAX_NAME];

	enum intel_engine_id id;
	unsigned int hw_id;
	unsigned int guc_id;

	u8 uabi_id;
	u8 uabi_class;

	u8 class;
	u8 instance;
	u32 context_size;
	u32 mmio_base;

	struct intel_ring *buffer;

	struct i915_timeline timeline;

	struct drm_i915_gem_object *default_state;

	atomic_t irq_count;
	unsigned long irq_posted;
#define ENGINE_IRQ_BREADCRUMB 0
#define ENGINE_IRQ_EXECLIST 1

	/* Rather than have every client wait upon all user interrupts,
	 * with the herd waking after every interrupt and each doing the
	 * heavyweight seqno dance, we delegate the task (of being the
	 * bottom-half of the user interrupt) to the first client. After
	 * every interrupt, we wake up one client, who does the heavyweight
	 * coherent seqno read and either goes back to sleep (if the request
	 * is not completed), or wakes up all the completed clients in
	 * parallel, before then transferring the bottom-half status to the
	 * next client in the queue.
	 *
	 * Compared to walking the entire list of waiters in a single dedicated
	 * bottom-half, we reduce the latency of the first waiter by avoiding
	 * a context switch, but incur additional coherent seqno reads when
	 * following the chain of request breadcrumbs. Since it is most likely
	 * that we have a single client waiting on each seqno, then reducing
	 * the overhead of waking that client is much preferred.
	 */
	struct intel_breadcrumbs {
		spinlock_t irq_lock; /* protects irq_*; irqsafe */
		struct intel_wait *irq_wait; /* oldest waiter by retirement */

		spinlock_t rb_lock; /* protects the rb and wraps irq_lock */
		struct rb_root waiters; /* sorted by retirement, priority */
		struct list_head signals; /* sorted by retirement */
		struct task_struct *signaler; /* used for fence signalling */

		struct timer_list fake_irq; /* used after a missed interrupt */
		struct timer_list hangcheck; /* detect missed interrupts */

		unsigned int hangcheck_interrupts;
		unsigned int irq_enabled;

		bool irq_armed : 1;
		I915_SELFTEST_DECLARE(bool mock : 1);
	} breadcrumbs;

	struct {
		/**
		 * @enable: Bitmask of enable sample events on this engine.
		 *
		 * Bits correspond to sample event types, for instance
		 * I915_SAMPLE_QUEUED is bit 0 etc.
		 */
		u32 enable;
		/**
		 * @enable_count: Reference count for the enabled samplers.
		 *
		 * Index number corresponds to the bit number from @enable.
		 */
		unsigned int enable_count[I915_PMU_SAMPLE_BITS];
		/**
		 * @sample: Counter values for sampling events.
		 *
		 * Our internal timer stores the current counters in this field.
		 */
#define I915_ENGINE_SAMPLE_MAX (I915_SAMPLE_SEMA + 1)
		struct i915_pmu_sample sample[I915_ENGINE_SAMPLE_MAX];
	} pmu;

	/*
	 * A pool of objects to use as shadow copies of client batch buffers
	 * when the command parser is enabled. Prevents the client from
	 * modifying the batch contents after software parsing.
	 */
	struct i915_gem_batch_pool batch_pool;

	struct intel_hw_status_page status_page;
	struct i915_ctx_workarounds wa_ctx;
	struct i915_vma *scratch;

	u32 irq_keep_mask; /* always keep these interrupts */
	u32 irq_enable_mask; /* bitmask to enable ring interrupt */
	void (*irq_enable)(struct intel_engine_cs *engine);
	void (*irq_disable)(struct intel_engine_cs *engine);

	int (*init_hw)(struct intel_engine_cs *engine);
	void (*reset_hw)(struct intel_engine_cs *engine,
			 struct i915_request *rq);

	void (*park)(struct intel_engine_cs *engine);
	void (*unpark)(struct intel_engine_cs *engine);

	void (*set_default_submission)(struct intel_engine_cs *engine);

	struct intel_ring *(*context_pin)(struct intel_engine_cs *engine,
					  struct i915_gem_context *ctx);
	void (*context_unpin)(struct intel_engine_cs *engine,
			      struct i915_gem_context *ctx);
	int (*request_alloc)(struct i915_request *rq);
	int (*init_context)(struct i915_request *rq);

	int (*emit_flush)(struct i915_request *request, u32 mode);
#define EMIT_INVALIDATE BIT(0)
#define EMIT_FLUSH BIT(1)
#define EMIT_BARRIER (EMIT_INVALIDATE | EMIT_FLUSH)
	int (*emit_bb_start)(struct i915_request *rq,
			     u64 offset, u32 length,
			     unsigned int dispatch_flags);
#define I915_DISPATCH_SECURE BIT(0)
#define I915_DISPATCH_PINNED BIT(1)
#define I915_DISPATCH_RS BIT(2)
	void (*emit_breadcrumb)(struct i915_request *rq, u32 *cs);
	int emit_breadcrumb_sz;

	/* Pass the request to the hardware queue (e.g. directly into
	 * the legacy ringbuffer or to the end of an execlist).
	 *
	 * This is called from an atomic context with irqs disabled; must
	 * be irq safe.
	 */
	void (*submit_request)(struct i915_request *rq);

	/* Call when the priority on a request has changed and it and its
	 * dependencies may need rescheduling. Note the request itself may
	 * not be ready to run!
	 *
	 * Called under the struct_mutex.
	 */
	void (*schedule)(struct i915_request *request,
			 const struct i915_sched_attr *attr);

	/*
	 * Cancel all requests on the hardware, or queued for execution.
	 * This should only cancel the ready requests that have been
	 * submitted to the engine (via the engine->submit_request callback).
	 * This is called when marking the device as wedged.
	 */
	void (*cancel_requests)(struct intel_engine_cs *engine);

	/* Some chipsets are not quite as coherent as advertised and need
	 * an expensive kick to force a true read of the up-to-date seqno.
	 * However, the up-to-date seqno is not always required and the last
	 * seen value is good enough. Note that the seqno will always be
	 * monotonic, even if not coherent.
	 */
	void (*irq_seqno_barrier)(struct intel_engine_cs *engine);
	void (*cleanup)(struct intel_engine_cs *engine);

	/* Mailbox-based semaphores (gen6-7): a signalling engine writes a
	 * seqno into the waiter's mailbox register, and the waiter stalls
	 * on MI_SEMAPHORE_MBOX until its mailbox reaches that value. Each
	 * engine therefore tracks the mailboxes others write to wake it,
	 * and the registers it must write to signal its peers.
	 */
	struct {
#define GEN6_SEMAPHORE_LAST VECS_HW
#define GEN6_NUM_SEMAPHORES (GEN6_SEMAPHORE_LAST + 1)
#define GEN6_SEMAPHORES_MASK GENMASK(GEN6_SEMAPHORE_LAST, 0)
		struct {
			/* our mbox written by others */
			u32 wait[GEN6_NUM_SEMAPHORES];
			/* mboxes this ring signals to */
			i915_reg_t signal[GEN6_NUM_SEMAPHORES];
		} mbox;

		/* AKA wait() */
		int (*sync_to)(struct i915_request *rq,
			       struct i915_request *signal);
		u32 *(*signal)(struct i915_request *rq, u32 *cs);
	} semaphore;

	struct intel_engine_execlists execlists;

	/* Contexts are pinned whilst they are active on the GPU. The last
	 * context executed remains active whilst the GPU is idle - the
	 * switch away and write to the context object only occurs on the
	 * next execution.  Contexts are only unpinned on retirement of the
	 * following request, ensuring that we never switch away from the
	 * context whilst the GPU is still writing to it.
	 */
	struct i915_gem_context *last_retired_context;

	/* We track the current MI_SET_CONTEXT in order to eliminate
	 * redundant context switches. This presumes that requests are not
	 * reordered! Or when they are the tracking is updated along with
	 * the emission of individual requests into the legacy command
	 * stream (ring).
	 */
	struct i915_gem_context *legacy_active_context;
	struct i915_hw_ppgtt *legacy_active_ppgtt;

	/* status_notifier: list of callbacks for context-switch changes */
	struct atomic_notifier_head context_status_notifier;

	struct intel_engine_hangcheck hangcheck;

#define I915_ENGINE_NEEDS_CMD_PARSER BIT(0)
#define I915_ENGINE_SUPPORTS_STATS BIT(1)
#define I915_ENGINE_HAS_PREEMPTION BIT(2)
	unsigned int flags;

	/*
	 * Table of commands the command parser needs to know about
	 * for this engine.
	 */
	DECLARE_HASHTABLE(cmd_hash, I915_CMD_HASH_ORDER);

	/*
	 * Table of registers allowed in commands that read/write registers.
	 */
	const struct drm_i915_reg_table *reg_tables;
	int reg_table_count;

	/*
	 * Returns the bitmask for the length field of the specified command.
	 * Return 0 for an unrecognized/invalid command.
	 *
	 * If the command parser finds an entry for a command in the engine's
	 * cmd_tables, it gets the command's length based on the table entry.
	 * If not, it calls this function to determine the per-engine length
	 * field encoding for the command (i.e. different opcode ranges use
	 * certain bits to encode the command length in the header).
	 */
	u32 (*get_cmd_length_mask)(u32 cmd_header);

	struct {
		/**
		 * @lock: Lock protecting the below fields.
		 */
		seqlock_t lock;
		/**
		 * @enabled: Reference count indicating number of listeners.
		 */
		unsigned int enabled;
		/**
		 * @active: Number of contexts currently scheduled in.
		 */
		unsigned int active;
		/**
		 * @enabled_at: Timestamp when busy stats were enabled.
		 */
		ktime_t enabled_at;
		/**
		 * @start: Timestamp of the last idle to active transition.
		 *
		 * Idle is defined as active == 0, active is active > 0.
		 */
		ktime_t start;
		/**
		 * @total: Total time this engine was busy.
		 *
		 * Accumulated time not counting the most recent block in
		 * cases where engine is currently busy (active > 0).
		 */
		ktime_t total;
	} stats;
};

static inline bool
intel_engine_needs_cmd_parser(const struct intel_engine_cs *engine)
{
	return engine->flags & I915_ENGINE_NEEDS_CMD_PARSER;
}

static inline bool
intel_engine_supports_stats(const struct intel_engine_cs *engine)
{
	return engine->flags & I915_ENGINE_SUPPORTS_STATS;
}

static inline bool
intel_engine_has_preemption(const struct intel_engine_cs *engine)
{
	return engine->flags & I915_ENGINE_HAS_PREEMPTION;
}

static inline bool __execlists_need_preempt(int prio, int last)
{
	return prio > max(0, last);
}
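
/*
 * Worked example (illustrative, not from the original header): with the
 * highest queued priority prio = 2 and the priority of the last executing
 * request last = 0, __execlists_need_preempt(2, 0) is true and preemption
 * is requested. Negative values of "last" are clamped to 0 by max(), so a
 * below-normal priority on the hardware never lowers the preemption bar.
 */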

static inline void
execlists_set_active(struct intel_engine_execlists *execlists,
		     unsigned int bit)
{
	__set_bit(bit, (unsigned long *)&execlists->active);
}

static inline bool
execlists_set_active_once(struct intel_engine_execlists *execlists,
			  unsigned int bit)
{
	return !__test_and_set_bit(bit, (unsigned long *)&execlists->active);
}

static inline void
execlists_clear_active(struct intel_engine_execlists *execlists,
		       unsigned int bit)
{
	__clear_bit(bit, (unsigned long *)&execlists->active);
}

static inline bool
execlists_is_active(const struct intel_engine_execlists *execlists,
		    unsigned int bit)
{
	return test_bit(bit, (unsigned long *)&execlists->active);
}

void execlists_user_begin(struct intel_engine_execlists *execlists,
			  const struct execlist_port *port);
void execlists_user_end(struct intel_engine_execlists *execlists);

void
execlists_cancel_port_requests(struct intel_engine_execlists * const execlists);

void
execlists_unwind_incomplete_requests(struct intel_engine_execlists *execlists);

static inline unsigned int
execlists_num_ports(const struct intel_engine_execlists * const execlists)
{
	return execlists->port_mask + 1;
}

static inline struct execlist_port *
execlists_port_complete(struct intel_engine_execlists * const execlists,
			struct execlist_port * const port)
{
	const unsigned int m = execlists->port_mask;

	GEM_BUG_ON(port_index(port, execlists) != 0);
	GEM_BUG_ON(!execlists_is_active(execlists, EXECLISTS_ACTIVE_USER));

	memmove(port, port + 1, m * sizeof(struct execlist_port));
	memset(port + m, 0, sizeof(struct execlist_port));

	return port;
}
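
/*
 * Illustrative note (not from the original header): completing port[0]
 * shifts the remaining ports down by one and clears the last slot, i.e.
 * with the default two ports, port[1] becomes the new port[0]:
 *
 *	before: port[0] = rq_a, port[1] = rq_b
 *	execlists_port_complete(execlists, execlists->port);
 *	after:  port[0] = rq_b, port[1] = empty
 */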

static inline unsigned int
intel_engine_flag(const struct intel_engine_cs *engine)
{
	return BIT(engine->id);
}

static inline u32
intel_read_status_page(const struct intel_engine_cs *engine, int reg)
{
	/* Ensure that the compiler doesn't optimize away the load. */
	return READ_ONCE(engine->status_page.page_addr[reg]);
}

static inline void
intel_write_status_page(struct intel_engine_cs *engine, int reg, u32 value)
{
	/* Writes to the status page must be visible to the GPU even when
	 * the CPU caches are not snooped, so flush the dirty cacheline
	 * before and after the store (with the mb() pair ordering the
	 * flushes against the write). Without clflush, fall back to a
	 * plain WRITE_ONCE() and rely on coherency.
	 */
	if (static_cpu_has(X86_FEATURE_CLFLUSH)) {
		mb();
		clflush(&engine->status_page.page_addr[reg]);
		engine->status_page.page_addr[reg] = value;
		clflush(&engine->status_page.page_addr[reg]);
		mb();
	} else {
		WRITE_ONCE(engine->status_page.page_addr[reg], value);
	}
}
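
/*
 * Illustrative sketch (not from the original header): the engine's current
 * breadcrumb is simply a status-page read of the reserved I915_GEM_HWS_INDEX
 * dword, which is exactly what intel_engine_get_seqno() below does:
 *
 *	u32 seqno = intel_read_status_page(engine, I915_GEM_HWS_INDEX);
 */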

/*
 * Reads a dword out of the status page, which is written to from the command
 * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
 * MI_STORE_DATA_IMM.
 *
 * The following dwords have a reserved meaning:
 * 0x00: ISR copy, updated when an ISR bit not set in the HWSTAM changes.
 * 0x04: ring 0 head pointer
 * 0x05: ring 1 head pointer (915-class)
 * 0x06: ring 2 head pointer (915-class)
 * 0x10-0x1b: Context status DWords (GM45)
 * 0x1f: Last written status offset. (GM45)
 * 0x20-0x2f: Reserved (Gen6+)
 *
 * The area from dword 0x30 to 0x3ff is available for driver usage.
 */
#define I915_GEM_HWS_INDEX 0x30
#define I915_GEM_HWS_INDEX_ADDR (I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
#define I915_GEM_HWS_PREEMPT_INDEX 0x32
#define I915_GEM_HWS_PREEMPT_ADDR (I915_GEM_HWS_PREEMPT_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
#define I915_GEM_HWS_SCRATCH_INDEX 0x40
#define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT)

#define I915_HWS_CSB_BUF0_INDEX 0x10
#define I915_HWS_CSB_WRITE_INDEX 0x1f
#define CNL_HWS_CSB_WRITE_INDEX 0x2f

struct intel_ring *
intel_engine_create_ring(struct intel_engine_cs *engine,
			 struct i915_timeline *timeline,
			 int size);
int intel_ring_pin(struct intel_ring *ring,
		   struct drm_i915_private *i915,
		   unsigned int offset_bias);
void intel_ring_reset(struct intel_ring *ring, u32 tail);
unsigned int intel_ring_update_space(struct intel_ring *ring);
void intel_ring_unpin(struct intel_ring *ring);
void intel_ring_free(struct intel_ring *ring);

void intel_engine_stop(struct intel_engine_cs *engine);
void intel_engine_cleanup(struct intel_engine_cs *engine);

void intel_legacy_submission_resume(struct drm_i915_private *dev_priv);

int __must_check intel_ring_cacheline_align(struct i915_request *rq);

int intel_ring_wait_for_space(struct intel_ring *ring, unsigned int bytes);
u32 __must_check *intel_ring_begin(struct i915_request *rq, unsigned int n);

static inline void intel_ring_advance(struct i915_request *rq, u32 *cs)
{
	/* Dummy function.
	 *
	 * This serves as a placeholder in the code so that the reader
	 * can compare against the preceding intel_ring_begin() and check
	 * that the number of dwords emitted matches the space that was
	 * reserved for the command packet (i.e. the value passed to
	 * intel_ring_begin()).
	 */
	GEM_BUG_ON((rq->ring->vaddr + rq->ring->emit) != cs);
}
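
/*
 * Illustrative sketch (not from the original header): the canonical
 * emission pattern pairing intel_ring_begin() with intel_ring_advance().
 * "rq" is assumed to be an active struct i915_request owned by the caller:
 *
 *	u32 *cs;
 *
 *	cs = intel_ring_begin(rq, 2);
 *	if (IS_ERR(cs))
 *		return PTR_ERR(cs);
 *
 *	*cs++ = MI_NOOP;
 *	*cs++ = MI_NOOP;
 *	intel_ring_advance(rq, cs);
 */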

static inline u32 intel_ring_wrap(const struct intel_ring *ring, u32 pos)
{
	return pos & (ring->size - 1);
}

static inline u32 intel_ring_offset(const struct i915_request *rq, void *addr)
{
	/* addr points into the ring's CPU mapping; reduce it to an offset */
	u32 offset = addr - rq->ring->vaddr;

	GEM_BUG_ON(offset > rq->ring->size);
	return intel_ring_wrap(rq->ring, offset);
}
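
/*
 * Illustrative note (not from the original header): ring sizes are a power
 * of two, so wrapping reduces to a mask. For a 4096 byte ring,
 * intel_ring_wrap(ring, 4100) == 4: the position re-enters the ring from
 * the start instead of running past the end.
 */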

static inline void
assert_ring_tail_valid(const struct intel_ring *ring, unsigned int tail)
{
	/* We could combine these into a single tail operation, but keeping
	 * them as separate tests will help identify the cause should one
	 * ever fire.
	 */
	GEM_BUG_ON(!IS_ALIGNED(tail, 8));
	GEM_BUG_ON(tail >= ring->size);

	/*
	 * "Ring Buffer Use"
	 *	Gen2 BSpec "1. Programming Environment" / 1.4.4.6
	 *	Gen3 BSpec "1c Memory Interface Functions" / 2.3.4.5
	 *	Gen4+ BSpec "1c Memory Interface and Command Stream" / 5.3.4.5
	 * "If the Ring Buffer Head Pointer and the Tail Pointer are on the
	 * same cacheline, the Head Pointer must not be greater than the Tail
	 * Pointer."
	 *
	 * We use ring->head as the last known location of the actual RING_HEAD,
	 * it may have advanced but in the worst case it is equally the same
	 * as ring->head and so we should never program RING_TAIL to advance
	 * into the same cacheline as ring->head.
	 */
#define cacheline(a) round_down(a, CACHELINE_BYTES)
	GEM_BUG_ON(cacheline(tail) == cacheline(ring->head) &&
		   tail < ring->head);
#undef cacheline
}

static inline unsigned int
intel_ring_set_tail(struct intel_ring *ring, unsigned int tail)
{
	/* Whilst writes to the tail are strictly ordered, there is no
	 * serialisation between readers and the writers. The tail may be
	 * read by i915_request_retire() just as it is being updated
	 * by execlists, as although the breadcrumb is complete, the context
	 * switch hasn't been seen.
	 */
	assert_ring_tail_valid(ring, tail);
	ring->tail = tail;
	return tail;
}

void intel_engine_init_global_seqno(struct intel_engine_cs *engine, u32 seqno);

void intel_engine_setup_common(struct intel_engine_cs *engine);
int intel_engine_init_common(struct intel_engine_cs *engine);
int intel_engine_create_scratch(struct intel_engine_cs *engine, int size);
void intel_engine_cleanup_common(struct intel_engine_cs *engine);

int intel_init_render_ring_buffer(struct intel_engine_cs *engine);
int intel_init_bsd_ring_buffer(struct intel_engine_cs *engine);
int intel_init_blt_ring_buffer(struct intel_engine_cs *engine);
int intel_init_vebox_ring_buffer(struct intel_engine_cs *engine);

u64 intel_engine_get_active_head(const struct intel_engine_cs *engine);
u64 intel_engine_get_last_batch_head(const struct intel_engine_cs *engine);

static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine)
{
	return intel_read_status_page(engine, I915_GEM_HWS_INDEX);
}

static inline u32 intel_engine_last_submit(struct intel_engine_cs *engine)
{
	/* We are only peeking at the tail of the submit queue (and not the
	 * queue itself) in order to gain a hint as to the current active
	 * state of the engine. Callers are not expected to be taking
	 * engine->timeline->lock, nor are they expected to be concerned
	 * with serialising this hint with anything, so document it as
	 * a hint and nothing more.
	 */
	return READ_ONCE(engine->timeline.seqno);
}

void intel_engine_get_instdone(struct intel_engine_cs *engine,
			       struct intel_instdone *instdone);

/*
 * Arbitrary size for largest possible 'add request' sequence. The code paths
 * are complex and variable. Empirical measurement shows that the worst case
 * is BDW at 192 bytes (6 + 6 + 36 dwords), then ILK at 136 bytes. However,
 * we need to allocate double the largest single packet within that emission
 * to account for tail wraparound (so 6 + 6 + 72 dwords for BDW).
 */
#define MIN_SPACE_FOR_ADD_REQUEST 336

static inline u32 intel_hws_seqno_address(struct intel_engine_cs *engine)
{
	return engine->status_page.ggtt_offset + I915_GEM_HWS_INDEX_ADDR;
}

static inline u32 intel_hws_preempt_done_address(struct intel_engine_cs *engine)
{
	return engine->status_page.ggtt_offset + I915_GEM_HWS_PREEMPT_ADDR;
}

/* intel_breadcrumbs.c -- user interrupt bottom half */
int intel_engine_init_breadcrumbs(struct intel_engine_cs *engine);

static inline void intel_wait_init(struct intel_wait *wait,
				   struct i915_request *rq)
{
	wait->tsk = current;
	wait->request = rq;
}

static inline void intel_wait_init_for_seqno(struct intel_wait *wait, u32 seqno)
{
	wait->tsk = current;
	wait->seqno = seqno;
}

static inline bool intel_wait_has_seqno(const struct intel_wait *wait)
{
	return wait->seqno;
}

static inline bool
intel_wait_update_seqno(struct intel_wait *wait, u32 seqno)
{
	wait->seqno = seqno;
	return intel_wait_has_seqno(wait);
}

static inline bool
intel_wait_update_request(struct intel_wait *wait,
			  const struct i915_request *rq)
{
	return intel_wait_update_seqno(wait, i915_request_global_seqno(rq));
}

static inline bool
intel_wait_check_seqno(const struct intel_wait *wait, u32 seqno)
{
	return wait->seqno == seqno;
}

static inline bool
intel_wait_check_request(const struct intel_wait *wait,
			 const struct i915_request *rq)
{
	return intel_wait_check_seqno(wait, i915_request_global_seqno(rq));
}

static inline bool intel_wait_complete(const struct intel_wait *wait)
{
	return RB_EMPTY_NODE(&wait->node);
}

bool intel_engine_add_wait(struct intel_engine_cs *engine,
			   struct intel_wait *wait);
void intel_engine_remove_wait(struct intel_engine_cs *engine,
			      struct intel_wait *wait);
bool intel_engine_enable_signaling(struct i915_request *request, bool wakeup);
void intel_engine_cancel_signaling(struct i915_request *request);

static inline bool intel_engine_has_waiter(const struct intel_engine_cs *engine)
{
	return READ_ONCE(engine->breadcrumbs.irq_wait);
}

unsigned int intel_engine_wakeup(struct intel_engine_cs *engine);
#define ENGINE_WAKEUP_WAITER BIT(0)
#define ENGINE_WAKEUP_ASLEEP BIT(1)

void intel_engine_pin_breadcrumbs_irq(struct intel_engine_cs *engine);
void intel_engine_unpin_breadcrumbs_irq(struct intel_engine_cs *engine);

void __intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine);
void intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine);

void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine);
void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine);

static inline u32 *gen8_emit_pipe_control(u32 *batch, u32 flags, u32 offset)
{
	memset(batch, 0, 6 * sizeof(u32));

	batch[0] = GFX_OP_PIPE_CONTROL(6);
	batch[1] = flags;
	batch[2] = offset;

	return batch + 6;
}

static inline u32 *
gen8_emit_ggtt_write_rcs(u32 *cs, u32 value, u32 gtt_offset)
{
	/* We're using qword write, offset should be aligned to 8 bytes. */
	GEM_BUG_ON(!IS_ALIGNED(gtt_offset, 8));

	/* w/a for post sync ops following a GPGPU operation we
	 * need a prior CS_STALL, which is emitted by the flush
	 * following the batch.
	 */
	*cs++ = GFX_OP_PIPE_CONTROL(6);
	*cs++ = PIPE_CONTROL_GLOBAL_GTT_IVB | PIPE_CONTROL_CS_STALL |
		PIPE_CONTROL_QW_WRITE;
	*cs++ = gtt_offset;
	*cs++ = 0;
	*cs++ = value;
	/* We're thrashing one dword of HWS. */
	*cs++ = 0;

	return cs;
}

static inline u32 *
gen8_emit_ggtt_write(u32 *cs, u32 value, u32 gtt_offset)
{
	/* w/a: bit 5 needs to be zero for MI_FLUSH_DW address. */
	GEM_BUG_ON(gtt_offset & (1 << 5));

	/* Offset should be aligned to 8 bytes for both (QW/DW) write types */
	GEM_BUG_ON(!IS_ALIGNED(gtt_offset, 8));

	*cs++ = (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW;
	*cs++ = gtt_offset | MI_FLUSH_DW_USE_GTT;
	*cs++ = 0;
	*cs++ = value;

	return cs;
}
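
/*
 * Illustrative sketch (not from the original header): emitting a request's
 * seqno breadcrumb into the engine's status page with the helper above.
 * "rq" and "cs" are assumed to come from the caller's emission sequence:
 *
 *	cs = gen8_emit_ggtt_write(cs, rq->global_seqno,
 *				  intel_hws_seqno_address(rq->engine));
 */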

bool intel_engine_is_idle(struct intel_engine_cs *engine);
bool intel_engines_are_idle(struct drm_i915_private *dev_priv);

bool intel_engine_has_kernel_context(const struct intel_engine_cs *engine);

void intel_engines_park(struct drm_i915_private *i915);
void intel_engines_unpark(struct drm_i915_private *i915);

void intel_engines_reset_default_submission(struct drm_i915_private *i915);
unsigned int intel_engines_has_context_isolation(struct drm_i915_private *i915);

bool intel_engine_can_store_dword(struct intel_engine_cs *engine);

__printf(3, 4)
void intel_engine_dump(struct intel_engine_cs *engine,
		       struct drm_printer *m,
		       const char *header, ...);

struct intel_engine_cs *
intel_engine_lookup_user(struct drm_i915_private *i915, u8 class, u8 instance);
static inline void intel_engine_context_in(struct intel_engine_cs *engine)
{
	unsigned long flags;

	if (READ_ONCE(engine->stats.enabled) == 0)
		return;

	write_seqlock_irqsave(&engine->stats.lock, flags);

	if (engine->stats.enabled > 0) {
		if (engine->stats.active++ == 0)
			engine->stats.start = ktime_get();
		GEM_BUG_ON(engine->stats.active == 0);
	}

	write_sequnlock_irqrestore(&engine->stats.lock, flags);
}

static inline void intel_engine_context_out(struct intel_engine_cs *engine)
{
	unsigned long flags;

	if (READ_ONCE(engine->stats.enabled) == 0)
		return;

	write_seqlock_irqsave(&engine->stats.lock, flags);

	if (engine->stats.enabled > 0) {
		ktime_t last;

		if (engine->stats.active && --engine->stats.active == 0) {
			/*
			 * Decrement the active context count and in case GPU
			 * is now idle add up to the running total.
			 */
			last = ktime_sub(ktime_get(), engine->stats.start);

			engine->stats.total = ktime_add(engine->stats.total,
							last);
		} else if (engine->stats.active == 0) {
			/*
			 * After turning on engine stats, context out might be
			 * the first event in which case we account from the
			 * time stats were turned on.
			 */
			last = ktime_sub(ktime_get(), engine->stats.enabled_at);

			engine->stats.total = ktime_add(engine->stats.total,
							last);
		}
	}

	write_sequnlock_irqrestore(&engine->stats.lock, flags);
}
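
/*
 * Illustrative sketch (not from the original header): a lockless read of
 * the accumulated busy time follows the usual seqlock retry pattern, much
 * as the implementation behind intel_engine_get_busy_time() is expected to:
 *
 *	unsigned int seq;
 *	ktime_t total;
 *
 *	do {
 *		seq = read_seqbegin(&engine->stats.lock);
 *		total = engine->stats.total;
 *	} while (read_seqretry(&engine->stats.lock, seq));
 */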

int intel_enable_engine_stats(struct intel_engine_cs *engine);
void intel_disable_engine_stats(struct intel_engine_cs *engine);

ktime_t intel_engine_get_busy_time(struct intel_engine_cs *engine);

#endif /* _INTEL_RINGBUFFER_H_ */