1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25#ifndef I915_REQUEST_H
26#define I915_REQUEST_H
27
28#include <linux/dma-fence.h>
29#include <linux/hrtimer.h>
30#include <linux/irq_work.h>
31#include <linux/llist.h>
32#include <linux/lockdep.h>
33
34#include "gem/i915_gem_context_types.h"
35#include "gt/intel_context_types.h"
36#include "gt/intel_engine_types.h"
37#include "gt/intel_timeline_types.h"
38
39#include "i915_gem.h"
40#include "i915_scheduler.h"
41#include "i915_selftest.h"
42#include "i915_sw_fence.h"
43
44#include <uapi/drm/i915_drm.h>
45
46struct drm_file;
47struct drm_i915_gem_object;
48struct drm_printer;
49struct i915_request;
50
/*
 * Singly-linked list of extra vma to record alongside a request,
 * attached via i915_request.capture_list. NOTE(review): presumably
 * consumed by the error-capture code on a GPU hang — confirm against
 * the capture/error-state implementation.
 */
struct i915_capture_list {
	struct i915_capture_list *next;
	struct i915_vma *vma;
};
55
/*
 * RQ_TRACE - emit an engine trace message for a request, prefixed with
 * the request's fence identity (context:seqno) and the breadcrumb value
 * currently visible in the HW status page (hwsp_seqno()).
 *
 * @rq is evaluated exactly once via the rq__ local.
 */
#define RQ_TRACE(rq, fmt, ...) do { \
	const struct i915_request *rq__ = (rq); \
	ENGINE_TRACE(rq__->engine, "fence %llx:%lld, current %d " fmt, \
		     rq__->fence.context, rq__->fence.seqno, \
		     hwsp_seqno(rq__), ##__VA_ARGS__); \
} while (0)
62
/*
 * Driver-private bits stored in i915_request.fence.flags, starting at
 * DMA_FENCE_FLAG_USER_BITS (the bits below that are owned by the dma_fence
 * core). Most bits have accessor helpers later in this header.
 *
 * NOTE(review): the code that sets/clears these bits lives outside this
 * header; descriptions below are reconstructed from the accessors in this
 * file and from the flag names — confirm against i915_request.c and the
 * execlists/GuC backends.
 */
enum {
	/*
	 * The request has been submitted for execution; tested by
	 * i915_request_is_active() and re-checked by
	 * i915_request_is_running().
	 */
	I915_FENCE_FLAG_ACTIVE = DMA_FENCE_FLAG_USER_BITS,

	/*
	 * The request is sitting in a scheduler priority queue; see
	 * i915_request_in_priority_queue().
	 */
	I915_FENCE_FLAG_PQUEUE,

	/*
	 * The request has been suspended ("put on hold") pending some
	 * external event; see i915_request_on_hold() and the
	 * set/clear helpers.
	 */
	I915_FENCE_FLAG_HOLD,

	/*
	 * An initial breadcrumb was emitted before the request payload,
	 * allowing __i915_request_has_started() to observe the seqno-1
	 * marker; see i915_request_has_initial_breadcrumb().
	 */
	I915_FENCE_FLAG_INITIAL_BREADCRUMB,

	/*
	 * The request is tracked for completion signaling (signal_link /
	 * signal_node in struct i915_request). NOTE(review): no accessor
	 * in this header; semantics inferred from naming.
	 */
	I915_FENCE_FLAG_SIGNAL,

	/*
	 * The request must not be preempted while executing; see
	 * i915_request_has_nopreempt().
	 */
	I915_FENCE_FLAG_NOPREEMPT,

	/*
	 * The request acts as a scheduling barrier/sentinel; see
	 * i915_request_has_sentinel(). NOTE(review): exact barrier
	 * semantics are defined by the submission backend.
	 */
	I915_FENCE_FLAG_SENTINEL,

	/*
	 * A frequency boost was applied on behalf of a waiter; see
	 * i915_request_has_waitboost(). NOTE(review): "boost" semantics
	 * inferred from the accessor name.
	 */
	I915_FENCE_FLAG_BOOST,
};
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
/**
 * struct i915_request - a unit of GPU work tracked as a dma_fence
 *
 * A request is refcounted through its embedded @fence (see
 * i915_request_get()/i915_request_put() below) and is considered
 * complete once the breadcrumb value read through @hwsp_seqno passes
 * @fence.seqno (see __i915_request_is_complete()).
 *
 * NOTE(review): per-field descriptions below are reconstructed from
 * usage within this header and from field names/types; confirm the
 * finer lifecycle details against i915_request.c.
 */
struct i915_request {
	/* Must be first: to_request() asserts offsetof(fence) == 0. */
	struct dma_fence fence;
	spinlock_t lock;

	/* Execution backend: engine, pinned context and its ring. */
	struct intel_engine_cs *engine;
	struct intel_context *context;
	struct intel_ring *ring;
	/*
	 * RCU-managed timeline; dereference via i915_request_timeline()
	 * (holding the timeline mutex) or i915_request_active_timeline()
	 * (holding engine->sched_engine->lock).
	 */
	struct intel_timeline __rcu *timeline;

	/* Links for completion-signaling lists (I915_FENCE_FLAG_SIGNAL). */
	struct list_head signal_link;
	struct llist_node signal_node;

	/* RCU grace-period snapshot — TODO confirm exact use in i915_request.c. */
	unsigned long rcustate;

	/* Lockdep pin cookie — presumably pinning the timeline mutex; verify. */
	struct pin_cookie cookie;

	/*
	 * Software fence gating submission to hardware; the union is the
	 * per-wait storage used while prerequisites are outstanding
	 * (wait-queue entry, dma-fence callback, or the duration callback
	 * with its emission timestamp).
	 */
	struct i915_sw_fence submit;
	union {
		wait_queue_entry_t submitq;
		struct i915_sw_dma_fence_cb dmaq;
		struct i915_request_duration_cb {
			struct dma_fence_cb cb;
			ktime_t emitted;
		} duration;
	};
	/* Callbacks to run when the request begins execution (see i915_request_notify_execute_cb_imm()). */
	struct llist_head execute_cb;
	/* Software fence for semaphore-based waits — confirm semantics. */
	struct i915_sw_fence semaphore;

	/* Scheduler node, embedded dependency slot and allowed-engine mask. */
	struct i915_sched_node sched;
	struct i915_dependency dep;
	intel_engine_mask_t execution_mask;

	/*
	 * Pointer to this request's breadcrumb slot in the HW status page.
	 * Read under RCU via __hwsp_seqno(); i915_request_mark_complete()
	 * redirects it at &fence.seqno once the HWSP may no longer be
	 * trusted, so completion checks keep working after retirement.
	 */
	const u32 *hwsp_seqno;

	/* Ring-buffer offsets delimiting this request's commands. */
	u32 head;
	u32 infix;
	u32 postfix;
	u32 tail;
	u32 wa_tail;
	/* Ring space reserved for the closing breadcrumb — verify. */
	u32 reserved_space;

	/* Batch buffer executed by this request (may be absent). */
	struct i915_vma *batch;

	/* Extra vma to record with this request on error capture. */
	struct i915_capture_list *capture_list;

	/* Time (jiffies) at which the request was emitted. */
	unsigned long emitted_jiffies;

	/* Link into its timeline's list of requests — confirm list owner. */
	struct list_head link;

	/* Per-request watchdog timer — presumably bounds runtime; verify. */
	struct i915_request_watchdog {
		struct llist_node link;
		struct hrtimer timer;
	} watchdog;

	/* GuC submission: link for GuC fence lists — confirm against GuC backend. */
	struct list_head guc_fence_link;

	/*
	 * Request priority while under GuC submission; the two sentinel
	 * values mark not-yet-initialised and already-finalised states.
	 */
#define GUC_PRIO_INIT 0xff
#define GUC_PRIO_FINI 0xfe
	u8 guc_prio;

	/* Selftest-only bookkeeping (compiled out of production builds). */
	I915_SELFTEST_DECLARE(struct {
		struct list_head link;
		unsigned long delay;
	} mock;)
};
310
/*
 * Allocation flags for fence/request allocations: normal sleeping
 * allocation that retries harder but may still fail, without triggering
 * the OOM killer or an allocation-failure warning.
 */
#define I915_FENCE_GFP (GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN)
312
313extern const struct dma_fence_ops i915_fence_ops;
314
/* True if @fence was created by this driver (its ops are i915_fence_ops). */
static inline bool dma_fence_is_i915(const struct dma_fence *fence)
{
	return fence->ops == &i915_fence_ops;
}
319
320struct kmem_cache *i915_request_slab_cache(void);
321
322struct i915_request * __must_check
323__i915_request_create(struct intel_context *ce, gfp_t gfp);
324struct i915_request * __must_check
325i915_request_create(struct intel_context *ce);
326
327void __i915_request_skip(struct i915_request *rq);
328bool i915_request_set_error_once(struct i915_request *rq, int error);
329struct i915_request *i915_request_mark_eio(struct i915_request *rq);
330
331struct i915_request *__i915_request_commit(struct i915_request *request);
332void __i915_request_queue(struct i915_request *rq,
333 const struct i915_sched_attr *attr);
334void __i915_request_queue_bh(struct i915_request *rq);
335
336bool i915_request_retire(struct i915_request *rq);
337void i915_request_retire_upto(struct i915_request *rq);
338
/*
 * to_request - downcast a dma_fence to its containing i915_request
 *
 * Only valid for fences created by this driver; GEM_BUG_ON checks this
 * for non-NULL fences. A NULL fence maps to a NULL request: the fence
 * is asserted (BUILD_BUG_ON) to be the first member, so the container
 * cast does not move the pointer.
 */
static inline struct i915_request *
to_request(struct dma_fence *fence)
{
	/* We assume that NULL fence/request are interoperable (offset 0). */
	BUILD_BUG_ON(offsetof(struct i915_request, fence) != 0);
	GEM_BUG_ON(fence && !dma_fence_is_i915(fence));
	return container_of(fence, struct i915_request, fence);
}
347
/* Acquire a reference on @rq; returns @rq for call chaining. */
static inline struct i915_request *
i915_request_get(struct i915_request *rq)
{
	return to_request(dma_fence_get(&rq->fence));
}
353
/*
 * Opportunistically acquire a reference under RCU; returns NULL if the
 * fence's refcount had already dropped to zero (dma_fence_get_rcu()).
 */
static inline struct i915_request *
i915_request_get_rcu(struct i915_request *rq)
{
	return to_request(dma_fence_get_rcu(&rq->fence));
}
359
/* Release a reference previously taken on @rq. */
static inline void
i915_request_put(struct i915_request *rq)
{
	dma_fence_put(&rq->fence);
}
365
366int i915_request_await_object(struct i915_request *to,
367 struct drm_i915_gem_object *obj,
368 bool write);
369int i915_request_await_dma_fence(struct i915_request *rq,
370 struct dma_fence *fence);
371int i915_request_await_execution(struct i915_request *rq,
372 struct dma_fence *fence);
373
374void i915_request_add(struct i915_request *rq);
375
376bool __i915_request_submit(struct i915_request *request);
377void i915_request_submit(struct i915_request *request);
378
379void __i915_request_unsubmit(struct i915_request *request);
380void i915_request_unsubmit(struct i915_request *request);
381
382void i915_request_cancel(struct i915_request *rq, int error);
383
384long i915_request_wait(struct i915_request *rq,
385 unsigned int flags,
386 long timeout)
387 __attribute__((nonnull(1)));
388#define I915_WAIT_INTERRUPTIBLE BIT(0)
389#define I915_WAIT_PRIORITY BIT(1)
390#define I915_WAIT_ALL BIT(2)
391
392void i915_request_show(struct drm_printer *m,
393 const struct i915_request *rq,
394 const char *prefix,
395 int indent);
396
/* Has the fence been signaled? Checked before any HWSP access because the request may outlive its HWSP (see i915_request_mark_complete()). */
static inline bool i915_request_signaled(const struct i915_request *rq)
{
	/* The request may live longer than its HWSP, so check flags first! */
	return test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags);
}
402
/* Is the request currently submitted for execution (I915_FENCE_FLAG_ACTIVE)? */
static inline bool i915_request_is_active(const struct i915_request *rq)
{
	return test_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags);
}
407
/* Is the request sitting in a scheduler priority queue (I915_FENCE_FLAG_PQUEUE)? */
static inline bool i915_request_in_priority_queue(const struct i915_request *rq)
{
	return test_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
}
412
/* Did this request emit an initial breadcrumb before its payload (needed for __i915_request_has_started())? */
static inline bool
i915_request_has_initial_breadcrumb(const struct i915_request *rq)
{
	return test_bit(I915_FENCE_FLAG_INITIAL_BREADCRUMB, &rq->fence.flags);
}
418
419
420
421
422static inline bool i915_seqno_passed(u32 seq1, u32 seq2)
423{
424 return (s32)(seq1 - seq2) >= 0;
425}
426
/*
 * __hwsp_seqno - raw read of the request's breadcrumb from the HWSP
 *
 * Caller must keep the HWSP backing storage valid, e.g. by holding
 * rcu_read_lock() as hwsp_seqno() below does. Both the pointer and the
 * value go through READ_ONCE since either may change concurrently (the
 * pointer is rewritten by i915_request_mark_complete()).
 */
static inline u32 __hwsp_seqno(const struct i915_request *rq)
{
	const u32 *hwsp = READ_ONCE(rq->hwsp_seqno);

	return READ_ONCE(*hwsp);
}
433
434
435
436
437
438
439
440
441
442
443
444
445
446
/*
 * hwsp_seqno - read the current breadcrumb value for this request
 *
 * Wraps __hwsp_seqno() in rcu_read_lock() so the HW status page cannot
 * be released while it is being read. The returned value is only a
 * snapshot; the GPU may advance it immediately afterwards.
 */
static inline u32 hwsp_seqno(const struct i915_request *rq)
{
	u32 seqno;

	rcu_read_lock(); /* the HWSP may be freed at runtime */
	seqno = __hwsp_seqno(rq);
	rcu_read_unlock();

	return seqno;
}
457
/*
 * True once the breadcrumb has passed fence.seqno - 1, i.e. the marker
 * written ahead of the payload (cf. I915_FENCE_FLAG_INITIAL_BREADCRUMB).
 * Caller is responsible for keeping the HWSP valid (RCU).
 */
static inline bool __i915_request_has_started(const struct i915_request *rq)
{
	return i915_seqno_passed(__hwsp_seqno(rq), rq->fence.seqno - 1);
}
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
/**
 * i915_request_started - check whether the request has begun executing
 * @rq: the request
 *
 * Returns true if the request was already signaled, or if its start
 * breadcrumb (fence.seqno - 1) has been written to the HWSP. The
 * signaled test is repeated under rcu_read_lock() so the HWSP is only
 * dereferenced while it cannot be freed (once signaled, the HWSP
 * pointer may be stale). Without external serialisation the result is
 * only a point-in-time snapshot.
 */
static inline bool i915_request_started(const struct i915_request *rq)
{
	bool result;

	if (i915_request_signaled(rq))
		return true;

	result = true;
	rcu_read_lock(); /* the HWSP may be freed at runtime */
	if (likely(!i915_request_signaled(rq)))
		/* Remember: the request may since have been preempted! */
		result = __i915_request_has_started(rq);
	rcu_read_unlock();

	return result;
}
505
506
507
508
509
510
511
512
513
/**
 * i915_request_is_running - check whether the request is executing now
 * @rq: the request
 *
 * A request counts as running when it is marked active (submitted) and
 * its start breadcrumb has been observed. ACTIVE is re-tested after the
 * HWSP read — presumably to filter out a request that was concurrently
 * unsubmitted/preempted in between; the result remains only a
 * point-in-time snapshot.
 */
static inline bool i915_request_is_running(const struct i915_request *rq)
{
	bool result;

	if (!i915_request_is_active(rq))
		return false;

	rcu_read_lock(); /* the HWSP may be freed at runtime */
	result = __i915_request_has_started(rq) && i915_request_is_active(rq);
	rcu_read_unlock();

	return result;
}
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
/*
 * i915_request_is_ready - check if the request is queued with the scheduler
 *
 * True once the request has been placed on the scheduler's lists
 * (sched.link non-empty). NOTE(review): the exact lifecycle point at
 * which sched.link is populated/emptied is managed outside this header
 * — confirm against the scheduler code.
 */
static inline bool i915_request_is_ready(const struct i915_request *rq)
{
	return !list_empty(&rq->sched.link);
}
548
/* True once the breadcrumb has passed fence.seqno. Caller must keep the HWSP valid (RCU). */
static inline bool __i915_request_is_complete(const struct i915_request *rq)
{
	return i915_seqno_passed(__hwsp_seqno(rq), rq->fence.seqno);
}
553
/**
 * i915_request_completed - check whether the request has finished
 * @rq: the request
 *
 * Returns true if the request has been signaled, or if its breadcrumb
 * in the HWSP has passed fence.seqno. The signaled check is repeated
 * under rcu_read_lock() so the HWSP is only read while its backing
 * storage cannot be freed (a signaled request's HWSP pointer may be
 * stale — see i915_request_mark_complete()).
 */
static inline bool i915_request_completed(const struct i915_request *rq)
{
	bool result;

	if (i915_request_signaled(rq))
		return true;

	result = true;
	rcu_read_lock(); /* the HWSP may be freed at runtime */
	if (likely(!i915_request_signaled(rq)))
		result = __i915_request_is_complete(rq);
	rcu_read_unlock();

	return result;
}
569
/*
 * i915_request_mark_complete - force the request to read as complete
 *
 * Redirects hwsp_seqno at the request's own fence.seqno, so that
 * __i915_request_is_complete() compares the seqno against itself and
 * always reports completion — decoupling the request from a HWSP that
 * may be going away.
 */
static inline void i915_request_mark_complete(struct i915_request *rq)
{
	WRITE_ONCE(rq->hwsp_seqno, /* decouple from HWSP */
		   (u32 *)&rq->fence.seqno);
}
575
/* Was a waitboost applied on behalf of a waiter (I915_FENCE_FLAG_BOOST)? */
static inline bool i915_request_has_waitboost(const struct i915_request *rq)
{
	return test_bit(I915_FENCE_FLAG_BOOST, &rq->fence.flags);
}
580
/* Must this request not be preempted (I915_FENCE_FLAG_NOPREEMPT)? Expected to be rare, hence unlikely(). */
static inline bool i915_request_has_nopreempt(const struct i915_request *rq)
{
	return unlikely(test_bit(I915_FENCE_FLAG_NOPREEMPT, &rq->fence.flags));
}
586
/* Is this request a scheduling sentinel/barrier (I915_FENCE_FLAG_SENTINEL)? */
static inline bool i915_request_has_sentinel(const struct i915_request *rq)
{
	return unlikely(test_bit(I915_FENCE_FLAG_SENTINEL, &rq->fence.flags));
}
591
/* Has the request been suspended (I915_FENCE_FLAG_HOLD)? */
static inline bool i915_request_on_hold(const struct i915_request *rq)
{
	return unlikely(test_bit(I915_FENCE_FLAG_HOLD, &rq->fence.flags));
}
596
/* Mark the request as suspended; paired with i915_request_clear_hold(). */
static inline void i915_request_set_hold(struct i915_request *rq)
{
	set_bit(I915_FENCE_FLAG_HOLD, &rq->fence.flags);
}
601
/* Resume a request previously put on hold by i915_request_set_hold(). */
static inline void i915_request_clear_hold(struct i915_request *rq)
{
	clear_bit(I915_FENCE_FLAG_HOLD, &rq->fence.flags);
}
606
/*
 * i915_request_timeline - the request's timeline, under its mutex
 *
 * Caller must hold the timeline's mutex; lockdep_is_held() asserts
 * this for the rcu_dereference_protected().
 */
static inline struct intel_timeline *
i915_request_timeline(const struct i915_request *rq)
{
	/* Valid only while the timeline mutex is held by the caller. */
	return rcu_dereference_protected(rq->timeline,
					 lockdep_is_held(&rcu_access_pointer(rq->timeline)->mutex));
}
614
/*
 * i915_request_gem_context - the GEM context behind this request
 *
 * NOTE(review): the rcu_dereference_protected() condition is literally
 * `true`, i.e. callers are trusted to guarantee the context's validity
 * themselves — confirm the expected calling convention in i915_request.c.
 */
static inline struct i915_gem_context *
i915_request_gem_context(const struct i915_request *rq)
{
	return rcu_dereference_protected(rq->context->gem_context, true);
}
621
/*
 * i915_request_active_timeline - the request's timeline while in flight
 *
 * Caller must hold the engine's sched_engine->lock, which is what
 * protects the timeline pointer for a submitted request (asserted via
 * lockdep_is_held()).
 */
static inline struct intel_timeline *
i915_request_active_timeline(const struct i915_request *rq)
{
	return rcu_dereference_protected(rq->timeline,
					 lockdep_is_held(&rq->engine->sched_engine->lock));
}
633
/*
 * i915_request_active_seqno - offset of this request's breadcrumb slot
 *
 * Combines the page-aligned base of the active timeline's hwsp_offset
 * with the in-page offset of rq->hwsp_seqno. Caller must satisfy
 * i915_request_active_timeline()'s locking requirement
 * (engine->sched_engine->lock).
 *
 * NOTE(review): the address space this offset refers to (GGTT?) and
 * why the base/offset are combined from two sources rather than taken
 * from the timeline alone is not visible here — confirm against the
 * timeline/HWSP management code.
 */
static inline u32
i915_request_active_seqno(const struct i915_request *rq)
{
	u32 hwsp_phys_base =
		page_mask_bits(i915_request_active_timeline(rq)->hwsp_offset);
	u32 hwsp_relative_offset = offset_in_page(rq->hwsp_seqno);

	return hwsp_phys_base + hwsp_relative_offset;
}
654
655bool
656i915_request_active_engine(struct i915_request *rq,
657 struct intel_engine_cs **active);
658
659void i915_request_notify_execute_cb_imm(struct i915_request *rq);
660
/*
 * Coarse classification of a request's lifecycle stage, as reported by
 * i915_test_request_state().
 */
enum i915_request_state {
	I915_REQUEST_UNKNOWN = 0,	/* state could not be determined */
	I915_REQUEST_COMPLETE,		/* finished executing */
	I915_REQUEST_PENDING,		/* not yet queued for execution */
	I915_REQUEST_QUEUED,		/* queued, awaiting execution */
	I915_REQUEST_ACTIVE,		/* currently executing */
};
668
669enum i915_request_state i915_test_request_state(struct i915_request *rq);
670
671void i915_request_module_exit(void);
672int i915_request_module_init(void);
673
674#endif
675