#include <linux/dma-fence-array.h>
#include <linux/dma-fence-chain.h>
#include <linux/irq_work.h>
#include <linux/prefetch.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/sched/signal.h>

#include "gem/i915_gem_context.h"
#include "gt/intel_breadcrumbs.h"
#include "gt/intel_context.h"
#include "gt/intel_engine.h"
#include "gt/intel_engine_heartbeat.h"
#include "gt/intel_gpu_commands.h"
#include "gt/intel_reset.h"
#include "gt/intel_ring.h"
#include "gt/intel_rps.h"

#include "i915_active.h"
#include "i915_drv.h"
#include "i915_globals.h"
#include "i915_trace.h"
#include "intel_pm.h"

struct execute_cb {
	struct irq_work work;
	struct i915_sw_fence *fence;
	void (*hook)(struct i915_request *rq, struct dma_fence *signal);
	struct i915_request *signal;
};

static struct i915_global_request {
	struct i915_global base;
	struct kmem_cache *slab_requests;
	struct kmem_cache *slab_execute_cbs;
} global;

static const char *i915_fence_get_driver_name(struct dma_fence *fence)
{
	return dev_name(to_request(fence)->engine->i915->drm.dev);
}

static const char *i915_fence_get_timeline_name(struct dma_fence *fence)
{
	const struct i915_gem_context *ctx;

	/*
	 * Once the request is signaled, its backing context (and so its
	 * name) may already have been retired and freed, so only look up
	 * the context name while the fence is still unsignaled.
	 */
	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		return "signaled";

	ctx = i915_request_gem_context(to_request(fence));
	if (!ctx)
		return "[" DRIVER_NAME "]";

	return ctx->name;
}

static bool i915_fence_signaled(struct dma_fence *fence)
{
	return i915_request_completed(to_request(fence));
}

static bool i915_fence_enable_signaling(struct dma_fence *fence)
{
	return i915_request_enable_breadcrumb(to_request(fence));
}

static signed long i915_fence_wait(struct dma_fence *fence,
				   bool interruptible,
				   signed long timeout)
{
	return i915_request_wait(to_request(fence),
				 interruptible | I915_WAIT_PRIORITY,
				 timeout);
}

struct kmem_cache *i915_request_slab_cache(void)
{
	return global.slab_requests;
}

static void i915_fence_release(struct dma_fence *fence)
{
	struct i915_request *rq = to_request(fence);

	/*
	 * The request memory will be recycled through the typesafe-by-RCU
	 * slab, so finalise the embedded sw fences now, before the
	 * allocation is reused.
	 */
	i915_sw_fence_fini(&rq->submit);
	i915_sw_fence_fini(&rq->semaphore);

	/*
	 * If the request was confined to a single physical engine, keep it
	 * cached on that engine (engine->request_pool) so the next
	 * allocation from an atomic context can reuse it without going
	 * back to the slab allocator.
	 */
	if (is_power_of_2(rq->execution_mask) &&
	    !cmpxchg(&rq->engine->request_pool, NULL, rq))
		return;

	kmem_cache_free(global.slab_requests, rq);
}

const struct dma_fence_ops i915_fence_ops = {
	.get_driver_name = i915_fence_get_driver_name,
	.get_timeline_name = i915_fence_get_timeline_name,
	.enable_signaling = i915_fence_enable_signaling,
	.signaled = i915_fence_signaled,
	.wait = i915_fence_wait,
	.release = i915_fence_release,
};

static void irq_execute_cb(struct irq_work *wrk)
{
	struct execute_cb *cb = container_of(wrk, typeof(*cb), work);

	i915_sw_fence_complete(cb->fence);
	kmem_cache_free(global.slab_execute_cbs, cb);
}

static void irq_execute_cb_hook(struct irq_work *wrk)
{
	struct execute_cb *cb = container_of(wrk, typeof(*cb), work);

	cb->hook(container_of(cb->fence, struct i915_request, submit),
		 &cb->signal->fence);
	i915_request_put(cb->signal);

	irq_execute_cb(wrk);
}

static __always_inline void
__notify_execute_cb(struct i915_request *rq, bool (*fn)(struct irq_work *wrk))
{
	struct execute_cb *cb, *cn;

	if (llist_empty(&rq->execute_cb))
		return;

	llist_for_each_entry_safe(cb, cn,
				  llist_del_all(&rq->execute_cb),
				  work.node.llist)
		fn(&cb->work);
}

static void __notify_execute_cb_irq(struct i915_request *rq)
{
	__notify_execute_cb(rq, irq_work_queue);
}

static bool irq_work_imm(struct irq_work *wrk)
{
	wrk->func(wrk);
	return false;
}

static void __notify_execute_cb_imm(struct i915_request *rq)
{
	__notify_execute_cb(rq, irq_work_imm);
}

static void free_capture_list(struct i915_request *request)
{
	struct i915_capture_list *capture;

	capture = fetch_and_zero(&request->capture_list);
	while (capture) {
		struct i915_capture_list *next = capture->next;

		kfree(capture);
		capture = next;
	}
}

static void __i915_request_fill(struct i915_request *rq, u8 val)
{
	void *vaddr = rq->ring->vaddr;
	u32 head;

	head = rq->infix;
	if (rq->postfix < head) {
		memset(vaddr + head, val, rq->ring->size - head);
		head = 0;
	}
	memset(vaddr + head, val, rq->postfix - head);
}

/**
 * i915_request_active_engine - report the engine a request is running on
 * @rq: request to inspect
 * @active: location in which to return the active engine
 *
 * Returns true if the request is active; if it is active but not yet
 * completed, *@active is set to the engine currently executing it.
 */
bool
i915_request_active_engine(struct i915_request *rq,
			   struct intel_engine_cs **active)
{
	struct intel_engine_cs *engine, *locked;
	bool ret = false;

	/*
	 * Serialise with __i915_request_submit() so that rq->engine cannot
	 * change beneath us: take the lock of the engine we sampled and
	 * re-check that it still owns the request.
	 */
	locked = READ_ONCE(rq->engine);
	spin_lock_irq(&locked->active.lock);
	while (unlikely(locked != (engine = READ_ONCE(rq->engine)))) {
		spin_unlock(&locked->active.lock);
		locked = engine;
		spin_lock(&locked->active.lock);
	}

	if (i915_request_is_active(rq)) {
		if (!__i915_request_is_complete(rq))
			*active = locked;
		ret = true;
	}

	spin_unlock_irq(&locked->active.lock);

	return ret;
}

static void remove_from_engine(struct i915_request *rq)
{
	struct intel_engine_cs *engine, *locked;

	/*
	 * Virtual engines complicate acquiring the engine timeline lock,
	 * as their rq->engine pointer is not stable until under that
	 * engine lock. The simple ploy we use is to take the lock then
	 * check that the rq still belongs to the newly locked engine.
	 */
	locked = READ_ONCE(rq->engine);
	spin_lock_irq(&locked->active.lock);
	while (unlikely(locked != (engine = READ_ONCE(rq->engine)))) {
		spin_unlock(&locked->active.lock);
		spin_lock(&engine->active.lock);
		locked = engine;
	}
	list_del_init(&rq->sched.link);

	clear_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
	clear_bit(I915_FENCE_FLAG_HOLD, &rq->fence.flags);

	/* Prevent further __await_execution() registering a cb, then flush */
	set_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags);

	spin_unlock_irq(&locked->active.lock);

	__notify_execute_cb_imm(rq);
}

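/*
 * Request watchdog: if the context specifies a watchdog timeout, an hrtimer
 * is armed when the request is submitted (__rq_arm_watchdog). Should the
 * timer fire before the request completes, the request is handed to the GT
 * watchdog worker for cancellation; the timer is disarmed again on
 * retirement via __rq_cancel_watchdog().
 */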
static void __rq_init_watchdog(struct i915_request *rq)
{
	rq->watchdog.timer.function = NULL;
}

static enum hrtimer_restart __rq_watchdog_expired(struct hrtimer *hrtimer)
{
	struct i915_request *rq =
		container_of(hrtimer, struct i915_request, watchdog.timer);
	struct intel_gt *gt = rq->engine->gt;

	if (!i915_request_completed(rq)) {
		if (llist_add(&rq->watchdog.link, &gt->watchdog.list))
			schedule_work(&gt->watchdog.work);
	} else {
		i915_request_put(rq);
	}

	return HRTIMER_NORESTART;
}

static void __rq_arm_watchdog(struct i915_request *rq)
{
	struct i915_request_watchdog *wdg = &rq->watchdog;
	struct intel_context *ce = rq->context;

	if (!ce->watchdog.timeout_us)
		return;

	i915_request_get(rq);

	hrtimer_init(&wdg->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	wdg->timer.function = __rq_watchdog_expired;
	hrtimer_start_range_ns(&wdg->timer,
			       ns_to_ktime(ce->watchdog.timeout_us *
					   NSEC_PER_USEC),
			       NSEC_PER_MSEC,
			       HRTIMER_MODE_REL);
}

static void __rq_cancel_watchdog(struct i915_request *rq)
{
	struct i915_request_watchdog *wdg = &rq->watchdog;

	if (wdg->timer.function && hrtimer_try_to_cancel(&wdg->timer) > 0)
		i915_request_put(rq);
}

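/*
 * i915_request_retire() reclaims a completed request: it releases the ring
 * space the request consumed, signals the fence if nobody else has, drops
 * the context/scheduler bookkeeping and finally the request reference.
 * Requests must be retired in order along their timeline.
 */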
bool i915_request_retire(struct i915_request *rq)
{
	if (!__i915_request_is_complete(rq))
		return false;

	RQ_TRACE(rq, "\n");

	GEM_BUG_ON(!i915_sw_fence_signaled(&rq->submit));
	trace_i915_request_retire(rq);
	i915_request_mark_complete(rq);

	__rq_cancel_watchdog(rq);

	/*
	 * The GPU has read past this request (it sent us the interrupt for
	 * its breadcrumb), so we can release the ring space it used by
	 * advancing ring->head to the request's postfix. This requires
	 * that requests are always retired in completion order.
	 */
	GEM_BUG_ON(!list_is_first(&rq->link,
				  &i915_request_timeline(rq)->requests));
	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
		/* Poison our space in the ring before releasing it */
		__i915_request_fill(rq, POISON_FREE);
	rq->ring->head = rq->postfix;

	if (!i915_request_signaled(rq)) {
		spin_lock_irq(&rq->lock);
		dma_fence_signal_locked(&rq->fence);
		spin_unlock_irq(&rq->lock);
	}

	if (test_and_set_bit(I915_FENCE_FLAG_BOOST, &rq->fence.flags))
		atomic_dec(&rq->engine->gt->rps.num_waiters);

	/*
	 * Remove the request from the engine's execution lists (under the
	 * engine lock) before dropping the final reference, so that no
	 * concurrent execute_cb can see a stale request.
	 */
	if (!list_empty(&rq->sched.link))
		remove_from_engine(rq);
	GEM_BUG_ON(!llist_empty(&rq->execute_cb));

	__list_del_entry(&rq->link); /* poison neither prev/next (RCU walks) */

	intel_context_exit(rq->context);
	intel_context_unpin(rq->context);

	free_capture_list(rq);
	i915_sched_node_fini(&rq->sched);
	i915_request_put(rq);

	return true;
}

void i915_request_retire_upto(struct i915_request *rq)
{
	struct intel_timeline * const tl = i915_request_timeline(rq);
	struct i915_request *tmp;

	RQ_TRACE(rq, "\n");
	GEM_BUG_ON(!__i915_request_is_complete(rq));

	do {
		tmp = list_first_entry(&tl->requests, typeof(*tmp), link);
	} while (i915_request_retire(tmp) && tmp != rq);
}

static struct i915_request * const *
__engine_active(struct intel_engine_cs *engine)
{
	return READ_ONCE(engine->execlists.active);
}

static bool __request_in_flight(const struct i915_request *signal)
{
	struct i915_request * const *port, *rq;
	bool inflight = false;

	if (!i915_request_is_ready(signal))
		return false;

	/*
	 * Even if we have unwound the request (e.g. for preempt-to-busy),
	 * it may still be running on HW until the context switch completes.
	 * To decide whether the signaler is genuinely executing, walk the
	 * engine's execlists ports and look for a request from the same
	 * context that has reached (or passed) the signaler's seqno. The
	 * ports array is only stable under rcu_read_lock() as the
	 * submission tasklet may rewrite it at any time.
	 */
	if (!intel_context_inflight(signal->context))
		return false;

	rcu_read_lock();
	for (port = __engine_active(signal->engine);
	     (rq = READ_ONCE(*port)); /* may race with promotion of this rq */
	     port++) {
		if (rq->context == signal->context) {
			inflight = i915_seqno_passed(rq->fence.seqno,
						     signal->fence.seqno);
			break;
		}
	}
	rcu_read_unlock();

	return inflight;
}

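/*
 * __await_execution() registers an execute_cb on @signal so that @rq only
 * completes its submit-fence await once @signal has actually been submitted
 * to HW (not merely queued). If @signal is already active the callback
 * fires immediately; otherwise it runs when @signal is promoted to the
 * execution lists.
 */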
static int
__await_execution(struct i915_request *rq,
		  struct i915_request *signal,
		  void (*hook)(struct i915_request *rq,
			       struct dma_fence *signal),
		  gfp_t gfp)
{
	struct execute_cb *cb;

	if (i915_request_is_active(signal)) {
		if (hook)
			hook(rq, &signal->fence);
		return 0;
	}

	cb = kmem_cache_alloc(global.slab_execute_cbs, gfp);
	if (!cb)
		return -ENOMEM;

	cb->fence = &rq->submit;
	i915_sw_fence_await(cb->fence);
	init_irq_work(&cb->work, irq_execute_cb);

	if (hook) {
		cb->hook = hook;
		cb->signal = i915_request_get(signal);
		cb->work.func = irq_execute_cb_hook;
	}

	/*
	 * Register the callback first, then recheck whether the signaler
	 * became active in the meantime. If it did (or is still in flight
	 * after being unwound), run the callback immediately as the engine
	 * may never look at this request again.
	 */
	if (llist_add(&cb->work.node.llist, &signal->execute_cb)) {
		if (i915_request_is_active(signal) ||
		    __request_in_flight(signal))
			__notify_execute_cb_imm(signal);
	}

	return 0;
}

static bool fatal_error(int error)
{
	switch (error) {
	case 0:
	case -EAGAIN:
	case -ETIMEDOUT:
		return false;
	default:
		return true;
	}
}

void __i915_request_skip(struct i915_request *rq)
{
	GEM_BUG_ON(!fatal_error(rq->fence.error));

	if (rq->infix == rq->postfix)
		return;

	RQ_TRACE(rq, "error: %d\n", rq->fence.error);

	/*
	 * As this request likely depends on state from the lost context,
	 * clear out all the user operations leaving the breadcrumb at the
	 * end (so we get the fence notifications).
	 */
	__i915_request_fill(rq, 0);
	rq->infix = rq->postfix;
}

bool i915_request_set_error_once(struct i915_request *rq, int error)
{
	int old;

	GEM_BUG_ON(!IS_ERR_VALUE((long)error));

	if (i915_request_signaled(rq))
		return false;

	old = READ_ONCE(rq->fence.error);
	do {
		if (fatal_error(old))
			return false;
	} while (!try_cmpxchg(&rq->fence.error, &old, error));

	return true;
}

struct i915_request *i915_request_mark_eio(struct i915_request *rq)
{
	if (__i915_request_is_complete(rq))
		return NULL;

	GEM_BUG_ON(i915_request_signaled(rq));

	/* As soon as the request is completed, it may be retired */
	rq = i915_request_get(rq);

	i915_request_set_error_once(rq, -EIO);
	i915_request_mark_complete(rq);

	return rq;
}

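/*
 * __i915_request_submit - transfer the request to the execution backend.
 * Called with irqs disabled and engine->active.lock held. Returns true if
 * the request still needs to be executed (i.e. it was not already complete
 * when we got here), so the caller knows whether to pass it on to HW.
 */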
bool __i915_request_submit(struct i915_request *request)
{
	struct intel_engine_cs *engine = request->engine;
	bool result = false;

	RQ_TRACE(request, "\n");

	GEM_BUG_ON(!irqs_disabled());
	lockdep_assert_held(&engine->active.lock);

	/*
	 * With preempt-to-busy we may encounter requests that were
	 * unsubmitted from HW but left running until the next ack, and so
	 * have completed in the meantime. On resubmission of such a
	 * completed request we can skip re-emitting the breadcrumb and
	 * just move it onto the active list so that any waiting execute
	 * callbacks are still flushed.
	 */
	if (__i915_request_is_complete(request)) {
		list_del_init(&request->sched.link);
		goto active;
	}

	if (unlikely(intel_context_is_banned(request->context)))
		i915_request_set_error_once(request, -EIO);

	if (unlikely(fatal_error(request->fence.error)))
		__i915_request_skip(request);

	/*
	 * Record that this request is being executed with its semaphore
	 * still pending: subsequent requests should then avoid piling
	 * further busywaits onto this engine (see already_busywaiting()).
	 */
	if (request->sched.semaphores &&
	    i915_sw_fence_signaled(&request->semaphore))
		engine->saturated |= request->sched.semaphores;

	engine->emit_fini_breadcrumb(request,
				     request->ring->vaddr + request->postfix);

	trace_i915_request_execute(request);
	engine->serial++;
	result = true;

	GEM_BUG_ON(test_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags));
	list_move_tail(&request->sched.link, &engine->active.requests);
active:
	clear_bit(I915_FENCE_FLAG_PQUEUE, &request->fence.flags);
	set_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags);

	/*
	 * Now that the request is on the HW runlist, flush any execute
	 * callbacks registered by __await_execution(); these are deferred
	 * to irq_work so they run outside the engine->active.lock.
	 */
	__notify_execute_cb_irq(request);

	/* We may be recursing from the signal callback of another i915 fence */
	if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags))
		i915_request_enable_breadcrumb(request);

	return result;
}

void i915_request_submit(struct i915_request *request)
{
	struct intel_engine_cs *engine = request->engine;
	unsigned long flags;

	/* Will be called from irq-context when using foreign fences. */
	spin_lock_irqsave(&engine->active.lock, flags);

	__i915_request_submit(request);

	spin_unlock_irqrestore(&engine->active.lock, flags);
}

void __i915_request_unsubmit(struct i915_request *request)
{
	struct intel_engine_cs *engine = request->engine;

	/*
	 * Only unwind in reverse order, required so that the per-context
	 * list is kept in seqno/ring order.
	 */
	RQ_TRACE(request, "\n");

	GEM_BUG_ON(!irqs_disabled());
	lockdep_assert_held(&engine->active.lock);

	/*
	 * Mark the request as no longer active before detaching its
	 * breadcrumb, so that a concurrent dma_fence_enable_signaling()
	 * cannot re-attach itself behind our back.
	 */
	GEM_BUG_ON(!test_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags));
	clear_bit_unlock(I915_FENCE_FLAG_ACTIVE, &request->fence.flags);
	if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags))
		i915_request_cancel_breadcrumb(request);

	/* We've already spun, don't charge on resubmitting. */
	if (request->sched.semaphores && __i915_request_has_started(request))
		request->sched.semaphores = 0;

	/*
	 * No explicit wakeup is required here; any waiters will be woken
	 * when the request is resubmitted and completes.
	 */
}

void i915_request_unsubmit(struct i915_request *request)
{
	struct intel_engine_cs *engine = request->engine;
	unsigned long flags;

	/* Will be called from irq-context when using foreign fences. */
	spin_lock_irqsave(&engine->active.lock, flags);

	__i915_request_unsubmit(request);

	spin_unlock_irqrestore(&engine->active.lock, flags);
}

static void __cancel_request(struct i915_request *rq)
{
	struct intel_engine_cs *engine = NULL;

	i915_request_active_engine(rq, &engine);

	if (engine && intel_engine_pulse(engine))
		intel_gt_handle_error(engine->gt, engine->mask, 0,
				      "request cancellation by %s",
				      current->comm);
}

void i915_request_cancel(struct i915_request *rq, int error)
{
	if (!i915_request_set_error_once(rq, error))
		return;

	set_bit(I915_FENCE_FLAG_SENTINEL, &rq->fence.flags);

	__cancel_request(rq);
}

static int __i915_sw_fence_call
submit_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
{
	struct i915_request *request =
		container_of(fence, typeof(*request), submit);

	switch (state) {
	case FENCE_COMPLETE:
		trace_i915_request_submit(request);

		if (unlikely(fence->error))
			i915_request_set_error_once(request, fence->error);
		else
			__rq_arm_watchdog(request);

		/*
		 * Take rcu_read_lock() around the submit_request() callback
		 * so that an emergency wedge (which replaces the callback)
		 * can use RCU to wait for in-flight submissions.
		 */
		rcu_read_lock();
		request->engine->submit_request(request);
		rcu_read_unlock();
		break;

	case FENCE_FREE:
		i915_request_put(request);
		break;
	}

	return NOTIFY_DONE;
}

static int __i915_sw_fence_call
semaphore_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
{
	struct i915_request *rq = container_of(fence, typeof(*rq), semaphore);

	switch (state) {
	case FENCE_COMPLETE:
		break;

	case FENCE_FREE:
		i915_request_put(rq);
		break;
	}

	return NOTIFY_DONE;
}

static void retire_requests(struct intel_timeline *tl)
{
	struct i915_request *rq, *rn;

	list_for_each_entry_safe(rq, rn, &tl->requests, link)
		if (!i915_request_retire(rq))
			break;
}

static noinline struct i915_request *
request_alloc_slow(struct intel_timeline *tl,
		   struct i915_request **rsvd,
		   gfp_t gfp)
{
	struct i915_request *rq;

	/* If we cannot wait, dip into our reserves */
	if (!gfpflags_allow_blocking(gfp)) {
		rq = xchg(rsvd, NULL);
		if (!rq) /* Use the normal failure path for one final WARN */
			goto out;

		return rq;
	}

	if (list_empty(&tl->requests))
		goto out;

	/* Move our oldest request to the slab-cache (if not in use!) */
	rq = list_first_entry(&tl->requests, typeof(*rq), link);
	i915_request_retire(rq);

	rq = kmem_cache_alloc(global.slab_requests,
			      gfp | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
	if (rq)
		return rq;

	/* Ratelimit ourselves to prevent oom from malicious clients */
	rq = list_last_entry(&tl->requests, typeof(*rq), link);
	cond_synchronize_rcu(rq->rcustate);

	/* Retire our old requests in the hope that we free some */
	retire_requests(tl);

out:
	return kmem_cache_alloc(global.slab_requests, gfp);
}

static void __i915_request_ctor(void *arg)
{
	struct i915_request *rq = arg;

	spin_lock_init(&rq->lock);
	i915_sched_node_init(&rq->sched);
	i915_sw_fence_init(&rq->submit, submit_notify);
	i915_sw_fence_init(&rq->semaphore, semaphore_notify);

	dma_fence_init(&rq->fence, &i915_fence_ops, &rq->lock, 0, 0);

	rq->capture_list = NULL;

	init_llist_head(&rq->execute_cb);
}

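/*
 * __i915_request_create - allocate and initialise a request on @ce's
 * timeline. The caller must already hold the timeline mutex (see
 * i915_request_create() for the locked wrapper) and is expected to either
 * submit the request with i915_request_add() or unwind it.
 */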
struct i915_request *
__i915_request_create(struct intel_context *ce, gfp_t gfp)
{
	struct intel_timeline *tl = ce->timeline;
	struct i915_request *rq;
	u32 seqno;
	int ret;

	might_sleep_if(gfpflags_allow_blocking(gfp));

	__intel_context_pin(ce);

	/*
	 * The request slab is SLAB_TYPESAFE_BY_RCU: a request may be
	 * reallocated (and its fence fields rewritten) while an RCU reader
	 * is still inspecting it. Lookups of the fence are therefore only
	 * valid while holding a reference, and all mutable fields below
	 * are (re)initialised on every allocation rather than in the ctor.
	 */
	rq = kmem_cache_alloc(global.slab_requests,
			      gfp | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
	if (unlikely(!rq)) {
		rq = request_alloc_slow(tl, &ce->engine->request_pool, gfp);
		if (!rq) {
			ret = -ENOMEM;
			goto err_unreserve;
		}
	}

	rq->context = ce;
	rq->engine = ce->engine;
	rq->ring = ce->ring;
	rq->execution_mask = ce->engine->mask;

	kref_init(&rq->fence.refcount);
	rq->fence.flags = 0;
	rq->fence.error = 0;
	INIT_LIST_HEAD(&rq->fence.cb_list);

	ret = intel_timeline_get_seqno(tl, rq, &seqno);
	if (ret)
		goto err_free;

	rq->fence.context = tl->fence_context;
	rq->fence.seqno = seqno;

	RCU_INIT_POINTER(rq->timeline, tl);
	rq->hwsp_seqno = tl->hwsp_seqno;
	GEM_BUG_ON(__i915_request_is_complete(rq));

	rq->rcustate = get_state_synchronize_rcu();

	/* We bump the ref for the fence chain */
	i915_sw_fence_reinit(&i915_request_get(rq)->submit);
	i915_sw_fence_reinit(&i915_request_get(rq)->semaphore);

	i915_sched_node_reinit(&rq->sched);

	/* No zalloc, so clear what we need by ourselves */
	rq->batch = NULL;
	__rq_init_watchdog(rq);
	GEM_BUG_ON(rq->capture_list);
	GEM_BUG_ON(!llist_empty(&rq->execute_cb));

	/*
	 * Reserve space in the ring buffer for all the commands required to
	 * eventually emit this request. This is to guarantee that the
	 * i915_request_add() call can't fail. Note that the reserve may need
	 * to be redone if the request is not actually submitted straight
	 * away, e.g. because a GPU scheduler has deferred it.
	 */
	rq->reserved_space =
		2 * rq->engine->emit_fini_breadcrumb_dw * sizeof(u32);

	/*
	 * Record the position of the start of the request so that
	 * should we detect the updated seqno part-way through the
	 * GPU processing the request, we never over-estimate the
	 * position of the head.
	 */
	rq->head = rq->ring->emit;

	ret = rq->engine->request_alloc(rq);
	if (ret)
		goto err_unwind;

	rq->infix = rq->ring->emit; /* end of header; start of user payload */

	intel_context_mark_active(ce);
	list_add_tail_rcu(&rq->link, &tl->requests);

	return rq;

err_unwind:
	ce->ring->emit = rq->head;

	/* Make sure we didn't add ourselves to external state before freeing */
	GEM_BUG_ON(!list_empty(&rq->sched.signalers_list));
	GEM_BUG_ON(!list_empty(&rq->sched.waiters_list));

err_free:
	kmem_cache_free(global.slab_requests, rq);
err_unreserve:
	intel_context_unpin(ce);
	return ERR_PTR(ret);
}

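/*
 * i915_request_create - locked wrapper around __i915_request_create().
 *
 * Typical use by a caller that wants to emit commands on a context
 * (illustrative sketch only; error handling trimmed):
 *
 *	rq = i915_request_create(ce);
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);
 *
 *	cs = intel_ring_begin(rq, 4);	// emit commands into rq->ring
 *	...
 *	i915_request_add(rq);		// commits and unlocks the timeline
 *
 * The returned request holds the timeline mutex pinned; it must be released
 * by i915_request_add() (or by unwinding on error).
 */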
struct i915_request *
i915_request_create(struct intel_context *ce)
{
	struct i915_request *rq;
	struct intel_timeline *tl;

	tl = intel_context_timeline_lock(ce);
	if (IS_ERR(tl))
		return ERR_CAST(tl);

	/* Move our oldest request to the slab-cache (if not in use!) */
	rq = list_first_entry(&tl->requests, typeof(*rq), link);
	if (!list_is_last(&rq->link, &tl->requests))
		i915_request_retire(rq);

	intel_context_enter(ce);
	rq = __i915_request_create(ce, GFP_KERNEL);
	intel_context_exit(ce); /* active reference transferred to request */
	if (IS_ERR(rq))
		goto err_unlock;

	/* Check that we do not interrupt ourselves with a new request */
	rq->cookie = lockdep_pin_lock(&tl->mutex);

	return rq;

err_unlock:
	intel_context_timeline_unlock(tl);
	return rq;
}

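/*
 * i915_request_await_start - order @rq behind the *start* of @signal.
 * Rather than waiting on @signal itself (which would defeat the point),
 * we wait on the request preceding @signal on its timeline, so that @rq
 * cannot be submitted until @signal is at least ready to begin.
 */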
static int
i915_request_await_start(struct i915_request *rq, struct i915_request *signal)
{
	struct dma_fence *fence;
	int err;

	if (i915_request_timeline(rq) == rcu_access_pointer(signal->timeline))
		return 0;

	if (i915_request_started(signal))
		return 0;

	/*
	 * Locate the request preceding @signal on its timeline and wait
	 * for that to complete. All of this is done locklessly under RCU,
	 * so recheck at each step that the list has not changed beneath us.
	 */
	fence = NULL;
	rcu_read_lock();
	do {
		struct list_head *pos = READ_ONCE(signal->link.prev);
		struct i915_request *prev;

		/* Confirm signal has not been retired, the link is valid */
		if (unlikely(__i915_request_has_started(signal)))
			break;

		/* Is signal the earliest request on its timeline? */
		if (pos == &rcu_dereference(signal->timeline)->requests)
			break;

		/*
		 * Peek at the request before us in the timeline. That
		 * request will only be valid before it is retired, so
		 * after acquiring a reference to it, confirm that it is
		 * still part of the signaler's timeline.
		 */
		prev = list_entry(pos, typeof(*prev), link);
		if (!i915_request_get_rcu(prev))
			break;

		/* After the strong barrier, confirm prev is still attached */
		if (unlikely(READ_ONCE(prev->link.next) != &signal->link)) {
			i915_request_put(prev);
			break;
		}

		fence = &prev->fence;
	} while (0);
	rcu_read_unlock();
	if (!fence)
		return 0;

	err = 0;
	if (!intel_timeline_sync_is_later(i915_request_timeline(rq), fence))
		err = i915_sw_fence_await_dma_fence(&rq->submit,
						    fence, 0,
						    I915_FENCE_GFP);
	dma_fence_put(fence);

	return err;
}

static intel_engine_mask_t
already_busywaiting(struct i915_request *rq)
{
	/*
	 * Polling a semaphore causes bus traffic, delaying other users of
	 * both the GPU and CPU. To limit the impact on others while still
	 * taking advantage of early submission, restrict each request to
	 * at most one busywait per signalling engine, and skip semaphores
	 * entirely once the engine is known to be saturated (i.e. the
	 * semaphore would not be submitted early and would only generate
	 * bus traffic polling an already-passed seqno).
	 *
	 * See the are-we-too-late? check in __i915_request_submit().
	 */
	return rq->sched.semaphores | READ_ONCE(rq->engine->saturated);
}

static int
__emit_semaphore_wait(struct i915_request *to,
		      struct i915_request *from,
		      u32 seqno)
{
	const int has_token = INTEL_GEN(to->engine->i915) >= 12;
	u32 hwsp_offset;
	int len, err;
	u32 *cs;

	GEM_BUG_ON(INTEL_GEN(to->engine->i915) < 8);
	GEM_BUG_ON(i915_request_has_initial_breadcrumb(to));

	/* We need to pin the signaler's HWSP until we are finished reading. */
	err = intel_timeline_read_hwsp(from, to, &hwsp_offset);
	if (err)
		return err;

	len = 4;
	if (has_token)
		len += 2;

	cs = intel_ring_begin(to, len);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	/*
	 * Using greater-than-or-equal here means we have to worry about
	 * seqno wraparound. To side step that issue, we swap the timeline
	 * HWSP upon wrapping, so that everyone listening for the old
	 * (pre-wrap) values does not see the much smaller (post-wrap)
	 * values than they were expecting (and so wait forever).
	 */
	*cs++ = (MI_SEMAPHORE_WAIT |
		 MI_SEMAPHORE_GLOBAL_GTT |
		 MI_SEMAPHORE_POLL |
		 MI_SEMAPHORE_SAD_GTE_SDD) +
		has_token;
	*cs++ = seqno;
	*cs++ = hwsp_offset;
	*cs++ = 0;
	if (has_token) {
		*cs++ = 0;
		*cs++ = MI_NOOP;
	}

	intel_ring_advance(to, cs);
	return 0;
}

static int
emit_semaphore_wait(struct i915_request *to,
		    struct i915_request *from,
		    gfp_t gfp)
{
	const intel_engine_mask_t mask = READ_ONCE(from->engine)->mask;
	struct i915_sw_fence *wait = &to->submit;

	if (!intel_context_use_semaphores(to->context))
		goto await_fence;

	if (i915_request_has_initial_breadcrumb(to))
		goto await_fence;

	/*
	 * If this or its dependents are waiting on an external fence
	 * that may fail catastrophically, then we want to avoid using
	 * semaphores as they bypass the fence signaling metadata, and we
	 * lose the fence->error propagation.
	 */
	if (from->sched.flags & I915_SCHED_HAS_EXTERNAL_CHAIN)
		goto await_fence;

	/* Just emit the first semaphore we see as request space is limited. */
	if (already_busywaiting(to) & mask)
		goto await_fence;

	if (i915_request_await_start(to, from) < 0)
		goto await_fence;

	/* Only submit our spinner after the signaler is running! */
	if (__await_execution(to, from, NULL, gfp))
		goto await_fence;

	if (__emit_semaphore_wait(to, from, from->fence.seqno))
		goto await_fence;

	to->sched.semaphores |= mask;
	wait = &to->semaphore;

await_fence:
	return i915_sw_fence_await_dma_fence(wait,
					     &from->fence, 0,
					     I915_FENCE_GFP);
}

static bool intel_timeline_sync_has_start(struct intel_timeline *tl,
					  struct dma_fence *fence)
{
	return __intel_timeline_sync_is_later(tl,
					      fence->context,
					      fence->seqno - 1);
}

static int intel_timeline_sync_set_start(struct intel_timeline *tl,
					 const struct dma_fence *fence)
{
	return __intel_timeline_sync_set(tl, fence->context, fence->seqno - 1);
}

static int
__i915_request_await_execution(struct i915_request *to,
			       struct i915_request *from,
			       void (*hook)(struct i915_request *rq,
					    struct dma_fence *signal))
{
	int err;

	GEM_BUG_ON(intel_context_is_barrier(from->context));

	/* Submit both requests at the same time */
	err = __await_execution(to, from, hook, I915_FENCE_GFP);
	if (err)
		return err;

	/* Squash repeated awaits on the same timeline */
	if (intel_timeline_sync_has_start(i915_request_timeline(to),
					  &from->fence))
		return 0;

	/*
	 * The execute cb only fires once the signaler is submitted to HW,
	 * which may be long before it is actually ready to run. So also
	 * order this request behind the start of the signaler, so that
	 * the two begin executing at roughly the same time.
	 */
	err = i915_request_await_start(to, from);
	if (err < 0)
		return err;

	/*
	 * Even when queued together, the signaler may still be held up by
	 * its own semaphores or by a preceding context. Additionally
	 * busywait on the HWSP for the seqno just before the signaler,
	 * i.e. until the signaler itself has started.
	 */
	if (intel_engine_has_semaphores(to->engine) &&
	    !i915_request_has_initial_breadcrumb(to)) {
		err = __emit_semaphore_wait(to, from, from->fence.seqno - 1);
		if (err < 0)
			return err;
	}

	/* Couple the dependency tree for PI on this exposed to->fence */
	if (to->engine->schedule) {
		err = i915_sched_node_add_dependency(&to->sched,
						     &from->sched,
						     I915_DEPENDENCY_WEAK);
		if (err < 0)
			return err;
	}

	return intel_timeline_sync_set_start(i915_request_timeline(to),
					     &from->fence);
}

static void mark_external(struct i915_request *rq)
{
	/*
	 * The downside of using semaphores is that we lose metadata passing
	 * along the signaling chain. This is particularly nasty when we
	 * need to pass along a fatal error such as EFAULT or EDEADLK. For
	 * fatal errors we want to scrub the request before it is executed,
	 * which means that we cannot preload the request onto HW and have
	 * it wait upon a semaphore.
	 */
	rq->sched.flags |= I915_SCHED_HAS_EXTERNAL_CHAIN;
}

static int
__i915_request_await_external(struct i915_request *rq, struct dma_fence *fence)
{
	mark_external(rq);
	return i915_sw_fence_await_dma_fence(&rq->submit, fence,
					     i915_fence_context_timeout(rq->engine->i915,
									fence->context),
					     I915_FENCE_GFP);
}

static int
i915_request_await_external(struct i915_request *rq, struct dma_fence *fence)
{
	struct dma_fence *iter;
	int err = 0;

	if (!to_dma_fence_chain(fence))
		return __i915_request_await_external(rq, fence);

	dma_fence_chain_for_each(iter, fence) {
		struct dma_fence_chain *chain = to_dma_fence_chain(iter);

		if (!dma_fence_is_i915(chain->fence)) {
			err = __i915_request_await_external(rq, iter);
			break;
		}

		err = i915_request_await_dma_fence(rq, chain->fence);
		if (err < 0)
			break;
	}

	dma_fence_put(iter);
	return err;
}

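/*
 * i915_request_await_execution - order @rq so that it is submitted to HW
 * only once every fence in @fence has itself reached the hardware, invoking
 * the optional @hook for each i915 signaler as it begins execution (used
 * e.g. for bonded submission).
 */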
int
i915_request_await_execution(struct i915_request *rq,
			     struct dma_fence *fence,
			     void (*hook)(struct i915_request *rq,
					  struct dma_fence *signal))
{
	struct dma_fence **child = &fence;
	unsigned int nchild = 1;
	int ret;

	if (dma_fence_is_array(fence)) {
		struct dma_fence_array *array = to_dma_fence_array(fence);

		/* XXX Error for signal-on-any fence arrays */

		child = array->fences;
		nchild = array->num_fences;
		GEM_BUG_ON(!nchild);
	}

	do {
		fence = *child++;
		if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
			i915_sw_fence_set_error_once(&rq->submit, fence->error);
			continue;
		}

		if (fence->context == rq->fence.context)
			continue;

		/*
		 * We don't squash repeated fence dependencies here as we
		 * want to run our callback in all cases.
		 */
		if (dma_fence_is_i915(fence))
			ret = __i915_request_await_execution(rq,
							     to_request(fence),
							     hook);
		else
			ret = i915_request_await_external(rq, fence);
		if (ret < 0)
			return ret;
	} while (--nchild);

	return 0;
}

static int
await_request_submit(struct i915_request *to, struct i915_request *from)
{
	/*
	 * If we are waiting on a virtual engine, then it may be
	 * constrained to execute on a single engine *prior* to submission.
	 * When it is submitted, it will be first submitted to the virtual
	 * engine and then passed to the physical engine. We cannot allow
	 * the waiter to be submitted immediately to the physical engine
	 * as it may then bypass the virtual request.
	 */
	if (to->engine == READ_ONCE(from->engine))
		return i915_sw_fence_await_sw_fence_gfp(&to->submit,
							&from->submit,
							I915_FENCE_GFP);
	else
		return __i915_request_await_execution(to, from, NULL);
}

static int
i915_request_await_request(struct i915_request *to, struct i915_request *from)
{
	int ret;

	GEM_BUG_ON(to == from);
	GEM_BUG_ON(to->timeline == from->timeline);

	if (i915_request_completed(from)) {
		i915_sw_fence_set_error_once(&to->submit, from->fence.error);
		return 0;
	}

	if (to->engine->schedule) {
		ret = i915_sched_node_add_dependency(&to->sched,
						     &from->sched,
						     I915_DEPENDENCY_EXTERNAL);
		if (ret < 0)
			return ret;
	}

	if (is_power_of_2(to->execution_mask | READ_ONCE(from->execution_mask)))
		ret = await_request_submit(to, from);
	else
		ret = emit_semaphore_wait(to, from, I915_FENCE_GFP);
	if (ret < 0)
		return ret;

	return 0;
}

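/*
 * i915_request_await_dma_fence - make @rq wait for @fence to signal before
 * it is submitted. Fence arrays are decomposed into their children, already
 * signaled fences only propagate their error, and awaits already recorded
 * on the timeline's sync map are skipped.
 */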
int
i915_request_await_dma_fence(struct i915_request *rq, struct dma_fence *fence)
{
	struct dma_fence **child = &fence;
	unsigned int nchild = 1;
	int ret;

	/*
	 * Note that if the fence-array was created in signal-on-any mode,
	 * we should *not* decompose it into its individual fences. However,
	 * we don't currently store which mode the fence-array is operating
	 * in. Fortunately, the only user of signal-on-any is private to
	 * amdgpu and we should not see any incoming fence-array from
	 * sync-file being in signal-on-any mode.
	 */
	if (dma_fence_is_array(fence)) {
		struct dma_fence_array *array = to_dma_fence_array(fence);

		child = array->fences;
		nchild = array->num_fences;
		GEM_BUG_ON(!nchild);
	}

	do {
		fence = *child++;
		if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
			i915_sw_fence_set_error_once(&rq->submit, fence->error);
			continue;
		}

		/*
		 * Requests on the same timeline are explicitly ordered, along
		 * with their dependencies, by i915_request_add() which ensures
		 * that requests are submitted in-order through each ring.
		 */
		if (fence->context == rq->fence.context)
			continue;

		/* Squash repeated waits to the same timelines */
		if (fence->context &&
		    intel_timeline_sync_is_later(i915_request_timeline(rq),
						 fence))
			continue;

		if (dma_fence_is_i915(fence))
			ret = i915_request_await_request(rq, to_request(fence));
		else
			ret = i915_request_await_external(rq, fence);
		if (ret < 0)
			return ret;

		/* Record the latest fence used against each timeline */
		if (fence->context)
			intel_timeline_sync_set(i915_request_timeline(rq),
						fence);
	} while (--nchild);

	return 0;
}

/**
 * i915_request_await_object - set this request to (async) wait upon a bo
 * @to: request we are wishing to use
 * @obj: object which may be in use on another ring.
 * @write: whether the wait is on behalf of a writer
 *
 * Serialises @to against the fences tracked in @obj's reservation object:
 * a writer waits for all shared (read) fences plus the exclusive fence,
 * while a reader only waits for the exclusive (write) fence.
 *
 * Returns 0 if successful, else propagates up the lower layer error.
 */
int
i915_request_await_object(struct i915_request *to,
			  struct drm_i915_gem_object *obj,
			  bool write)
{
	struct dma_fence *excl;
	int ret = 0;

	if (write) {
		struct dma_fence **shared;
		unsigned int count, i;

		ret = dma_resv_get_fences_rcu(obj->base.resv,
					      &excl, &count, &shared);
		if (ret)
			return ret;

		for (i = 0; i < count; i++) {
			ret = i915_request_await_dma_fence(to, shared[i]);
			if (ret)
				break;

			dma_fence_put(shared[i]);
		}

		for (; i < count; i++)
			dma_fence_put(shared[i]);
		kfree(shared);
	} else {
		excl = dma_resv_get_excl_rcu(obj->base.resv);
	}

	if (excl) {
		if (ret == 0)
			ret = i915_request_await_dma_fence(to, excl);

		dma_fence_put(excl);
	}

	return ret;
}

static struct i915_request *
__i915_request_add_to_timeline(struct i915_request *rq)
{
	struct intel_timeline *timeline = i915_request_timeline(rq);
	struct i915_request *prev;

	/*
	 * Requests on a timeline are explicitly ordered: rather than going
	 * through the generic await machinery, chain this request directly
	 * behind the previous request on the timeline (using the embedded
	 * submitq/dmaq hooks so no allocation is required). For timelines
	 * that hop between engines (e.g. virtual engines) we order on the
	 * full fence rather than just submission.
	 */
	prev = to_request(__i915_active_fence_set(&timeline->last_request,
						  &rq->fence));
	if (prev && !__i915_request_is_complete(prev)) {
		/*
		 * Requests are supposed to be kept in order; be wary as
		 * timeline->last_request may also be used as a barrier for
		 * external modification to this context.
		 */
		GEM_BUG_ON(prev->context == rq->context &&
			   i915_seqno_passed(prev->fence.seqno,
					     rq->fence.seqno));

		if (is_power_of_2(READ_ONCE(prev->engine)->mask | rq->engine->mask))
			i915_sw_fence_await_sw_fence(&rq->submit,
						     &prev->submit,
						     &rq->submitq);
		else
			__i915_sw_fence_await_dma_fence(&rq->submit,
							&prev->fence,
							&rq->dmaq);
		if (rq->engine->schedule)
			__i915_sched_node_add_dependency(&rq->sched,
							 &prev->sched,
							 &rq->dep,
							 0);
	}

	/*
	 * Make sure that no request gazumped us - if it was allocated after
	 * ours and committed first, the timeline would hold a seqno later
	 * than the one we just emitted.
	 */
	GEM_BUG_ON(timeline->seqno != rq->fence.seqno);

	return prev;
}

/*
 * NB: __i915_request_commit() is not allowed to fail; by this point the
 * work has been queued for the hardware, so losing track of the request
 * here would mean we could never report its completion.
 */
struct i915_request *__i915_request_commit(struct i915_request *rq)
{
	struct intel_engine_cs *engine = rq->engine;
	struct intel_ring *ring = rq->ring;
	u32 *cs;

	RQ_TRACE(rq, "\n");

	/*
	 * To ensure this call cannot fail, space was reserved at request
	 * creation time; release it now so that the breadcrumb emission
	 * below cannot run out of ring space.
	 */
	GEM_BUG_ON(rq->reserved_space > ring->space);
	rq->reserved_space = 0;
	rq->emitted_jiffies = jiffies;

	/*
	 * Record the position of the start of the breadcrumb so that
	 * should we detect the updated seqno part-way through the
	 * GPU processing the request, we never over-estimate the
	 * position of the ring's HEAD.
	 */
	cs = intel_ring_begin(rq, engine->emit_fini_breadcrumb_dw);
	GEM_BUG_ON(IS_ERR(cs));
	rq->postfix = intel_ring_offset(rq, cs);

	return __i915_request_add_to_timeline(rq);
}

void __i915_request_queue_bh(struct i915_request *rq)
{
	i915_sw_fence_commit(&rq->semaphore);
	i915_sw_fence_commit(&rq->submit);
}

void __i915_request_queue(struct i915_request *rq,
			  const struct i915_sched_attr *attr)
{
	/*
	 * Let the backend know a new request has arrived that may need
	 * to adjust the existing execution schedule due to a high priority
	 * request - i.e. we may want to preempt the current request in order
	 * to run a high priority dependency chain *before* we can execute this
	 * request.
	 *
	 * This is called before the request is ready to run so that we can
	 * decide whether to preempt the entire chain so that it is ready to
	 * run at the earliest possible convenience.
	 */
	if (attr && rq->engine->schedule)
		rq->engine->schedule(rq, attr);

	local_bh_disable();
	__i915_request_queue_bh(rq);
	local_bh_enable(); /* kick the submission tasklets */
}

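/*
 * i915_request_add - commit the request and hand it to the scheduler.
 * This drops the timeline mutex taken by i915_request_create(); after this
 * call the request is owned by the submission backend and may only be
 * tracked via its fence.
 */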
void i915_request_add(struct i915_request *rq)
{
	struct intel_timeline * const tl = i915_request_timeline(rq);
	struct i915_sched_attr attr = {};
	struct i915_gem_context *ctx;

	lockdep_assert_held(&tl->mutex);
	lockdep_unpin_lock(&tl->mutex, rq->cookie);

	trace_i915_request_add(rq);
	__i915_request_commit(rq);

	/* The GEM context may be absent, e.g. for selftest requests */
	rcu_read_lock();
	ctx = rcu_dereference(rq->context->gem_context);
	if (ctx)
		attr = ctx->sched;
	rcu_read_unlock();

	__i915_request_queue(rq, &attr);

	mutex_unlock(&tl->mutex);
}

static unsigned long local_clock_ns(unsigned int *cpu)
{
	unsigned long t;

	/*
	 * local_clock() is only defined wrt the current CPU; the comparison
	 * is no longer valid if we switch CPUs. Rather than block preemption
	 * for the whole busywait, record the CPU here and treat a CPU switch
	 * as an indicator of system load and a reason to stop busywaiting
	 * (see busywait_stop()).
	 */
	*cpu = get_cpu();
	t = local_clock();
	put_cpu();

	return t;
}

static bool busywait_stop(unsigned long timeout, unsigned int cpu)
{
	unsigned int this_cpu;

	if (time_after(local_clock_ns(&this_cpu), timeout))
		return true;

	return this_cpu != cpu;
}

static bool __i915_spin_request(struct i915_request * const rq, int state)
{
	unsigned long timeout_ns;
	unsigned int cpu;

	/*
	 * Only spin for the request if we know it is likely to complete
	 * very soon: it must already have been submitted to HW and be
	 * running. The caller holds a reference, so the request cannot be
	 * recycled (SLAB_TYPESAFE_BY_RCU) beneath us while we poll.
	 */
	if (!i915_request_is_running(rq))
		return false;

	/*
	 * When waiting for high frequency requests, e.g. during synchronous
	 * rendering split between the CPU and GPU, the finite amount of time
	 * required to set up the irq and wait upon it limits the response
	 * rate. By busywaiting on the request completion for a short while we
	 * can service the high frequency waits as quick as possible. However,
	 * if it is a slow request, we want to sleep as quickly as possible.
	 * The tradeoff between waiting and sleeping is roughly the time it
	 * takes to sleep on a request, on the order of a microsecond.
	 */
	timeout_ns = READ_ONCE(rq->engine->props.max_busywait_duration_ns);
	timeout_ns += local_clock_ns(&cpu);
	do {
		if (dma_fence_is_signaled(&rq->fence))
			return true;

		if (signal_pending_state(state, current))
			break;

		if (busywait_stop(timeout_ns, cpu))
			break;

		cpu_relax();
	} while (!need_resched());

	return false;
}

struct request_wait {
	struct dma_fence_cb cb;
	struct task_struct *tsk;
};

static void request_wait_wake(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	struct request_wait *wait = container_of(cb, typeof(*wait), cb);

	wake_up_process(fetch_and_zero(&wait->tsk));
}

/**
 * i915_request_wait - wait until execution of request has finished
 * @rq: the request to wait upon
 * @flags: how to wait
 * @timeout: how long to wait in jiffies
 *
 * i915_request_wait() waits for the request to be completed, for a
 * maximum of @timeout jiffies (with MAX_SCHEDULE_TIMEOUT implying an
 * unbounded wait).
 *
 * Returns the remaining time (in jiffies) if the request completed, which
 * may be zero, or -ETIME if the request is unfinished after the timeout
 * expires. May return -ERESTARTSYS if called with I915_WAIT_INTERRUPTIBLE
 * and a signal is pending before the request completes.
 */
long i915_request_wait(struct i915_request *rq,
		       unsigned int flags,
		       long timeout)
{
	const int state = flags & I915_WAIT_INTERRUPTIBLE ?
		TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
	struct request_wait wait;

	might_sleep();
	GEM_BUG_ON(timeout < 0);

	if (dma_fence_is_signaled(&rq->fence))
		return timeout;

	if (!timeout)
		return -ETIME;

	trace_i915_request_wait_begin(rq, flags);

	/*
	 * We must never wait on the GPU while holding a lock as we may need
	 * to perform a GPU reset; mark the wait for lockdep so that it can
	 * detect dependency cycles against the reset mutex.
	 */
	mutex_acquire(&rq->engine->gt->reset.mutex.dep_map, 0, 0, _THIS_IP_);

	/*
	 * Optimistic spin before touching IRQs: if the request is already
	 * running and likely to complete within the time it would take to
	 * set up the interrupt, a short busywait avoids both the interrupt
	 * setup cost and the scheduler latency of sleeping.
	 */
	if (IS_ACTIVE(CONFIG_DRM_I915_MAX_REQUEST_BUSYWAIT) &&
	    __i915_spin_request(rq, state))
		goto out;

	/*
	 * This client is about to stall waiting for the GPU. Bump the GPU
	 * frequency before sleeping (an RPS "waitboost") so that the stall
	 * is resolved more quickly, trading power for latency.
	 */
	if (flags & I915_WAIT_PRIORITY && !i915_request_started(rq))
		intel_rps_boost(rq);

	wait.tsk = current;
	if (dma_fence_add_callback(&rq->fence, &wait.cb, request_wait_wake))
		goto out;

	/*
	 * Flush the submission tasklet, but only if it may help this
	 * request: HW interrupts and tasklet execution can lag (e.g. due to
	 * ksoftirqd latency), so give the backend one last kick before we
	 * go to sleep.
	 */
	if (i915_request_is_ready(rq))
		__intel_engine_flush_submission(rq->engine, false);

	for (;;) {
		set_current_state(state);

		if (dma_fence_is_signaled(&rq->fence))
			break;

		if (signal_pending_state(state, current)) {
			timeout = -ERESTARTSYS;
			break;
		}

		if (!timeout) {
			timeout = -ETIME;
			break;
		}

		timeout = io_schedule_timeout(timeout);
	}
	__set_current_state(TASK_RUNNING);

	if (READ_ONCE(wait.tsk))
		dma_fence_remove_callback(&rq->fence, &wait.cb);
	GEM_BUG_ON(!list_empty(&wait.cb.node));

out:
	mutex_release(&rq->engine->gt->reset.mutex.dep_map, _THIS_IP_);
	trace_i915_request_wait_end(rq);
	return timeout;
}

static int print_sched_attr(const struct i915_sched_attr *attr,
			    char *buf, int x, int len)
{
	if (attr->priority == I915_PRIORITY_INVALID)
		return x;

	x += snprintf(buf + x, len - x,
		      " prio=%d", attr->priority);

	return x;
}

static char queue_status(const struct i915_request *rq)
{
	if (i915_request_is_active(rq))
		return 'E';

	if (i915_request_is_ready(rq))
		return intel_engine_is_virtual(rq->engine) ? 'V' : 'R';

	return 'U';
}

static const char *run_status(const struct i915_request *rq)
{
	if (__i915_request_is_complete(rq))
		return "!";

	if (__i915_request_has_started(rq))
		return "*";

	if (!i915_sw_fence_signaled(&rq->semaphore))
		return "&";

	return "";
}

static const char *fence_status(const struct i915_request *rq)
{
	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags))
		return "+";

	if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &rq->fence.flags))
		return "-";

	return "";
}

void i915_request_show(struct drm_printer *m,
		       const struct i915_request *rq,
		       const char *prefix,
		       int indent)
{
	const char *name = rq->fence.ops->get_timeline_name((struct dma_fence *)&rq->fence);
	char buf[80] = "";
	int x = 0;

	/*
	 * The printed status characters are built from the helpers above:
	 *
	 *   queue_status(): U (unready), R (ready), V (ready on a virtual
	 *   engine) or E (executing on HW);
	 *
	 *   run_status(): ! (completed), * (started) or & (still waiting
	 *   on its semaphore);
	 *
	 *   fence_status(): + (fence signaled) or - (signaling enabled,
	 *   waiters present).
	 */
	x = print_sched_attr(&rq->sched.attr, buf, x, sizeof(buf));

	drm_printf(m, "%s%.*s%c %llx:%lld%s%s %s @ %dms: %s\n",
		   prefix, indent, "                ",
		   queue_status(rq),
		   rq->fence.context, rq->fence.seqno,
		   run_status(rq),
		   fence_status(rq),
		   buf,
		   jiffies_to_msecs(jiffies - rq->emitted_jiffies),
		   name);
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_request.c"
#include "selftests/i915_request.c"
#endif

static void i915_global_request_shrink(void)
{
	kmem_cache_shrink(global.slab_execute_cbs);
	kmem_cache_shrink(global.slab_requests);
}

static void i915_global_request_exit(void)
{
	kmem_cache_destroy(global.slab_execute_cbs);
	kmem_cache_destroy(global.slab_requests);
}

static struct i915_global_request global = { {
	.shrink = i915_global_request_shrink,
	.exit = i915_global_request_exit,
} };

int __init i915_global_request_init(void)
{
	global.slab_requests =
		kmem_cache_create("i915_request",
				  sizeof(struct i915_request),
				  __alignof__(struct i915_request),
				  SLAB_HWCACHE_ALIGN |
				  SLAB_RECLAIM_ACCOUNT |
				  SLAB_TYPESAFE_BY_RCU,
				  __i915_request_ctor);
	if (!global.slab_requests)
		return -ENOMEM;

	global.slab_execute_cbs = KMEM_CACHE(execute_cb,
					     SLAB_HWCACHE_ALIGN |
					     SLAB_RECLAIM_ACCOUNT |
					     SLAB_TYPESAFE_BY_RCU);
	if (!global.slab_execute_cbs)
		goto err_requests;

	i915_global_register(&global.base);
	return 0;

err_requests:
	kmem_cache_destroy(global.slab_requests);
	return -ENOMEM;
}