/*
 * Copyright © 2008-2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OF THE
 * SOFTWARE.
 *
 */

25#include <linux/dma-fence-array.h>
26#include <linux/dma-fence-chain.h>
27#include <linux/irq_work.h>
28#include <linux/prefetch.h>
29#include <linux/sched.h>
30#include <linux/sched/clock.h>
31#include <linux/sched/signal.h>
32
33#include "gem/i915_gem_context.h"
34#include "gt/intel_breadcrumbs.h"
35#include "gt/intel_context.h"
36#include "gt/intel_engine.h"
37#include "gt/intel_engine_heartbeat.h"
38#include "gt/intel_gpu_commands.h"
39#include "gt/intel_reset.h"
40#include "gt/intel_ring.h"
41#include "gt/intel_rps.h"
42
43#include "i915_active.h"
44#include "i915_drv.h"
45#include "i915_trace.h"
46#include "intel_pm.h"
47
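/*
 * An execute_cb is attached to a signaling request and carries a reference
 * to a waiter's submit fence; when the signaler begins execution, the
 * callback is run (via irq_work) to release that waiter for submission.
 */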
48struct execute_cb {
49 struct irq_work work;
50 struct i915_sw_fence *fence;
51 struct i915_request *signal;
52};
53
54static struct kmem_cache *slab_requests;
55static struct kmem_cache *slab_execute_cbs;
56
57static const char *i915_fence_get_driver_name(struct dma_fence *fence)
58{
59 return dev_name(to_request(fence)->engine->i915->drm.dev);
60}
61
62static const char *i915_fence_get_timeline_name(struct dma_fence *fence)
63{
64 const struct i915_gem_context *ctx;
65
	/*
	 * The timeline struct (as part of the ppgtt underneath a context)
	 * may be freed when the request is no longer in use by the GPU.
	 * Rather than extend the lifetime of the context just to keep its
	 * name around, report "signaled" once the fence has completed and
	 * otherwise borrow the context's name while it is still protected
	 * by RCU.
	 */
75 if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
76 return "signaled";
77
78 ctx = i915_request_gem_context(to_request(fence));
79 if (!ctx)
80 return "[" DRIVER_NAME "]";
81
82 return ctx->name;
83}
84
85static bool i915_fence_signaled(struct dma_fence *fence)
86{
87 return i915_request_completed(to_request(fence));
88}
89
90static bool i915_fence_enable_signaling(struct dma_fence *fence)
91{
92 return i915_request_enable_breadcrumb(to_request(fence));
93}
94
95static signed long i915_fence_wait(struct dma_fence *fence,
96 bool interruptible,
97 signed long timeout)
98{
99 return i915_request_wait(to_request(fence),
100 interruptible | I915_WAIT_PRIORITY,
101 timeout);
102}
103
104struct kmem_cache *i915_request_slab_cache(void)
105{
106 return slab_requests;
107}
108
109static void i915_fence_release(struct dma_fence *fence)
110{
111 struct i915_request *rq = to_request(fence);
112
113 GEM_BUG_ON(rq->guc_prio != GUC_PRIO_INIT &&
114 rq->guc_prio != GUC_PRIO_FINI);
115
	/*
	 * The request is put onto a RCU freelist (i.e. the address
	 * is immediately reused), mark the fences as being freed now.
	 * Otherwise the debugobjects for the fences are only marked as
	 * freed when the slab cache itself is freed, and so we would get
	 * caught trying to reuse dead objects.
	 */
123 i915_sw_fence_fini(&rq->submit);
124 i915_sw_fence_fini(&rq->semaphore);
125
	/*
	 * Keep one request on each engine for reserved use under mempressure.
	 * Do not use this cache for virtual engines; it is only really
	 * needed for kernel contexts on the physical engines.
	 */
131 if (!intel_engine_is_virtual(rq->engine) &&
132 !cmpxchg(&rq->engine->request_pool, NULL, rq)) {
133 intel_context_put(rq->context);
134 return;
135 }
136
137 intel_context_put(rq->context);
138
139 kmem_cache_free(slab_requests, rq);
140}
141
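/*
 * The dma_fence_ops backing every i915_request, allowing requests to be
 * exported and waited upon as ordinary dma_fences by other drivers and
 * by userspace.
 */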
142const struct dma_fence_ops i915_fence_ops = {
143 .get_driver_name = i915_fence_get_driver_name,
144 .get_timeline_name = i915_fence_get_timeline_name,
145 .enable_signaling = i915_fence_enable_signaling,
146 .signaled = i915_fence_signaled,
147 .wait = i915_fence_wait,
148 .release = i915_fence_release,
149};
150
151static void irq_execute_cb(struct irq_work *wrk)
152{
153 struct execute_cb *cb = container_of(wrk, typeof(*cb), work);
154
155 i915_sw_fence_complete(cb->fence);
156 kmem_cache_free(slab_execute_cbs, cb);
157}
158
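/*
 * Drain the list of execute callbacks attached to @rq, handing each one to
 * @fn: either queued as irq_work (__notify_execute_cb_irq) or invoked
 * immediately (i915_request_notify_execute_cb_imm).
 */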
159static __always_inline void
160__notify_execute_cb(struct i915_request *rq, bool (*fn)(struct irq_work *wrk))
161{
162 struct execute_cb *cb, *cn;
163
164 if (llist_empty(&rq->execute_cb))
165 return;
166
167 llist_for_each_entry_safe(cb, cn,
168 llist_del_all(&rq->execute_cb),
169 work.node.llist)
170 fn(&cb->work);
171}
172
173static void __notify_execute_cb_irq(struct i915_request *rq)
174{
175 __notify_execute_cb(rq, irq_work_queue);
176}
177
178static bool irq_work_imm(struct irq_work *wrk)
179{
180 wrk->func(wrk);
181 return false;
182}
183
184void i915_request_notify_execute_cb_imm(struct i915_request *rq)
185{
186 __notify_execute_cb(rq, irq_work_imm);
187}
188
189static void free_capture_list(struct i915_request *request)
190{
191 struct i915_capture_list *capture;
192
193 capture = fetch_and_zero(&request->capture_list);
194 while (capture) {
195 struct i915_capture_list *next = capture->next;
196
197 kfree(capture);
198 capture = next;
199 }
200}
201
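/*
 * Overwrite the ring contents owned by this request (between rq->infix and
 * rq->postfix) with @val, taking care of the ring wrapping around; used to
 * poison or clear the payload of a request that will not be executed.
 */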
202static void __i915_request_fill(struct i915_request *rq, u8 val)
203{
204 void *vaddr = rq->ring->vaddr;
205 u32 head;
206
207 head = rq->infix;
208 if (rq->postfix < head) {
209 memset(vaddr + head, val, rq->ring->size - head);
210 head = 0;
211 }
212 memset(vaddr + head, val, rq->postfix - head);
213}
214
/**
 * i915_request_active_engine - return the engine a request is executing upon
 * @rq: request to inspect
 * @active: pointer in which to return the active engine
 *
 * Fills in @active with the engine currently executing @rq, provided the
 * request is active and not yet completed.
 *
 * Returns true if the request was active, false otherwise.
 */
225bool
226i915_request_active_engine(struct i915_request *rq,
227 struct intel_engine_cs **active)
228{
229 struct intel_engine_cs *engine, *locked;
230 bool ret = false;
231
	/*
	 * Serialise with __i915_request_submit() so that it sees
	 * is-banned?, or we know the request is already inflight.
	 *
	 * Note that rq->engine is unstable, and so we double
	 * check that we have acquired the lock on the final engine.
	 */
239 locked = READ_ONCE(rq->engine);
240 spin_lock_irq(&locked->sched_engine->lock);
241 while (unlikely(locked != (engine = READ_ONCE(rq->engine)))) {
242 spin_unlock(&locked->sched_engine->lock);
243 locked = engine;
244 spin_lock(&locked->sched_engine->lock);
245 }
246
247 if (i915_request_is_active(rq)) {
248 if (!__i915_request_is_complete(rq))
249 *active = locked;
250 ret = true;
251 }
252
253 spin_unlock_irq(&locked->sched_engine->lock);
254
255 return ret;
256}
257
258static void __rq_init_watchdog(struct i915_request *rq)
259{
260 rq->watchdog.timer.function = NULL;
261}
262
263static enum hrtimer_restart __rq_watchdog_expired(struct hrtimer *hrtimer)
264{
265 struct i915_request *rq =
266 container_of(hrtimer, struct i915_request, watchdog.timer);
267 struct intel_gt *gt = rq->engine->gt;
268
269 if (!i915_request_completed(rq)) {
		if (llist_add(&rq->watchdog.link, &gt->watchdog.list))
			schedule_work(&gt->watchdog.work);
272 } else {
273 i915_request_put(rq);
274 }
275
276 return HRTIMER_NORESTART;
277}
278
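/*
 * Arm the per-request watchdog if the context has specified a timeout. The
 * timer holds an extra reference on the request; it is dropped here if the
 * timer is cancelled or finds the request already completed, otherwise the
 * expired request is handed (with its reference) to the GT watchdog worker.
 */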
279static void __rq_arm_watchdog(struct i915_request *rq)
280{
281 struct i915_request_watchdog *wdg = &rq->watchdog;
282 struct intel_context *ce = rq->context;
283
284 if (!ce->watchdog.timeout_us)
285 return;
286
287 i915_request_get(rq);
288
289 hrtimer_init(&wdg->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
290 wdg->timer.function = __rq_watchdog_expired;
291 hrtimer_start_range_ns(&wdg->timer,
292 ns_to_ktime(ce->watchdog.timeout_us *
293 NSEC_PER_USEC),
294 NSEC_PER_MSEC,
295 HRTIMER_MODE_REL);
296}
297
298static void __rq_cancel_watchdog(struct i915_request *rq)
299{
300 struct i915_request_watchdog *wdg = &rq->watchdog;
301
302 if (wdg->timer.function && hrtimer_try_to_cancel(&wdg->timer) > 0)
303 i915_request_put(rq);
304}
305
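/*
 * i915_request_retire - release the resources of a completed request.
 *
 * Called in order along the timeline once a request has completed: releases
 * the ring space it owned, signals its fence if not already signaled, and
 * drops the references it holds (context, scheduler node, capture list).
 * Returns false if the request has not yet completed.
 */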
306bool i915_request_retire(struct i915_request *rq)
307{
308 if (!__i915_request_is_complete(rq))
309 return false;
310
311 RQ_TRACE(rq, "\n");
312
313 GEM_BUG_ON(!i915_sw_fence_signaled(&rq->submit));
314 trace_i915_request_retire(rq);
315 i915_request_mark_complete(rq);
316
317 __rq_cancel_watchdog(rq);
318
	/*
	 * We know the GPU must have read the request to have
	 * sent us the seqno + interrupt, so use the position
	 * of tail of the request to update the last known position
	 * of the GPU head.
	 *
	 * Note this requires that we are always called in request
	 * completion order.
	 */
328 GEM_BUG_ON(!list_is_first(&rq->link,
329 &i915_request_timeline(rq)->requests));
330 if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
		/* Poison before we release our space in the ring */
332 __i915_request_fill(rq, POISON_FREE);
333 rq->ring->head = rq->postfix;
334
335 if (!i915_request_signaled(rq)) {
336 spin_lock_irq(&rq->lock);
337 dma_fence_signal_locked(&rq->fence);
338 spin_unlock_irq(&rq->lock);
339 }
340
341 if (test_and_set_bit(I915_FENCE_FLAG_BOOST, &rq->fence.flags))
342 atomic_dec(&rq->engine->gt->rps.num_waiters);
343
	/*
	 * We only loosely track inflight requests across preemption,
	 * and so we may find ourselves attempting to retire a _completed_
	 * request that we have removed from the HW and put back on a run
	 * queue.
	 *
	 * As we set I915_FENCE_FLAG_ACTIVE on the request, this should be
	 * after removing the breadcrumb and signaling it, so that we do not
	 * inadvertently attach the breadcrumb to a completed request.
	 */
354 rq->engine->remove_active_request(rq);
355 GEM_BUG_ON(!llist_empty(&rq->execute_cb));
356
357 __list_del_entry(&rq->link);
358
359 intel_context_exit(rq->context);
360 intel_context_unpin(rq->context);
361
362 free_capture_list(rq);
363 i915_sched_node_fini(&rq->sched);
364 i915_request_put(rq);
365
366 return true;
367}
368
369void i915_request_retire_upto(struct i915_request *rq)
370{
371 struct intel_timeline * const tl = i915_request_timeline(rq);
372 struct i915_request *tmp;
373
374 RQ_TRACE(rq, "\n");
375 GEM_BUG_ON(!__i915_request_is_complete(rq));
376
377 do {
378 tmp = list_first_entry(&tl->requests, typeof(*tmp), link);
379 GEM_BUG_ON(!i915_request_completed(tmp));
380 } while (i915_request_retire(tmp) && tmp != rq);
381}
382
383static struct i915_request * const *
384__engine_active(struct intel_engine_cs *engine)
385{
386 return READ_ONCE(engine->execlists.active);
387}
388
389static bool __request_in_flight(const struct i915_request *signal)
390{
391 struct i915_request * const *port, *rq;
392 bool inflight = false;
393
394 if (!i915_request_is_ready(signal))
395 return false;
396
	/*
	 * Even if we have unwound the request, it may still be on
	 * the GPU (preempt-to-busy). If that request is inside an
	 * unpreemptible critical section, it will not be removed from
	 * the HW until it completes.
	 *
	 * We cannot rely on our tracking of ELSP[0] to know which request
	 * is currently active, as that tracking may be an event behind.
	 * Instead, if the context is still inflight, walk the ports and
	 * treat the signaler as executing if a request from the same
	 * context at or after its seqno is found there.
	 *
	 * The read of *execlists->active may race with the promotion of
	 * execlists->pending[] over execlists->inflight[]. That is fine:
	 * the promotion implies the HW has acked the submission, so the
	 * context is not stuck, and the pointer reads remain tear-free.
	 */
433 if (!intel_context_inflight(signal->context))
434 return false;
435
436 rcu_read_lock();
437 for (port = __engine_active(signal->engine);
438 (rq = READ_ONCE(*port));
439 port++) {
440 if (rq->context == signal->context) {
441 inflight = i915_seqno_passed(rq->fence.seqno,
442 signal->fence.seqno);
443 break;
444 }
445 }
446 rcu_read_unlock();
447
448 return inflight;
449}
450
451static int
452__await_execution(struct i915_request *rq,
453 struct i915_request *signal,
454 gfp_t gfp)
455{
456 struct execute_cb *cb;
457
458 if (i915_request_is_active(signal))
459 return 0;
460
461 cb = kmem_cache_alloc(slab_execute_cbs, gfp);
462 if (!cb)
463 return -ENOMEM;
464
465 cb->fence = &rq->submit;
466 i915_sw_fence_await(cb->fence);
467 init_irq_work(&cb->work, irq_execute_cb);
468
	/*
	 * Register the callback first, then see if the signaler is already
	 * active. This ensures that if we race with the
	 * __notify_execute_cb from i915_request_submit() and we are not
	 * included in that list, we get a second bite of the cherry and
	 * execute it ourselves. After this point, a future
	 * i915_request_submit() will notify us.
	 *
	 * In i915_request_retire() we set the ACTIVE bit on a completed
	 * request (then flush the execute_cb). So by registering the
	 * callback first, then checking the ACTIVE bit, we serialise with
	 * the completed/retired request.
	 */
482 if (llist_add(&cb->work.node.llist, &signal->execute_cb)) {
483 if (i915_request_is_active(signal) ||
484 __request_in_flight(signal))
485 i915_request_notify_execute_cb_imm(signal);
486 }
487
488 return 0;
489}
490
491static bool fatal_error(int error)
492{
493 switch (error) {
494 case 0:
495 case -EAGAIN:
496 case -ETIMEDOUT:
497 return false;
498 default:
499 return true;
500 }
501}
502
503void __i915_request_skip(struct i915_request *rq)
504{
505 GEM_BUG_ON(!fatal_error(rq->fence.error));
506
507 if (rq->infix == rq->postfix)
508 return;
509
510 RQ_TRACE(rq, "error: %d\n", rq->fence.error);
511
	/*
	 * As this request likely depends on state from the lost
	 * context, clear out all the user operations leaving the
	 * breadcrumb at the end (so we get the fence notifications).
	 */
517 __i915_request_fill(rq, 0);
518 rq->infix = rq->postfix;
519}
520
521bool i915_request_set_error_once(struct i915_request *rq, int error)
522{
523 int old;
524
525 GEM_BUG_ON(!IS_ERR_VALUE((long)error));
526
527 if (i915_request_signaled(rq))
528 return false;
529
530 old = READ_ONCE(rq->fence.error);
531 do {
532 if (fatal_error(old))
533 return false;
534 } while (!try_cmpxchg(&rq->fence.error, &old, error));
535
536 return true;
537}
538
539struct i915_request *i915_request_mark_eio(struct i915_request *rq)
540{
541 if (__i915_request_is_complete(rq))
542 return NULL;
543
544 GEM_BUG_ON(i915_request_signaled(rq));
545
	/* As soon as the request is completed, it may be retired */
547 rq = i915_request_get(rq);
548
549 i915_request_set_error_once(rq, -EIO);
550 i915_request_mark_complete(rq);
551
552 return rq;
553}
554
555bool __i915_request_submit(struct i915_request *request)
556{
557 struct intel_engine_cs *engine = request->engine;
558 bool result = false;
559
560 RQ_TRACE(request, "\n");
561
562 GEM_BUG_ON(!irqs_disabled());
563 lockdep_assert_held(&engine->sched_engine->lock);
564
	/*
	 * With the advent of preempt-to-busy, we frequently encounter
	 * requests that we have unsubmitted from HW, but left running
	 * until the next ack and so have completed in the meantime. On
	 * resubmission of that completed request, we can skip
	 * re-emitting the breadcrumb and adding it to the active list
	 * again, and simply mark it as active.
	 *
	 * We must remove the request from the caller's priority queue,
	 * and the caller must only call us when the request is in their
	 * priority queue, under the sched_engine->lock. This ensures that
	 * the request has *not* yet been retired and we can safely move
	 * the request into the engine's active list where it will be
	 * dropped upon retiring. (Otherwise if we resubmit a *retired*
	 * request, this would be a horrible use-after-free.)
	 */
581 if (__i915_request_is_complete(request)) {
582 list_del_init(&request->sched.link);
583 goto active;
584 }
585
586 if (unlikely(intel_context_is_banned(request->context)))
587 i915_request_set_error_once(request, -EIO);
588
589 if (unlikely(fatal_error(request->fence.error)))
590 __i915_request_skip(request);
591
	/*
	 * Are we using semaphores when the gpu is already saturated?
	 *
	 * Using semaphores incurs a cost in having the GPU poll a
	 * memory location, busywaiting for it to change. The continual
	 * memory reads can have a noticeable impact on the rest of the
	 * system with the extra bus traffic, stalling the cpu as it too
	 * tries to access memory across the bus (perf stat -e bus-cycles).
	 *
	 * If we installed a semaphore on this request and we only submit
	 * the request after the signaler completed, that indicates the
	 * system is overloaded and using semaphores at this time only
	 * increases the cost. Disable further use of semaphores by marking
	 * this engine as saturated by its signalers, until the system has
	 * had a chance to idle.
	 */
608 if (request->sched.semaphores &&
609 i915_sw_fence_signaled(&request->semaphore))
610 engine->saturated |= request->sched.semaphores;
611
612 engine->emit_fini_breadcrumb(request,
613 request->ring->vaddr + request->postfix);
614
615 trace_i915_request_execute(request);
616 if (engine->bump_serial)
617 engine->bump_serial(engine);
618 else
619 engine->serial++;
620
621 result = true;
622
623 GEM_BUG_ON(test_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags));
624 engine->add_active_request(request);
625active:
626 clear_bit(I915_FENCE_FLAG_PQUEUE, &request->fence.flags);
627 set_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags);
628
	/*
	 * Now that the request is marked active, kick (via irq_work) the
	 * execute_cb callbacks attached by __await_execution(): waiters
	 * that gated their own submission on this request beginning
	 * execution may now proceed.
	 */
639 __notify_execute_cb_irq(request);
640
	/* We may be recursing from the signal callback of another i915 fence */
642 if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags))
643 i915_request_enable_breadcrumb(request);
644
645 return result;
646}
647
648void i915_request_submit(struct i915_request *request)
649{
650 struct intel_engine_cs *engine = request->engine;
651 unsigned long flags;
652
	/* Will be called from irq-context when using foreign fences. */
654 spin_lock_irqsave(&engine->sched_engine->lock, flags);
655
656 __i915_request_submit(request);
657
658 spin_unlock_irqrestore(&engine->sched_engine->lock, flags);
659}
660
661void __i915_request_unsubmit(struct i915_request *request)
662{
663 struct intel_engine_cs *engine = request->engine;
664
	/*
	 * Only unwind in reverse order, required so that the per-context list
	 * is kept in seqno/ring order.
	 */
669 RQ_TRACE(request, "\n");
670
671 GEM_BUG_ON(!irqs_disabled());
672 lockdep_assert_held(&engine->sched_engine->lock);
673
	/*
	 * Mark the request as no longer active before touching the
	 * breadcrumb, so that a concurrent dma_fence_enable_signaling()
	 * cannot attach itself to a request that is being unsubmitted;
	 * only then cancel any breadcrumb already attached.
	 */
681 GEM_BUG_ON(!test_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags));
682 clear_bit_unlock(I915_FENCE_FLAG_ACTIVE, &request->fence.flags);
683 if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags))
684 i915_request_cancel_breadcrumb(request);
685
	/* We've already spun, don't charge on resubmitting. */
687 if (request->sched.semaphores && __i915_request_has_started(request))
688 request->sched.semaphores = 0;
689
	/*
	 * We don't need to wake_up any waiters on request->execute, they
	 * will get woken by any other event or us re-adding this request
	 * to the engine timeline (__i915_request_submit()). The waiters
	 * should be quite adept at finding that the request now has a new
	 * global_seqno to the one they went to sleep on.
	 */
697}
698
699void i915_request_unsubmit(struct i915_request *request)
700{
701 struct intel_engine_cs *engine = request->engine;
702 unsigned long flags;
703
	/* Will be called from irq-context when using foreign fences. */
705 spin_lock_irqsave(&engine->sched_engine->lock, flags);
706
707 __i915_request_unsubmit(request);
708
709 spin_unlock_irqrestore(&engine->sched_engine->lock, flags);
710}
711
712void i915_request_cancel(struct i915_request *rq, int error)
713{
714 if (!i915_request_set_error_once(rq, error))
715 return;
716
717 set_bit(I915_FENCE_FLAG_SENTINEL, &rq->fence.flags);
718
719 intel_context_cancel_request(rq->context, rq);
720}
721
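/*
 * Notification callback for rq->submit: once all of the request's submit
 * dependencies have been signaled (FENCE_COMPLETE), hand the request to the
 * engine backend for execution; on FENCE_FREE, drop the submit fence's
 * reference on the request.
 */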
722static int __i915_sw_fence_call
723submit_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
724{
725 struct i915_request *request =
726 container_of(fence, typeof(*request), submit);
727
728 switch (state) {
729 case FENCE_COMPLETE:
730 trace_i915_request_submit(request);
731
732 if (unlikely(fence->error))
733 i915_request_set_error_once(request, fence->error);
734 else
735 __rq_arm_watchdog(request);
736
		/*
		 * We need to serialize use of the submit_request() callback
		 * with its hotplugging performed during an emergency
		 * i915_gem_set_wedged(). We use the RCU mechanism to mark the
		 * critical section in order to force i915_gem_set_wedged() to
		 * wait until the submit_request() is completed before
		 * proceeding.
		 */
745 rcu_read_lock();
746 request->engine->submit_request(request);
747 rcu_read_unlock();
748 break;
749
750 case FENCE_FREE:
751 i915_request_put(request);
752 break;
753 }
754
755 return NOTIFY_DONE;
756}
757
758static int __i915_sw_fence_call
759semaphore_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
760{
761 struct i915_request *rq = container_of(fence, typeof(*rq), semaphore);
762
763 switch (state) {
764 case FENCE_COMPLETE:
765 break;
766
767 case FENCE_FREE:
768 i915_request_put(rq);
769 break;
770 }
771
772 return NOTIFY_DONE;
773}
774
775static void retire_requests(struct intel_timeline *tl)
776{
777 struct i915_request *rq, *rn;
778
779 list_for_each_entry_safe(rq, rn, &tl->requests, link)
780 if (!i915_request_retire(rq))
781 break;
782}
783
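/*
 * Slow path for request allocation: if we cannot block, fall back to the
 * engine's reserved request; otherwise try to reclaim memory by retiring
 * old requests on this timeline (and waiting for an RCU grace period)
 * before retrying the slab allocation.
 */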
784static noinline struct i915_request *
785request_alloc_slow(struct intel_timeline *tl,
786 struct i915_request **rsvd,
787 gfp_t gfp)
788{
789 struct i915_request *rq;
790
	/* If we cannot wait, dip into our reserves */
792 if (!gfpflags_allow_blocking(gfp)) {
793 rq = xchg(rsvd, NULL);
794 if (!rq)
795 goto out;
796
797 return rq;
798 }
799
800 if (list_empty(&tl->requests))
801 goto out;
802
	/* Move our oldest request to the slab-cache (if not in use!) */
804 rq = list_first_entry(&tl->requests, typeof(*rq), link);
805 i915_request_retire(rq);
806
807 rq = kmem_cache_alloc(slab_requests,
808 gfp | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
809 if (rq)
810 return rq;
811
	/* Ratelimit ourselves to prevent oom from malicious clients */
813 rq = list_last_entry(&tl->requests, typeof(*rq), link);
814 cond_synchronize_rcu(rq->rcustate);
815
	/* Retire our old requests in the hope that we free some */
817 retire_requests(tl);
818
819out:
820 return kmem_cache_alloc(slab_requests, gfp);
821}
822
823static void __i915_request_ctor(void *arg)
824{
825 struct i915_request *rq = arg;
826
827 spin_lock_init(&rq->lock);
828 i915_sched_node_init(&rq->sched);
829 i915_sw_fence_init(&rq->submit, submit_notify);
830 i915_sw_fence_init(&rq->semaphore, semaphore_notify);
831
832 rq->capture_list = NULL;
833
834 init_llist_head(&rq->execute_cb);
835}
836
837struct i915_request *
838__i915_request_create(struct intel_context *ce, gfp_t gfp)
839{
840 struct intel_timeline *tl = ce->timeline;
841 struct i915_request *rq;
842 u32 seqno;
843 int ret;
844
845 might_alloc(gfp);
846
	/* Check that the caller provided an already pinned context */
848 __intel_context_pin(ce);
849
	/*
	 * Beware: Dragons be flying overhead.
	 *
	 * We use RCU to look up requests in flight. The lookups may
	 * race with the request being allocated from the slab freelist,
	 * i.e. the request we are initialising here may already be in
	 * the process of being read by an RCU-protected lookup. As that
	 * lookup rechecks the request after acquiring a reference, the
	 * request must never be initialised to a state that could be
	 * confused with a fully constructed, completed request.
	 *
	 * Do not use kmem_cache_zalloc() here!
	 */
879 rq = kmem_cache_alloc(slab_requests,
880 gfp | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
881 if (unlikely(!rq)) {
882 rq = request_alloc_slow(tl, &ce->engine->request_pool, gfp);
883 if (!rq) {
884 ret = -ENOMEM;
885 goto err_unreserve;
886 }
887 }
888
	/*
	 * Hold a reference to the intel_context over the life of an
	 * i915_request. Without this an i915_request can exist after the
	 * context has been destroyed (e.g. request retired, context closed,
	 * but user space holds a reference to the request from an out
	 * fence). In the case of GuC submission + virtual engine, the engine
	 * that the request references is also destroyed, which can trigger
	 * bad pointer dereferences in fence ops (e.g.
	 * i915_fence_get_driver_name). We could likely change these functions
	 * to avoid touching the engine, but let's just be safe and hold the
	 * intel_context reference. In execlist mode the request always
	 * eventually points to a physical engine, so this isn't an issue.
	 */
901 rq->context = intel_context_get(ce);
902 rq->engine = ce->engine;
903 rq->ring = ce->ring;
904 rq->execution_mask = ce->engine->mask;
905
906 ret = intel_timeline_get_seqno(tl, rq, &seqno);
907 if (ret)
908 goto err_free;
909
910 dma_fence_init(&rq->fence, &i915_fence_ops, &rq->lock,
911 tl->fence_context, seqno);
912
913 RCU_INIT_POINTER(rq->timeline, tl);
914 rq->hwsp_seqno = tl->hwsp_seqno;
915 GEM_BUG_ON(__i915_request_is_complete(rq));
916
917 rq->rcustate = get_state_synchronize_rcu();
918
919 rq->guc_prio = GUC_PRIO_INIT;
920
	/* We bump the ref for the fence chain */
922 i915_sw_fence_reinit(&i915_request_get(rq)->submit);
923 i915_sw_fence_reinit(&i915_request_get(rq)->semaphore);
924
925 i915_sched_node_reinit(&rq->sched);
926
	/* No zalloc, everything must be cleared after use */
928 rq->batch = NULL;
929 __rq_init_watchdog(rq);
930 GEM_BUG_ON(rq->capture_list);
931 GEM_BUG_ON(!llist_empty(&rq->execute_cb));
932
	/*
	 * Reserve space in the ring buffer for all the commands required to
	 * eventually emit this request. This is to guarantee that the
	 * i915_request_add() call can't fail. Note that the reserve may need
	 * to be redone if the request is not actually submitted straight
	 * away, e.g. because a GPU scheduler has deferred it.
	 *
	 * Note that due to how we add reserved_space to intel_ring_begin()
	 * we need to double our request to ensure that if we need to wrap
	 * around inside i915_request_add() there is sufficient space at
	 * the beginning of the ring as well.
	 */
945 rq->reserved_space =
946 2 * rq->engine->emit_fini_breadcrumb_dw * sizeof(u32);
947
	/*
	 * Record the position of the start of the request so that
	 * should we detect the updated seqno part-way through the
	 * GPU processing the request, we never over-estimate the
	 * position of the head.
	 */
954 rq->head = rq->ring->emit;
955
956 ret = rq->engine->request_alloc(rq);
957 if (ret)
958 goto err_unwind;
959
960 rq->infix = rq->ring->emit;
961
962 intel_context_mark_active(ce);
963 list_add_tail_rcu(&rq->link, &tl->requests);
964
965 return rq;
966
967err_unwind:
968 ce->ring->emit = rq->head;
969
	/* Make sure we didn't add ourselves to external state before freeing */
971 GEM_BUG_ON(!list_empty(&rq->sched.signalers_list));
972 GEM_BUG_ON(!list_empty(&rq->sched.waiters_list));
973
974err_free:
975 intel_context_put(ce);
976 kmem_cache_free(slab_requests, rq);
977err_unreserve:
978 intel_context_unpin(ce);
979 return ERR_PTR(ret);
980}
981
982struct i915_request *
983i915_request_create(struct intel_context *ce)
984{
985 struct i915_request *rq;
986 struct intel_timeline *tl;
987
988 tl = intel_context_timeline_lock(ce);
989 if (IS_ERR(tl))
990 return ERR_CAST(tl);
991
	/* Move our oldest request to the slab-cache (if not in use!) */
993 rq = list_first_entry(&tl->requests, typeof(*rq), link);
994 if (!list_is_last(&rq->link, &tl->requests))
995 i915_request_retire(rq);
996
997 intel_context_enter(ce);
998 rq = __i915_request_create(ce, GFP_KERNEL);
999 intel_context_exit(ce);
1000 if (IS_ERR(rq))
1001 goto err_unlock;
1002
	/* Check that we do not interrupt ourselves with a new request */
1004 rq->cookie = lockdep_pin_lock(&tl->mutex);
1005
1006 return rq;
1007
1008err_unlock:
1009 intel_context_timeline_unlock(tl);
1010 return rq;
1011}
1012
1013static int
1014i915_request_await_start(struct i915_request *rq, struct i915_request *signal)
1015{
1016 struct dma_fence *fence;
1017 int err;
1018
1019 if (i915_request_timeline(rq) == rcu_access_pointer(signal->timeline))
1020 return 0;
1021
1022 if (i915_request_started(signal))
1023 return 0;
1024
	/*
	 * The caller holds a reference on @signal, but we do not serialise
	 * against it being retired and removed from the lists.
	 *
	 * We do not hold a reference to the request before @signal, and
	 * so must be very careful to ensure that it is not _recycled_ as
	 * we follow the link backwards.
	 */
1033 fence = NULL;
1034 rcu_read_lock();
1035 do {
1036 struct list_head *pos = READ_ONCE(signal->link.prev);
1037 struct i915_request *prev;
1038
		/* Confirm signal has not been retired, the link is valid */
1040 if (unlikely(__i915_request_has_started(signal)))
1041 break;
1042
		/* Is signal the earliest request on its timeline? */
1044 if (pos == &rcu_dereference(signal->timeline)->requests)
1045 break;
1046
		/*
		 * Peek at the request before us in the timeline. That
		 * request will only be valid before it is retired, so
		 * after acquiring a reference to it, confirm that it is
		 * indeed still part of the signaler's timeline.
		 */
1053 prev = list_entry(pos, typeof(*prev), link);
1054 if (!i915_request_get_rcu(prev))
1055 break;
1056
		/* Confirm that prev is still immediately ahead of signal */
1058 if (unlikely(READ_ONCE(prev->link.next) != &signal->link)) {
1059 i915_request_put(prev);
1060 break;
1061 }
1062
1063 fence = &prev->fence;
1064 } while (0);
1065 rcu_read_unlock();
1066 if (!fence)
1067 return 0;
1068
1069 err = 0;
1070 if (!intel_timeline_sync_is_later(i915_request_timeline(rq), fence))
1071 err = i915_sw_fence_await_dma_fence(&rq->submit,
1072 fence, 0,
1073 I915_FENCE_GFP);
1074 dma_fence_put(fence);
1075
1076 return err;
1077}
1078
1079static intel_engine_mask_t
1080already_busywaiting(struct i915_request *rq)
1081{
	/*
	 * Using semaphores incurs a cost in having the GPU poll a
	 * memory location, busywaiting for it to change. The continual
	 * memory reads can have a noticeable impact on the rest of the
	 * system with the extra bus traffic, stalling the cpu as it too
	 * tries to access memory across the bus (perf stat -e bus-cycles).
	 *
	 * To limit that impact, each request emits at most one semaphore
	 * wait per signaling engine, and we also back off entirely from
	 * engines that have been marked as saturated (i.e. where
	 * busywaiting has previously proven to be a net loss).
	 */
1094 return rq->sched.semaphores | READ_ONCE(rq->engine->saturated);
1095}
1096
1097static int
1098__emit_semaphore_wait(struct i915_request *to,
1099 struct i915_request *from,
1100 u32 seqno)
1101{
1102 const int has_token = GRAPHICS_VER(to->engine->i915) >= 12;
1103 u32 hwsp_offset;
1104 int len, err;
1105 u32 *cs;
1106
1107 GEM_BUG_ON(GRAPHICS_VER(to->engine->i915) < 8);
1108 GEM_BUG_ON(i915_request_has_initial_breadcrumb(to));
1109
	/* We need to pin the signaler's HWSP until we are finished reading. */
1111 err = intel_timeline_read_hwsp(from, to, &hwsp_offset);
1112 if (err)
1113 return err;
1114
1115 len = 4;
1116 if (has_token)
1117 len += 2;
1118
1119 cs = intel_ring_begin(to, len);
1120 if (IS_ERR(cs))
1121 return PTR_ERR(cs);
1122
	/*
	 * Using greater-than-or-equal here means we have to worry
	 * about seqno wraparound. To side step that issue, we swap
	 * the timeline HWSP upon wrapping, so that everyone listening
	 * for the old (pre-wrap) values do not see the much smaller
	 * (post-wrap) values than they were expecting (and so wait
	 * forever).
	 */
1131 *cs++ = (MI_SEMAPHORE_WAIT |
1132 MI_SEMAPHORE_GLOBAL_GTT |
1133 MI_SEMAPHORE_POLL |
1134 MI_SEMAPHORE_SAD_GTE_SDD) +
1135 has_token;
1136 *cs++ = seqno;
1137 *cs++ = hwsp_offset;
1138 *cs++ = 0;
1139 if (has_token) {
1140 *cs++ = 0;
1141 *cs++ = MI_NOOP;
1142 }
1143
1144 intel_ring_advance(to, cs);
1145 return 0;
1146}
1147
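/*
 * Prefer a GPU semaphore busywait on the signaler's breadcrumb over a
 * software await of @from's fence, when the context allows it and it is
 * safe to do so; otherwise fall back to waiting on the dma-fence before
 * submission.
 */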
1148static int
1149emit_semaphore_wait(struct i915_request *to,
1150 struct i915_request *from,
1151 gfp_t gfp)
1152{
1153 const intel_engine_mask_t mask = READ_ONCE(from->engine)->mask;
1154 struct i915_sw_fence *wait = &to->submit;
1155
1156 if (!intel_context_use_semaphores(to->context))
1157 goto await_fence;
1158
1159 if (i915_request_has_initial_breadcrumb(to))
1160 goto await_fence;
1161
	/*
	 * If this or its dependents are waiting on an external fence
	 * that may fail catastrophically, then we want to avoid using
	 * semaphores as they bypass the fence signaling metadata, and we
	 * lose the error propagation.
	 */
1168 if (from->sched.flags & I915_SCHED_HAS_EXTERNAL_CHAIN)
1169 goto await_fence;
1170
	/* Just emit the first semaphore we see as request space is limited. */
1172 if (already_busywaiting(to) & mask)
1173 goto await_fence;
1174
1175 if (i915_request_await_start(to, from) < 0)
1176 goto await_fence;
1177
	/* Only submit our spinner after the signaler is running! */
1179 if (__await_execution(to, from, gfp))
1180 goto await_fence;
1181
1182 if (__emit_semaphore_wait(to, from, from->fence.seqno))
1183 goto await_fence;
1184
1185 to->sched.semaphores |= mask;
1186 wait = &to->semaphore;
1187
1188await_fence:
1189 return i915_sw_fence_await_dma_fence(wait,
1190 &from->fence, 0,
1191 I915_FENCE_GFP);
1192}
1193
1194static bool intel_timeline_sync_has_start(struct intel_timeline *tl,
1195 struct dma_fence *fence)
1196{
1197 return __intel_timeline_sync_is_later(tl,
1198 fence->context,
1199 fence->seqno - 1);
1200}
1201
1202static int intel_timeline_sync_set_start(struct intel_timeline *tl,
1203 const struct dma_fence *fence)
1204{
1205 return __intel_timeline_sync_set(tl, fence->context, fence->seqno - 1);
1206}
1207
1208static int
1209__i915_request_await_execution(struct i915_request *to,
1210 struct i915_request *from)
1211{
1212 int err;
1213
1214 GEM_BUG_ON(intel_context_is_barrier(from->context));
1215
	/* Submit both requests at the same time */
1217 err = __await_execution(to, from, I915_FENCE_GFP);
1218 if (err)
1219 return err;
1220
	/* Squash repeated dependencies to the same timelines */
1222 if (intel_timeline_sync_has_start(i915_request_timeline(to),
1223 &from->fence))
1224 return 0;
1225
	/*
	 * Wait until the start of this request.
	 *
	 * The execution cb fires when we submit the request to HW. But in
	 * many cases this may be long before the request itself is ready to
	 * run (consider that we submit 2 requests for the same context, where
	 * the request of interest is behind an indefinite spinner). So we also
	 * await the prior request on the signaler's timeline, so that we do
	 * not begin until the signaler itself is at least queued to HW.
	 */
1236 err = i915_request_await_start(to, from);
1237 if (err < 0)
1238 return err;
1239
	/*
	 * Ensure both start together (after all semaphores in signal).
	 *
	 * Now that we are queued to the HW at roughly the same time (thanks
	 * to the execute cb) and are ready to run at roughly the same time
	 * (thanks to the await start), our signaler may still be indefinitely
	 * delayed by waiting on a semaphore from a remote engine. If the
	 * signaler in turn needs us to be executing in order to make forward
	 * progress, waiting for its completion would deadlock. So instead of
	 * waiting for the signaler to complete, we only busywait (on a GPU
	 * semaphore) until the signaler has *started*, matching the intent
	 * of running both halves concurrently.
	 */
1259 if (intel_engine_has_semaphores(to->engine) &&
1260 !i915_request_has_initial_breadcrumb(to)) {
1261 err = __emit_semaphore_wait(to, from, from->fence.seqno - 1);
1262 if (err < 0)
1263 return err;
1264 }
1265
	/* Couple the dependency tree for PI on this exposed to->fence */
1267 if (to->engine->sched_engine->schedule) {
1268 err = i915_sched_node_add_dependency(&to->sched,
1269 &from->sched,
1270 I915_DEPENDENCY_WEAK);
1271 if (err < 0)
1272 return err;
1273 }
1274
1275 return intel_timeline_sync_set_start(i915_request_timeline(to),
1276 &from->fence);
1277}
1278
1279static void mark_external(struct i915_request *rq)
1280{
	/*
	 * The downside of using semaphores is that we lose metadata passing
	 * along the signaling chain. This is particularly nasty when we
	 * need to pass along a fatal error such as EFAULT or EDEADLK. For
	 * fatal errors we want to scrub the request before it is executed,
	 * which means that we cannot preload the request onto HW and have
	 * it wait upon a semaphore.
	 */
1289 rq->sched.flags |= I915_SCHED_HAS_EXTERNAL_CHAIN;
1290}
1291
1292static int
1293__i915_request_await_external(struct i915_request *rq, struct dma_fence *fence)
1294{
1295 mark_external(rq);
1296 return i915_sw_fence_await_dma_fence(&rq->submit, fence,
1297 i915_fence_context_timeout(rq->engine->i915,
1298 fence->context),
1299 I915_FENCE_GFP);
1300}
1301
1302static int
1303i915_request_await_external(struct i915_request *rq, struct dma_fence *fence)
1304{
1305 struct dma_fence *iter;
1306 int err = 0;
1307
1308 if (!to_dma_fence_chain(fence))
1309 return __i915_request_await_external(rq, fence);
1310
1311 dma_fence_chain_for_each(iter, fence) {
1312 struct dma_fence_chain *chain = to_dma_fence_chain(iter);
1313
1314 if (!dma_fence_is_i915(chain->fence)) {
1315 err = __i915_request_await_external(rq, iter);
1316 break;
1317 }
1318
1319 err = i915_request_await_dma_fence(rq, chain->fence);
1320 if (err < 0)
1321 break;
1322 }
1323
1324 dma_fence_put(iter);
1325 return err;
1326}
1327
1328int
1329i915_request_await_execution(struct i915_request *rq,
1330 struct dma_fence *fence)
1331{
1332 struct dma_fence **child = &fence;
1333 unsigned int nchild = 1;
1334 int ret;
1335
1336 if (dma_fence_is_array(fence)) {
1337 struct dma_fence_array *array = to_dma_fence_array(fence);
1338
		/* XXX Error for signal-on-any fence arrays */
1340
1341 child = array->fences;
1342 nchild = array->num_fences;
1343 GEM_BUG_ON(!nchild);
1344 }
1345
1346 do {
1347 fence = *child++;
1348 if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
1349 continue;
1350
1351 if (fence->context == rq->fence.context)
1352 continue;
1353
		/*
		 * We don't squash repeated fence dependencies here as we
		 * want to run our callback in all cases.
		 */
1359 if (dma_fence_is_i915(fence))
1360 ret = __i915_request_await_execution(rq,
1361 to_request(fence));
1362 else
1363 ret = i915_request_await_external(rq, fence);
1364 if (ret < 0)
1365 return ret;
1366 } while (--nchild);
1367
1368 return 0;
1369}
1370
1371static int
1372await_request_submit(struct i915_request *to, struct i915_request *from)
1373{
	/*
	 * If we are waiting on a virtual engine, then it may be
	 * constrained to execute on a single engine *prior* to submission.
	 * When it is submitted, it will be first submitted to the virtual
	 * engine and then passed to the physical engine. We cannot allow
	 * the waiter to be submitted immediately to the physical engine
	 * as it may then bypass the virtual request.
	 */
1382 if (to->engine == READ_ONCE(from->engine))
1383 return i915_sw_fence_await_sw_fence_gfp(&to->submit,
1384 &from->submit,
1385 I915_FENCE_GFP);
1386 else
1387 return __i915_request_await_execution(to, from);
1388}
1389
1390static int
1391i915_request_await_request(struct i915_request *to, struct i915_request *from)
1392{
1393 int ret;
1394
1395 GEM_BUG_ON(to == from);
1396 GEM_BUG_ON(to->timeline == from->timeline);
1397
1398 if (i915_request_completed(from)) {
1399 i915_sw_fence_set_error_once(&to->submit, from->fence.error);
1400 return 0;
1401 }
1402
1403 if (to->engine->sched_engine->schedule) {
1404 ret = i915_sched_node_add_dependency(&to->sched,
1405 &from->sched,
1406 I915_DEPENDENCY_EXTERNAL);
1407 if (ret < 0)
1408 return ret;
1409 }
1410
1411 if (!intel_engine_uses_guc(to->engine) &&
1412 is_power_of_2(to->execution_mask | READ_ONCE(from->execution_mask)))
1413 ret = await_request_submit(to, from);
1414 else
1415 ret = emit_semaphore_wait(to, from, I915_FENCE_GFP);
1416 if (ret < 0)
1417 return ret;
1418
1419 return 0;
1420}
1421
1422int
1423i915_request_await_dma_fence(struct i915_request *rq, struct dma_fence *fence)
1424{
1425 struct dma_fence **child = &fence;
1426 unsigned int nchild = 1;
1427 int ret;
1428
	/*
	 * Note that if the fence-array was created in signal-on-any mode,
	 * we should *not* decompose it into its individual fences. However,
	 * we don't currently store which mode the fence-array is operating
	 * in. Fortunately, the only user of signal-on-any is private to
	 * amdgpu and we should not see any incoming fence-array from
	 * sync-file being in signal-on-any mode.
	 */
1437 if (dma_fence_is_array(fence)) {
1438 struct dma_fence_array *array = to_dma_fence_array(fence);
1439
1440 child = array->fences;
1441 nchild = array->num_fences;
1442 GEM_BUG_ON(!nchild);
1443 }
1444
1445 do {
1446 fence = *child++;
1447 if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
1448 continue;
1449
		/*
		 * Requests on the same timeline are explicitly ordered, along
		 * with their dependencies, by i915_request_add() which ensures
		 * that requests are submitted in-order through each ring.
		 */
1455 if (fence->context == rq->fence.context)
1456 continue;
1457
		/* Squash repeated waits to the same timelines */
1459 if (fence->context &&
1460 intel_timeline_sync_is_later(i915_request_timeline(rq),
1461 fence))
1462 continue;
1463
1464 if (dma_fence_is_i915(fence))
1465 ret = i915_request_await_request(rq, to_request(fence));
1466 else
1467 ret = i915_request_await_external(rq, fence);
1468 if (ret < 0)
1469 return ret;
1470
		/* Record the latest fence used against each timeline */
1472 if (fence->context)
1473 intel_timeline_sync_set(i915_request_timeline(rq),
1474 fence);
1475 } while (--nchild);
1476
1477 return 0;
1478}
1479
/**
 * i915_request_await_object - set this request to (async) wait upon a bo
 * @to: request we are wishing to use
 * @obj: object which may be in use on another ring.
 * @write: whether the wait is on behalf of a writer
 *
 * This code is meant to abstract object synchronization with the GPU.
 * Conceptually we serialise writes between engines inside the GPU.
 * We only allow one engine to write into a buffer at any time, but
 * multiple readers. To ensure each has a coherent view of memory, we must:
 *
 * - If there is an outstanding write request to the object, the new
 *   request must wait for it to complete (either CPU or in hw, requests
 *   on the same ring will be naturally ordered).
 *
 * - If we are a write request (pending_write_domain is set), the new
 *   request must wait for outstanding read requests to complete.
 *
 * Returns 0 if successful, else propagates up the lower layer error.
 */
1500int
1501i915_request_await_object(struct i915_request *to,
1502 struct drm_i915_gem_object *obj,
1503 bool write)
1504{
1505 struct dma_fence *excl;
1506 int ret = 0;
1507
1508 if (write) {
1509 struct dma_fence **shared;
1510 unsigned int count, i;
1511
1512 ret = dma_resv_get_fences(obj->base.resv, &excl, &count,
1513 &shared);
1514 if (ret)
1515 return ret;
1516
1517 for (i = 0; i < count; i++) {
1518 ret = i915_request_await_dma_fence(to, shared[i]);
1519 if (ret)
1520 break;
1521
1522 dma_fence_put(shared[i]);
1523 }
1524
1525 for (; i < count; i++)
1526 dma_fence_put(shared[i]);
1527 kfree(shared);
1528 } else {
1529 excl = dma_resv_get_excl_unlocked(obj->base.resv);
1530 }
1531
1532 if (excl) {
1533 if (ret == 0)
1534 ret = i915_request_await_dma_fence(to, excl);
1535
1536 dma_fence_put(excl);
1537 }
1538
1539 return ret;
1540}
1541
1542static struct i915_request *
1543__i915_request_add_to_timeline(struct i915_request *rq)
1544{
1545 struct intel_timeline *timeline = i915_request_timeline(rq);
1546 struct i915_request *prev;
1547
	/*
	 * Dependency tracking and request ordering along the timeline
	 * is special cased so that we can eliminate redundant ordering
	 * operations while building the request (we know that the timeline
	 * itself is ordered, and here we guarantee it).
	 *
	 * As we know we will need to emit tracking along the timeline,
	 * we embed the hooks into our request struct -- at the cost of
	 * having to have specialised no-allocation interfaces (which will
	 * be beneficial elsewhere).
	 *
	 * A second benefit to open-coding i915_request_await_request is
	 * that we can apply a slight variant of the rules specialised
	 * for timelines that jump between engines (such as virtual engines).
	 * If we consider the case of virtual engine, we must emit a dma-fence
	 * to prevent scheduling of the second request until the first is
	 * complete (to maximise our greedy late load balancing) and this
	 * precludes optimising to use semaphores serialisation of a single
	 * timeline across engines.
	 */
1568 prev = to_request(__i915_active_fence_set(&timeline->last_request,
1569 &rq->fence));
1570 if (prev && !__i915_request_is_complete(prev)) {
1571 bool uses_guc = intel_engine_uses_guc(rq->engine);
1572
		/*
		 * Requests on the same context must be added to the timeline
		 * in seqno order; anything else indicates the timeline has
		 * been corrupted.
		 */
1579 GEM_BUG_ON(prev->context == rq->context &&
1580 i915_seqno_passed(prev->fence.seqno,
1581 rq->fence.seqno));
1582
1583 if ((!uses_guc &&
1584 is_power_of_2(READ_ONCE(prev->engine)->mask | rq->engine->mask)) ||
1585 (uses_guc && prev->context == rq->context))
1586 i915_sw_fence_await_sw_fence(&rq->submit,
1587 &prev->submit,
1588 &rq->submitq);
1589 else
1590 __i915_sw_fence_await_dma_fence(&rq->submit,
1591 &prev->fence,
1592 &rq->dmaq);
1593 if (rq->engine->sched_engine->schedule)
1594 __i915_sched_node_add_dependency(&rq->sched,
1595 &prev->sched,
1596 &rq->dep,
1597 0);
1598 }
1599
	/*
	 * Make sure that no request gazumped us - if it was allocated after
	 * our i915_request_alloc() and called __i915_request_add() before
	 * us, the timeline will hold its seqno which is later than ours.
	 */
1605 GEM_BUG_ON(timeline->seqno != rq->fence.seqno);
1606
1607 return prev;
1608}
1609
/*
 * NB: This function is not allowed to fail. Doing so would mean the
 * request is not being tracked for completion but the work itself is
 * going to happen on the hardware. This would be a Bad Thing(tm).
 */
1615struct i915_request *__i915_request_commit(struct i915_request *rq)
1616{
1617 struct intel_engine_cs *engine = rq->engine;
1618 struct intel_ring *ring = rq->ring;
1619 u32 *cs;
1620
1621 RQ_TRACE(rq, "\n");
1622
	/*
	 * To ensure that this call will not fail, space has been reserved
	 * during request construction; drop that reservation now that we
	 * are about to emit the final breadcrumb into the ring.
	 */
1628 GEM_BUG_ON(rq->reserved_space > ring->space);
1629 rq->reserved_space = 0;
1630 rq->emitted_jiffies = jiffies;
1631
	/*
	 * Record the position of the start of the breadcrumb so that
	 * should we detect the updated seqno part-way through the
	 * GPU processing the request, we never over-estimate the
	 * position of the ring's HEAD.
	 */
1638 cs = intel_ring_begin(rq, engine->emit_fini_breadcrumb_dw);
1639 GEM_BUG_ON(IS_ERR(cs));
1640 rq->postfix = intel_ring_offset(rq, cs);
1641
1642 return __i915_request_add_to_timeline(rq);
1643}
1644
1645void __i915_request_queue_bh(struct i915_request *rq)
1646{
1647 i915_sw_fence_commit(&rq->semaphore);
1648 i915_sw_fence_commit(&rq->submit);
1649}
1650
1651void __i915_request_queue(struct i915_request *rq,
1652 const struct i915_sched_attr *attr)
1653{
	/*
	 * Let the backend know a new request has arrived that may need
	 * to adjust the existing execution schedule due to a high priority
	 * request - i.e. we may want to preempt the current request in order
	 * to run a high priority dependency chain *before* we can execute this
	 * request.
	 *
	 * This is called before the request is ready to run so that we can
	 * decide whether to preempt the entire chain so that it is ready to
	 * run at the earliest possible convenience.
	 */
1665 if (attr && rq->engine->sched_engine->schedule)
1666 rq->engine->sched_engine->schedule(rq, attr);
1667
1668 local_bh_disable();
1669 __i915_request_queue_bh(rq);
1670 local_bh_enable();
1671}
1672
1673void i915_request_add(struct i915_request *rq)
1674{
1675 struct intel_timeline * const tl = i915_request_timeline(rq);
1676 struct i915_sched_attr attr = {};
1677 struct i915_gem_context *ctx;
1678
1679 lockdep_assert_held(&tl->mutex);
1680 lockdep_unpin_lock(&tl->mutex, rq->cookie);
1681
1682 trace_i915_request_add(rq);
1683 __i915_request_commit(rq);
1684
	/* Inherit the scheduling attributes from the request's GEM context */
1686 rcu_read_lock();
1687 ctx = rcu_dereference(rq->context->gem_context);
1688 if (ctx)
1689 attr = ctx->sched;
1690 rcu_read_unlock();
1691
1692 __i915_request_queue(rq, &attr);
1693
1694 mutex_unlock(&tl->mutex);
1695}
1696
1697static unsigned long local_clock_ns(unsigned int *cpu)
1698{
1699 unsigned long t;
1700
	/*
	 * Note that local_clock() is only defined wrt to the current CPU;
	 * the comparisons are no longer valid if we switch CPUs. Instead of
	 * blocking preemption for the entire busywait, we can detect the CPU
	 * switch and use that as indicator of system load and a reason to
	 * stop busywaiting, see busywait_stop().
	 */
1713 *cpu = get_cpu();
1714 t = local_clock();
1715 put_cpu();
1716
1717 return t;
1718}
1719
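/* Stop busywaiting once the timeout expires or we migrate to another CPU. */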
1720static bool busywait_stop(unsigned long timeout, unsigned int cpu)
1721{
1722 unsigned int this_cpu;
1723
1724 if (time_after(local_clock_ns(&this_cpu), timeout))
1725 return true;
1726
1727 return this_cpu != cpu;
1728}
1729
1730static bool __i915_spin_request(struct i915_request * const rq, int state)
1731{
1732 unsigned long timeout_ns;
1733 unsigned int cpu;
1734
	/*
	 * Only wait for the request if we know it is likely to complete.
	 *
	 * We don't track the timestamps around requests, nor the average
	 * request length, so we do not have a good indicator that this
	 * request will complete within the timeout. What we do know is the
	 * order in which requests are executed by the context and so we can
	 * tell if the request has been started. If the request is not even
	 * running yet, it is a fair assumption that it will not complete
	 * within our relatively short timeout.
	 */
1746 if (!i915_request_is_running(rq))
1747 return false;
1748
	/*
	 * When waiting for high frequency requests, e.g. during synchronous
	 * rendering split between the CPU and GPU, the finite amount of time
	 * required to set up the irq and wait upon it limits the response
	 * rate. By busywaiting on the request completion for a short while we
	 * can service the high frequency waits as quickly as possible. However,
	 * if it is a slow request, we want to sleep as quickly as possible.
	 * The tradeoff between waiting and sleeping is roughly the time it
	 * takes to sleep on a request, on the order of a microsecond.
	 */
1760 timeout_ns = READ_ONCE(rq->engine->props.max_busywait_duration_ns);
1761 timeout_ns += local_clock_ns(&cpu);
1762 do {
1763 if (dma_fence_is_signaled(&rq->fence))
1764 return true;
1765
1766 if (signal_pending_state(state, current))
1767 break;
1768
1769 if (busywait_stop(timeout_ns, cpu))
1770 break;
1771
1772 cpu_relax();
1773 } while (!need_resched());
1774
1775 return false;
1776}
1777
1778struct request_wait {
1779 struct dma_fence_cb cb;
1780 struct task_struct *tsk;
1781};
1782
1783static void request_wait_wake(struct dma_fence *fence, struct dma_fence_cb *cb)
1784{
1785 struct request_wait *wait = container_of(cb, typeof(*wait), cb);
1786
1787 wake_up_process(fetch_and_zero(&wait->tsk));
1788}
1789
/**
 * i915_request_wait - wait until execution of request has finished
 * @rq: the request to wait upon
 * @flags: how to wait
 * @timeout: how long to wait in jiffies
 *
 * i915_request_wait() waits for the request to be completed, for a
 * maximum of @timeout jiffies (with MAX_SCHEDULE_TIMEOUT implying an
 * unbounded wait).
 *
 * Returns the remaining time (in jiffies) if the request completed, which may
 * be zero or -ETIME if the request is unfinished after the timeout expires.
 * May return -EINTR if called with I915_WAIT_INTERRUPTIBLE and a signal is
 * pending before the request completes.
 */
1805long i915_request_wait(struct i915_request *rq,
1806 unsigned int flags,
1807 long timeout)
1808{
1809 const int state = flags & I915_WAIT_INTERRUPTIBLE ?
1810 TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
1811 struct request_wait wait;
1812
1813 might_sleep();
1814 GEM_BUG_ON(timeout < 0);
1815
1816 if (dma_fence_is_signaled(&rq->fence))
1817 return timeout;
1818
1819 if (!timeout)
1820 return -ETIME;
1821
1822 trace_i915_request_wait_begin(rq, flags);
1823
	/*
	 * We must never wait on the GPU while holding a lock as we
	 * may need to perform a GPU reset. So while we don't need to
	 * serialise wait/reset with an explicit lock, we do want
	 * lockdep to detect potential dependency cycles.
	 */
1830 mutex_acquire(&rq->engine->gt->reset.mutex.dep_map, 0, 0, _THIS_IP_);
1831
	/*
	 * Optimistic spin before touching IRQs.
	 *
	 * We may use a rather large value here to offset the penalty of
	 * switching away from the active task. Frequently, the client will
	 * wait upon an old swapbuffer to throttle itself to remain within a
	 * frame of the gpu. If the client is running in lockstep with the gpu,
	 * then it should not be waiting long at all, and a sleep now will incur
	 * extra scheduler latency in producing the next frame. To try to
	 * avoid adding the cost of enabling/disabling the interrupt to the
	 * short wait, we first spin to see if the request would have completed
	 * in the time taken to setup the interrupt.
	 *
	 * We need upto 5us to enable the irq, and upto 20us to hide the
	 * scheduler latency of a context switch, ignoring the secondary
	 * impacts from a context switch such as cache eviction.
	 */
1855 if (IS_ACTIVE(CONFIG_DRM_I915_MAX_REQUEST_BUSYWAIT) &&
1856 __i915_spin_request(rq, state))
1857 goto out;
1858
	/*
	 * This client is about to stall waiting for the GPU. In many cases
	 * this is undesirable and limits the throughput of the system, as
	 * many clients cannot continue processing user input/output whilst
	 * blocked. RPS autotuning may take tens of milliseconds to respond
	 * to the GPU load and thus incurs additional latency for the client.
	 * We can circumvent that by promoting the GPU frequency to maximum
	 * before we sleep. This makes the GPU throttle up much more quickly
	 * (good for benchmarks and user experience, e.g. window animations),
	 * but at a cost of spending more power processing the input (with the
	 * potential to frequently waste it for e.g. benchmarks).
	 */
1871 if (flags & I915_WAIT_PRIORITY && !i915_request_started(rq))
1872 intel_rps_boost(rq);
1873
1874 wait.tsk = current;
1875 if (dma_fence_add_callback(&rq->fence, &wait.cb, request_wait_wake))
1876 goto out;
1877
	/*
	 * Flush the submission tasklet, but only if it may help this request.
	 *
	 * We sometimes experience some latency between the HW interrupts and
	 * tasklet execution (mostly due to ksoftirqd latency, but it can also
	 * be due to lazy CS events), so lets run the tasklet manually if there
	 * is a chance it may submit this request. If the request is not ready
	 * to run, as it is waiting for other fences to be signaled, flushing
	 * the tasklet is busy work without any advantage for this client.
	 *
	 * If the HW is being lazy, this is the last chance before we go to
	 * sleep to catch any pending events. We will check periodically in
	 * the heartbeat to flush the submission tasklets as a last resort
	 * for unhappy HW.
	 */
1893 if (i915_request_is_ready(rq))
1894 __intel_engine_flush_submission(rq->engine, false);
1895
1896 for (;;) {
1897 set_current_state(state);
1898
1899 if (dma_fence_is_signaled(&rq->fence))
1900 break;
1901
1902 if (signal_pending_state(state, current)) {
1903 timeout = -ERESTARTSYS;
1904 break;
1905 }
1906
1907 if (!timeout) {
1908 timeout = -ETIME;
1909 break;
1910 }
1911
1912 timeout = io_schedule_timeout(timeout);
1913 }
1914 __set_current_state(TASK_RUNNING);
1915
1916 if (READ_ONCE(wait.tsk))
1917 dma_fence_remove_callback(&rq->fence, &wait.cb);
1918 GEM_BUG_ON(!list_empty(&wait.cb.node));
1919
1920out:
1921 mutex_release(&rq->engine->gt->reset.mutex.dep_map, _THIS_IP_);
1922 trace_i915_request_wait_end(rq);
1923 return timeout;
1924}
1925
1926static int print_sched_attr(const struct i915_sched_attr *attr,
1927 char *buf, int x, int len)
1928{
1929 if (attr->priority == I915_PRIORITY_INVALID)
1930 return x;
1931
1932 x += snprintf(buf + x, len - x,
1933 " prio=%d", attr->priority);
1934
1935 return x;
1936}
1937
1938static char queue_status(const struct i915_request *rq)
1939{
1940 if (i915_request_is_active(rq))
1941 return 'E';
1942
1943 if (i915_request_is_ready(rq))
1944 return intel_engine_is_virtual(rq->engine) ? 'V' : 'R';
1945
1946 return 'U';
1947}
1948
1949static const char *run_status(const struct i915_request *rq)
1950{
1951 if (__i915_request_is_complete(rq))
1952 return "!";
1953
1954 if (__i915_request_has_started(rq))
1955 return "*";
1956
1957 if (!i915_sw_fence_signaled(&rq->semaphore))
1958 return "&";
1959
1960 return "";
1961}
1962
1963static const char *fence_status(const struct i915_request *rq)
1964{
1965 if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags))
1966 return "+";
1967
1968 if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &rq->fence.flags))
1969 return "-";
1970
1971 return "";
1972}
1973
1974void i915_request_show(struct drm_printer *m,
1975 const struct i915_request *rq,
1976 const char *prefix,
1977 int indent)
1978{
1979 const char *name = rq->fence.ops->get_timeline_name((struct dma_fence *)&rq->fence);
1980 char buf[80] = "";
1981 int x = 0;
1982
	/*
	 * The prefix is used to show the queue status, for which we use
	 * the following flags:
	 *
	 *  U [unready]
	 *    - initial status upon being submitted by the user
	 *
	 *    - the request is not ready for execution as it is waiting
	 *      for external fences
	 *
	 *  R [ready]
	 *    - all fences the request was waiting on have been signaled,
	 *      and the request is now ready for execution and will be
	 *      in a backend queue
	 *
	 *    - a ready request may still need to wait on semaphores
	 *      [internal fences]
	 *
	 *  V [ready/virtual]
	 *    - same as ready, but queued over multiple backends
	 *
	 *  E [executing]
	 *    - the request has been transferred from the backend queue and
	 *      submitted for execution on HW
	 *
	 *    - a completed request may still be regarded as executing, its
	 *      status may not be updated until it is retired and removed
	 *      from the queues
	 */
2013 x = print_sched_attr(&rq->sched.attr, buf, x, sizeof(buf));
2014
2015 drm_printf(m, "%s%.*s%c %llx:%lld%s%s %s @ %dms: %s\n",
2016 prefix, indent, " ",
2017 queue_status(rq),
2018 rq->fence.context, rq->fence.seqno,
2019 run_status(rq),
2020 fence_status(rq),
2021 buf,
2022 jiffies_to_msecs(jiffies - rq->emitted_jiffies),
2023 name);
2024}
2025
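/*
 * Check whether an engine is currently executing from this request's ring
 * by comparing RING_START against the request's ring vma offset; for a
 * virtual request, every sibling engine is checked in turn.
 */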
2026static bool engine_match_ring(struct intel_engine_cs *engine, struct i915_request *rq)
2027{
2028 u32 ring = ENGINE_READ(engine, RING_START);
2029
2030 return ring == i915_ggtt_offset(rq->ring->vma);
2031}
2032
2033static bool match_ring(struct i915_request *rq)
2034{
2035 struct intel_engine_cs *engine;
2036 bool found;
2037 int i;
2038
2039 if (!intel_engine_is_virtual(rq->engine))
2040 return engine_match_ring(rq->engine, rq);
2041
2042 found = false;
2043 i = 0;
2044 while ((engine = intel_engine_get_sibling(rq->engine, i++))) {
2045 found = engine_match_ring(engine, rq);
2046 if (found)
2047 break;
2048 }
2049
2050 return found;
2051}
2052
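/*
 * Classify the current state of the request: completed, still pending
 * submission, actively executing on the hardware (its ring is programmed
 * into an engine), or queued awaiting execution.
 */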
2053enum i915_request_state i915_test_request_state(struct i915_request *rq)
2054{
2055 if (i915_request_completed(rq))
2056 return I915_REQUEST_COMPLETE;
2057
2058 if (!i915_request_started(rq))
2059 return I915_REQUEST_PENDING;
2060
2061 if (match_ring(rq))
2062 return I915_REQUEST_ACTIVE;
2063
2064 return I915_REQUEST_QUEUED;
2065}
2066
2067#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
2068#include "selftests/mock_request.c"
2069#include "selftests/i915_request.c"
2070#endif
2071
2072void i915_request_module_exit(void)
2073{
2074 kmem_cache_destroy(slab_execute_cbs);
2075 kmem_cache_destroy(slab_requests);
2076}
2077
2078int __init i915_request_module_init(void)
2079{
2080 slab_requests =
2081 kmem_cache_create("i915_request",
2082 sizeof(struct i915_request),
2083 __alignof__(struct i915_request),
2084 SLAB_HWCACHE_ALIGN |
2085 SLAB_RECLAIM_ACCOUNT |
2086 SLAB_TYPESAFE_BY_RCU,
2087 __i915_request_ctor);
2088 if (!slab_requests)
2089 return -ENOMEM;
2090
2091 slab_execute_cbs = KMEM_CACHE(execute_cb,
2092 SLAB_HWCACHE_ALIGN |
2093 SLAB_RECLAIM_ACCOUNT |
2094 SLAB_TYPESAFE_BY_RCU);
2095 if (!slab_execute_cbs)
2096 goto err_requests;
2097
2098 return 0;
2099
2100err_requests:
2101 kmem_cache_destroy(slab_requests);
2102 return -ENOMEM;
2103}
2104