/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#include <linux/debugobjects.h>

#include "gt/intel_context.h"
#include "gt/intel_engine_heartbeat.h"
#include "gt/intel_engine_pm.h"
#include "gt/intel_ring.h"

#include "i915_drv.h"
#include "i915_active.h"

/*
 * Active refs memory management
 *
 * To be more economical with memory, we reap all the i915_active trees as
 * they idle (when we know the active requests are inactive) and allocate the
 * nodes from a local slab cache to hopefully reduce the fragmentation.
 */
static struct kmem_cache *slab_cache;

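/*
 * An active_node tracks the most recent fence for one timeline (identified
 * by its u64 fence context) within an i915_active. The nodes are kept in an
 * rbtree keyed by timeline, with the most recently used node cached in
 * i915_active.cache to skip the tree walk in the common case.
 */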
struct active_node {
	struct rb_node node;
	struct i915_active_fence base;
	struct i915_active *ref;
	u64 timeline;
};

#define fetch_node(x) rb_entry(READ_ONCE(x), typeof(struct active_node), node)

static inline struct active_node *
node_from_active(struct i915_active_fence *active)
{
	return container_of(active, struct active_node, base);
}

#define take_preallocated_barriers(x) llist_del_all(&(x)->preallocated_barriers)

static inline bool is_barrier(const struct i915_active_fence *active)
{
	return IS_ERR(rcu_access_pointer(active->fence));
}

static inline struct llist_node *barrier_to_ll(struct active_node *node)
{
	GEM_BUG_ON(!is_barrier(&node->base));
	return (struct llist_node *)&node->base.cb.node;
}

static inline struct intel_engine_cs *
__barrier_to_engine(struct active_node *node)
{
	return (struct intel_engine_cs *)READ_ONCE(node->base.cb.node.prev);
}

static inline struct intel_engine_cs *
barrier_to_engine(struct active_node *node)
{
	GEM_BUG_ON(!is_barrier(&node->base));
	return __barrier_to_engine(node);
}

static inline struct active_node *barrier_from_ll(struct llist_node *x)
{
	return container_of((struct list_head *)x,
			    struct active_node, base.cb.node);
}

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM) && IS_ENABLED(CONFIG_DEBUG_OBJECTS)

static void *active_debug_hint(void *addr)
{
	struct i915_active *ref = addr;

	return (void *)ref->active ?: (void *)ref->retire ?: (void *)ref;
}

static const struct debug_obj_descr active_debug_desc = {
	.name = "i915_active",
	.debug_hint = active_debug_hint,
};

static void debug_active_init(struct i915_active *ref)
{
	debug_object_init(ref, &active_debug_desc);
}

static void debug_active_activate(struct i915_active *ref)
{
	lockdep_assert_held(&ref->tree_lock);
	if (!atomic_read(&ref->count))
		debug_object_activate(ref, &active_debug_desc);
}

static void debug_active_deactivate(struct i915_active *ref)
{
	lockdep_assert_held(&ref->tree_lock);
	if (!atomic_read(&ref->count))
		debug_object_deactivate(ref, &active_debug_desc);
}

static void debug_active_fini(struct i915_active *ref)
{
	debug_object_free(ref, &active_debug_desc);
}

static void debug_active_assert(struct i915_active *ref)
{
	debug_object_assert_init(ref, &active_debug_desc);
}

#else

static inline void debug_active_init(struct i915_active *ref) { }
static inline void debug_active_activate(struct i915_active *ref) { }
static inline void debug_active_deactivate(struct i915_active *ref) { }
static inline void debug_active_fini(struct i915_active *ref) { }
static inline void debug_active_assert(struct i915_active *ref) { }

#endif

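/*
 * The final reference has been dropped: retire the tracker. We keep the
 * most recently used node cached for quick reuse on the next activation,
 * and return every other node to the slab cache.
 */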
static void
__active_retire(struct i915_active *ref)
{
	struct rb_root root = RB_ROOT;
	struct active_node *it, *n;
	unsigned long flags;

	GEM_BUG_ON(i915_active_is_idle(ref));

	/* return the unused nodes to our slabcache -- flushing the allocator */
	if (!atomic_dec_and_lock_irqsave(&ref->count, &ref->tree_lock, flags))
		return;

	GEM_BUG_ON(rcu_access_pointer(ref->excl.fence));
	debug_active_deactivate(ref);

	/* Even if we have not used the cache, we may still have a cached node */
	if (!ref->cache)
		ref->cache = fetch_node(ref->tree.rb_node);

	/* Keep the MRU cached node for reuse */
	if (ref->cache) {
		/* Discard all other nodes in the tree */
		rb_erase(&ref->cache->node, &ref->tree);
		root = ref->tree;

		/* Rebuild the tree with only the cached node */
		rb_link_node(&ref->cache->node, NULL, &ref->tree.rb_node);
		rb_insert_color(&ref->cache->node, &ref->tree);
		GEM_BUG_ON(ref->tree.rb_node != &ref->cache->node);

		/* Make the cached node available for reuse with any timeline */
		ref->cache->timeline = 0; /* needs cmpxchg(u64) */
	}

	spin_unlock_irqrestore(&ref->tree_lock, flags);

	/* After the final retire, the entire struct may be freed */
	if (ref->retire)
		ref->retire(ref);

	/* ... except if you wait upon it, you must manage your own references! */
	wake_up_var(ref);

	/* Finally free the discarded timeline tree */
	rbtree_postorder_for_each_entry_safe(it, n, &root, node) {
		GEM_BUG_ON(i915_active_fence_isset(&it->base));
		kmem_cache_free(slab_cache, it);
	}
}

static void
active_work(struct work_struct *wrk)
{
	struct i915_active *ref = container_of(wrk, typeof(*ref), work);

	GEM_BUG_ON(!atomic_read(&ref->count));
	if (atomic_add_unless(&ref->count, -1, 1))
		return;

	__active_retire(ref);
}

static void
active_retire(struct i915_active *ref)
{
	GEM_BUG_ON(!atomic_read(&ref->count));
	if (atomic_add_unless(&ref->count, -1, 1))
		return;

	if (ref->flags & I915_ACTIVE_RETIRE_SLEEPS) {
		queue_work(system_unbound_wq, &ref->work);
		return;
	}

	__active_retire(ref);
}

static inline struct dma_fence **
__active_fence_slot(struct i915_active_fence *active)
{
	return (struct dma_fence ** __force)&active->fence;
}

static inline bool
active_fence_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	struct i915_active_fence *active =
		container_of(cb, typeof(*active), cb);

	return cmpxchg(__active_fence_slot(active), fence, NULL) == fence;
}

static void
node_retire(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	if (active_fence_cb(fence, cb))
		active_retire(container_of(cb, struct active_node, base.cb)->ref);
}

static void
excl_retire(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	if (active_fence_cb(fence, cb))
		active_retire(container_of(cb, struct i915_active, excl.cb));
}

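/*
 * Find the node tracking timeline @idx, first checking the single-entry
 * cache before falling back to a lockless walk of the rbtree.
 */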
static struct active_node *__active_lookup(struct i915_active *ref, u64 idx)
{
	struct active_node *it;

	GEM_BUG_ON(idx == 0); /* 0 is the unordered timeline, rsvd for cache */

	/*
	 * We track the most recently used timeline to skip a rbtree search
	 * for the common case, under typical loads we never need the rbtree
	 * at all. We can reuse the last slot if it is empty, that is
	 * after the previous activity has been retired, or if the timeline
	 * matches the current timeline.
	 */
	it = READ_ONCE(ref->cache);
	if (it) {
		u64 cached = READ_ONCE(it->timeline);

		/* Once claimed, this slot will only belong to this idx */
		if (cached == idx)
			return it;

		/*
		 * An unclaimed cache [.timeline=0] can only be claimed once.
		 *
		 * If the value is already non-zero, some other thread has
		 * claimed the cache and we know that it does not match our
		 * idx. If, and only if, the timeline is currently zero is it
		 * worth competing to claim it atomically for ourselves (for
		 * only the winner of that race will cmpxchg return the old
		 * value of 0).
		 */
		if (!cached && !cmpxchg64(&it->timeline, 0, idx))
			return it;
	}

	BUILD_BUG_ON(offsetof(typeof(*it), node)); /* fetch_node() relies on node being first */

	/* While active, the tree can only be built; not destroyed */
	GEM_BUG_ON(i915_active_is_idle(ref));

	it = fetch_node(ref->tree.rb_node);
	while (it) {
		if (it->timeline < idx) {
			it = fetch_node(it->node.rb_right);
		} else if (it->timeline > idx) {
			it = fetch_node(it->node.rb_left);
		} else {
			WRITE_ONCE(ref->cache, it);
			break;
		}
	}

	/* NB: If the tree rotated beneath us, we may miss our target. */
	return it;
}

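/*
 * Lookup the node tracking @idx, allocating and inserting a new one if this
 * timeline has not been tracked before. Returns NULL on allocation failure.
 */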
static struct i915_active_fence *
active_instance(struct i915_active *ref, u64 idx)
{
	struct active_node *node;
	struct rb_node **p, *parent;

	node = __active_lookup(ref, idx);
	if (likely(node))
		return &node->base;

	spin_lock_irq(&ref->tree_lock);
	GEM_BUG_ON(i915_active_is_idle(ref));

	parent = NULL;
	p = &ref->tree.rb_node;
	while (*p) {
		parent = *p;

		node = rb_entry(parent, struct active_node, node);
		if (node->timeline == idx)
			goto out;

		if (node->timeline < idx)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}

	/*
	 * XXX: We should preallocate this before i915_active_ref() is ever
	 * called, but we cannot call into fs_reclaim() anyway, so use
	 * GFP_ATOMIC.
	 */
	node = kmem_cache_alloc(slab_cache, GFP_ATOMIC);
	if (!node)
		goto out;

	__i915_active_fence_init(&node->base, NULL, node_retire);
	node->ref = ref;
	node->timeline = idx;

	rb_link_node(&node->node, parent, p);
	rb_insert_color(&node->node, &ref->tree);

out:
	WRITE_ONCE(ref->cache, node);
	spin_unlock_irq(&ref->tree_lock);

	return &node->base;
}

void __i915_active_init(struct i915_active *ref,
			int (*active)(struct i915_active *ref),
			void (*retire)(struct i915_active *ref),
			unsigned long flags,
			struct lock_class_key *mkey,
			struct lock_class_key *wkey)
{
	debug_active_init(ref);

	ref->flags = flags;
	ref->active = active;
	ref->retire = retire;

	spin_lock_init(&ref->tree_lock);
	ref->tree = RB_ROOT;
	ref->cache = NULL;

	init_llist_head(&ref->preallocated_barriers);
	atomic_set(&ref->count, 0);
	__mutex_init(&ref->mutex, "i915_active", mkey);
	__i915_active_fence_init(&ref->excl, NULL, excl_retire);
	INIT_WORK(&ref->work, active_work);
#if IS_ENABLED(CONFIG_LOCKDEP)
	lockdep_init_map(&ref->work.lockdep_map, "i915_active.work", wkey, 0);
#endif
}

static bool ____active_del_barrier(struct i915_active *ref,
				   struct active_node *node,
				   struct intel_engine_cs *engine)
{
	struct llist_node *head = NULL, *tail = NULL;
	struct llist_node *pos, *next;

	GEM_BUG_ON(node->timeline != engine->kernel_context->timeline->fence_context);

	/*
	 * Rebuild the llist excluding our node. We may perform this
	 * outside of the kernel_context timeline, and so cannot use
	 * llist_del_first() (there may be other consumers). Instead we
	 * take ownership of the whole list with llist_del_all(), filter
	 * out our node, and put the remainder back as a batch.
	 *
	 * If we did not find our node in the list, another thread (e.g.
	 * i915_request_add_active_barriers()) has already claimed it and
	 * we must report failure so the caller treats the barrier as
	 * still active.
	 */
	llist_for_each_safe(pos, next, llist_del_all(&engine->barrier_tasks)) {
		if (node == barrier_from_ll(pos)) {
			node = NULL;
			continue;
		}

		pos->next = head;
		head = pos;
		if (!tail)
			tail = pos;
	}
	if (head)
		llist_add_batch(head, tail, &engine->barrier_tasks);

	return !node;
}

static bool
__active_del_barrier(struct i915_active *ref, struct active_node *node)
{
	return ____active_del_barrier(ref, node, barrier_to_engine(node));
}

static bool
replace_barrier(struct i915_active *ref, struct i915_active_fence *active)
{
	if (!is_barrier(active)) /* proto-node used by our idle barrier? */
		return false;

	/*
	 * This request is on the kernel_context timeline, and so
	 * we can use it to substitute for the pending idle-barrier
	 * request that we want to emit on the kernel_context.
	 */
	__active_del_barrier(ref, node_from_active(active));
	return true;
}

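/*
 * i915_active_ref() records @fence as the most recent activity on the
 * timeline @idx. An illustrative usage sketch (the obj->active field here
 * is hypothetical; in the driver see i915_active_add_request()):
 *
 *	err = i915_active_ref(&obj->active,
 *			      i915_request_timeline(rq)->fence_context,
 *			      &rq->fence);
 *
 * after which the i915_active remains busy until the fence signals.
 */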
int i915_active_ref(struct i915_active *ref, u64 idx, struct dma_fence *fence)
{
	struct i915_active_fence *active;
	int err;

	/* Prevent reaping in case we malloc/wait while building the tree */
	err = i915_active_acquire(ref);
	if (err)
		return err;

	active = active_instance(ref, idx);
	if (!active) {
		err = -ENOMEM;
		goto out;
	}

	if (replace_barrier(ref, active)) {
		RCU_INIT_POINTER(active->fence, NULL);
		atomic_dec(&ref->count);
	}
	if (!__i915_active_fence_set(active, fence))
		__i915_active_acquire(ref);

out:
	i915_active_release(ref);
	return err;
}

static struct dma_fence *
__i915_active_set_fence(struct i915_active *ref,
			struct i915_active_fence *active,
			struct dma_fence *fence)
{
	struct dma_fence *prev;

	if (replace_barrier(ref, active)) {
		RCU_INIT_POINTER(active->fence, fence);
		return NULL;
	}

	rcu_read_lock();
	prev = __i915_active_fence_set(active, fence);
	if (prev)
		prev = dma_fence_get_rcu(prev);
	else
		__i915_active_acquire(ref);
	rcu_read_unlock();

	return prev;
}

static struct i915_active_fence *
__active_fence(struct i915_active *ref, u64 idx)
{
	struct active_node *it;

	it = __active_lookup(ref, idx);
	if (unlikely(!it)) { /* Contention with parallel tree builders! */
		spin_lock_irq(&ref->tree_lock);
		it = __active_lookup(ref, idx);
		spin_unlock_irq(&ref->tree_lock);
	}
	GEM_BUG_ON(!it); /* slot must be preallocated */

	return &it->base;
}

struct dma_fence *
__i915_active_ref(struct i915_active *ref, u64 idx, struct dma_fence *fence)
{
	/* Only valid while active, see i915_active_acquire_for_context() */
	return __i915_active_set_fence(ref, __active_fence(ref, idx), fence);
}

struct dma_fence *
i915_active_set_exclusive(struct i915_active *ref, struct dma_fence *f)
{
	/* We expect the caller to manage the exclusive timeline ordering */
	return __i915_active_set_fence(ref, &ref->excl, f);
}

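/* Take a reference only if the i915_active is already active (count > 0) */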
bool i915_active_acquire_if_busy(struct i915_active *ref)
{
	debug_active_assert(ref);
	return atomic_add_unless(&ref->count, 1, 0);
}

static void __i915_active_activate(struct i915_active *ref)
{
	spin_lock_irq(&ref->tree_lock);
	if (!atomic_fetch_inc(&ref->count))
		debug_active_activate(ref);
	spin_unlock_irq(&ref->tree_lock);
}

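/*
 * Acquire a reference, transitioning the i915_active from idle to active if
 * this is the first reference. The optional ref->active() callback is invoked
 * (under ref->mutex) on the idle-to-active transition and may fail, in which
 * case the error is returned and no reference is held. An illustrative
 * pairing sketch:
 *
 *	err = i915_active_acquire(ref);
 *	if (err)
 *		return err;
 *	... record fences with i915_active_ref() ...
 *	i915_active_release(ref);
 */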
int i915_active_acquire(struct i915_active *ref)
{
	int err;

	if (i915_active_acquire_if_busy(ref))
		return 0;

	if (!ref->active) {
		__i915_active_activate(ref);
		return 0;
	}

	err = mutex_lock_interruptible(&ref->mutex);
	if (err)
		return err;

	if (likely(!i915_active_acquire_if_busy(ref))) {
		err = ref->active(ref);
		if (!err)
			__i915_active_activate(ref);
	}

	mutex_unlock(&ref->mutex);

	return err;
}

int i915_active_acquire_for_context(struct i915_active *ref, u64 idx)
{
	struct i915_active_fence *active;
	int err;

	err = i915_active_acquire(ref);
	if (err)
		return err;

	active = active_instance(ref, idx);
	if (!active) {
		i915_active_release(ref);
		return -ENOMEM;
	}

	return 0;
}

void i915_active_release(struct i915_active *ref)
{
	debug_active_assert(ref);
	active_retire(ref);
}

static void enable_signaling(struct i915_active_fence *active)
{
	struct dma_fence *fence;

	if (unlikely(is_barrier(active)))
		return;

	fence = i915_active_fence_get(active);
	if (!fence)
		return;

	dma_fence_enable_sw_signaling(fence);
	dma_fence_put(fence);
}

static int flush_barrier(struct active_node *it)
{
	struct intel_engine_cs *engine;

	if (likely(!is_barrier(&it->base))) /* transferred to a request */
		return 0;

	engine = __barrier_to_engine(it);
	smp_rmb(); /* serialise with add_active_barriers */
	if (!is_barrier(&it->base))
		return 0;

	return intel_engine_flush_barriers(engine);
}

static int flush_lazy_signals(struct i915_active *ref)
{
	struct active_node *it, *n;
	int err = 0;

	enable_signaling(&ref->excl);
	rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
		err = flush_barrier(it); /* recurse into self on death */
		if (err)
			break;

		enable_signaling(&it->base);
	}

	return err;
}

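/*
 * Flush the lazy signaling on all tracked fences and then sleep (in @state)
 * until the i915_active is idle. Returns -EINTR if interrupted while waiting
 * in an interruptible state.
 */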
int __i915_active_wait(struct i915_active *ref, int state)
{
	might_sleep();

	/* Any fence added after the wait begins will not be auto-signaled */
	if (i915_active_acquire_if_busy(ref)) {
		int err;

		err = flush_lazy_signals(ref);
		i915_active_release(ref);
		if (err)
			return err;

		if (___wait_var_event(ref, i915_active_is_idle(ref),
				      state, 0, 0, schedule()))
			return -EINTR;
	}

	/*
	 * After the wait is complete, the caller may free the active.
	 * We have to flush any concurrent retirement before returning.
	 */
	flush_work(&ref->work);
	return 0;
}

static int __await_active(struct i915_active_fence *active,
			  int (*fn)(void *arg, struct dma_fence *fence),
			  void *arg)
{
	struct dma_fence *fence;

	if (is_barrier(active)) /* XXX flush the barrier? */
		return 0;

	fence = i915_active_fence_get(active);
	if (fence) {
		int err;

		err = fn(arg, fence);
		dma_fence_put(fence);
		if (err < 0)
			return err;
	}

	return 0;
}

struct wait_barrier {
	struct wait_queue_entry base;
	struct i915_active *ref;
};

static int
barrier_wake(wait_queue_entry_t *wq, unsigned int mode, int flags, void *key)
{
	struct wait_barrier *wb = container_of(wq, typeof(*wb), base);

	if (i915_active_is_idle(wb->ref)) {
		list_del(&wq->entry);
		i915_sw_fence_complete(wq->private);
		kfree(wq);
	}

	return 0;
}

static int __await_barrier(struct i915_active *ref, struct i915_sw_fence *fence)
{
	struct wait_barrier *wb;

	wb = kmalloc(sizeof(*wb), GFP_KERNEL);
	if (unlikely(!wb))
		return -ENOMEM;

	GEM_BUG_ON(i915_active_is_idle(ref));
	if (!i915_sw_fence_await(fence)) {
		kfree(wb);
		return -EINVAL;
	}

	wb->base.flags = 0;
	wb->base.func = barrier_wake;
	wb->base.private = fence;
	wb->ref = ref;

	add_wait_queue(__var_waitqueue(ref), &wb->base);
	return 0;
}

static int await_active(struct i915_active *ref,
			unsigned int flags,
			int (*fn)(void *arg, struct dma_fence *fence),
			void *arg, struct i915_sw_fence *barrier)
{
	int err = 0;

	if (!i915_active_acquire_if_busy(ref))
		return 0;

	if (flags & I915_ACTIVE_AWAIT_EXCL &&
	    rcu_access_pointer(ref->excl.fence)) {
		err = __await_active(&ref->excl, fn, arg);
		if (err)
			goto out;
	}

	if (flags & I915_ACTIVE_AWAIT_ACTIVE) {
		struct active_node *it, *n;

		rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
			err = __await_active(&it->base, fn, arg);
			if (err)
				goto out;
		}
	}

	if (flags & I915_ACTIVE_AWAIT_BARRIER) {
		err = flush_lazy_signals(ref);
		if (err)
			goto out;

		err = __await_barrier(ref, barrier);
		if (err)
			goto out;
	}

out:
	i915_active_release(ref);
	return err;
}

static int rq_await_fence(void *arg, struct dma_fence *fence)
{
	return i915_request_await_dma_fence(arg, fence);
}

int i915_request_await_active(struct i915_request *rq,
			      struct i915_active *ref,
			      unsigned int flags)
{
	return await_active(ref, flags, rq_await_fence, rq, &rq->submit);
}

static int sw_await_fence(void *arg, struct dma_fence *fence)
{
	return i915_sw_fence_await_dma_fence(arg, fence, 0,
					     GFP_NOWAIT | __GFP_NOWARN);
}

int i915_sw_fence_await_active(struct i915_sw_fence *fence,
			       struct i915_active *ref,
			       unsigned int flags)
{
	return await_active(ref, flags, sw_await_fence, fence, fence);
}

void i915_active_fini(struct i915_active *ref)
{
	debug_active_fini(ref);
	GEM_BUG_ON(atomic_read(&ref->count));
	GEM_BUG_ON(work_pending(&ref->work));
	mutex_destroy(&ref->mutex);

	if (ref->cache)
		kmem_cache_free(slab_cache, ref->cache);
}

static inline bool is_idle_barrier(struct active_node *node, u64 idx)
{
	return node->timeline == idx && !i915_active_fence_isset(&node->base);
}

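/*
 * Search the tree for a barrier node on timeline @idx that may be
 * repurposed: preferably one already idle, but failing that any pending
 * barrier we can steal back from the engine. On success the node is removed
 * from the tree; returns NULL if nothing suitable was found.
 */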
static struct active_node *reuse_idle_barrier(struct i915_active *ref, u64 idx)
{
	struct rb_node *prev, *p;

	if (RB_EMPTY_ROOT(&ref->tree))
		return NULL;

	GEM_BUG_ON(i915_active_is_idle(ref));

	/*
	 * Try to reuse any existing barrier nodes already allocated for this
	 * i915_active, due to overlapping active phases there is likely a
	 * node kept alive (as we reuse before parking). We prefer to reuse
	 * completely idle barriers (less hassle in manipulating the llists),
	 * but otherwise any will do.
	 */
	if (ref->cache && is_idle_barrier(ref->cache, idx)) {
		p = &ref->cache->node;
		goto match;
	}

	prev = NULL;
	p = ref->tree.rb_node;
	while (p) {
		struct active_node *node =
			rb_entry(p, struct active_node, node);

		if (is_idle_barrier(node, idx))
			goto match;

		prev = p;
		if (node->timeline < idx)
			p = READ_ONCE(p->rb_right);
		else
			p = READ_ONCE(p->rb_left);
	}

	/*
	 * No quick match, but we did find the leftmost rb_node for the
	 * kernel_context. Walk the rb_tree in-order to see if there were
	 * any idle-barriers on this timeline that we missed, or just use
	 * the first pending barrier.
	 */
	for (p = prev; p; p = rb_next(p)) {
		struct active_node *node =
			rb_entry(p, struct active_node, node);
		struct intel_engine_cs *engine;

		if (node->timeline > idx)
			break;

		if (node->timeline < idx)
			continue;

		if (is_idle_barrier(node, idx))
			goto match;

		/*
		 * The list of pending barriers is protected by the
		 * kernel_context timeline, which notably we do not hold
		 * here. i915_request_add_active_barriers() may consume
		 * the barrier before we claim it, so we have to check
		 * for success.
		 */
		engine = __barrier_to_engine(node);
		smp_rmb(); /* serialise with add_active_barriers */
		if (is_barrier(&node->base) &&
		    ____active_del_barrier(ref, node, engine))
			goto match;
	}

	return NULL;

match:
	spin_lock_irq(&ref->tree_lock);
	rb_erase(p, &ref->tree); /* Hide from waits and sibling allocations */
	if (p == &ref->cache->node)
		WRITE_ONCE(ref->cache, NULL);
	spin_unlock_irq(&ref->tree_lock);

	return rb_entry(p, struct active_node, node);
}

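/*
 * Preallocate one barrier node for each physical engine backing @engine
 * (a virtual engine has several siblings). The barriers are stashed in
 * ref->preallocated_barriers until i915_active_acquire_barrier() inserts
 * them into the tracker, allowing the allocations to be performed before
 * any timeline locks are taken.
 */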
int i915_active_acquire_preallocate_barrier(struct i915_active *ref,
					    struct intel_engine_cs *engine)
{
	intel_engine_mask_t tmp, mask = engine->mask;
	struct llist_node *first = NULL, *last = NULL;
	struct intel_gt *gt = engine->gt;

	GEM_BUG_ON(i915_active_is_idle(ref));

	/* Wait until the previous preallocation is completed */
	while (!llist_empty(&ref->preallocated_barriers))
		cond_resched();

	/*
	 * Preallocate a node for each physical engine supporting the target
	 * engine (remember virtual engines have more than one sibling).
	 * We can then use the preallocated nodes in
	 * i915_active_acquire_barrier().
	 */
	GEM_BUG_ON(!mask);
	for_each_engine_masked(engine, gt, mask, tmp) {
		u64 idx = engine->kernel_context->timeline->fence_context;
		struct llist_node *prev = first;
		struct active_node *node;

		rcu_read_lock();
		node = reuse_idle_barrier(ref, idx);
		rcu_read_unlock();
		if (!node) {
			node = kmem_cache_alloc(slab_cache, GFP_KERNEL);
			if (!node)
				goto unwind;

			RCU_INIT_POINTER(node->base.fence, NULL);
			node->base.cb.func = node_retire;
			node->timeline = idx;
			node->ref = ref;
		}

		if (!i915_active_fence_isset(&node->base)) {
			/*
			 * Mark this as being *our* unconnected proto-node.
			 *
			 * Since this node is not in any list, and we have
			 * decoupled it from the rbtree, we can reuse the
			 * request to indicate this is an idle-barrier node
			 * and then we can use the rcu'ed pointer as our
			 * engine marker. (We will only claim it if the
			 * node is idle before we acquire it.)
			 */
			RCU_INIT_POINTER(node->base.fence, ERR_PTR(-EAGAIN));
			node->base.cb.node.prev = (void *)engine;
			__i915_active_acquire(ref);
		}
		GEM_BUG_ON(rcu_access_pointer(node->base.fence) != ERR_PTR(-EAGAIN));

		GEM_BUG_ON(barrier_to_engine(node) != engine);
		first = barrier_to_ll(node);
		first->next = prev;
		if (!last)
			last = first;
		intel_engine_pm_get(engine);
	}

	GEM_BUG_ON(!llist_empty(&ref->preallocated_barriers));
	llist_add_batch(first, last, &ref->preallocated_barriers);

	return 0;

unwind:
	while (first) {
		struct active_node *node = barrier_from_ll(first);

		first = first->next;

		atomic_dec(&ref->count);
		intel_engine_pm_put(barrier_to_engine(node));

		kmem_cache_free(slab_cache, node);
	}
	return -ENOMEM;
}

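/*
 * Transfer the preallocated barriers into the i915_active rbtree as
 * proto-nodes and queue them on the engine's barrier_tasks list, so that
 * they become attached to the next kernel-context request; see
 * i915_request_add_active_barriers().
 */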
void i915_active_acquire_barrier(struct i915_active *ref)
{
	struct llist_node *pos, *next;
	unsigned long flags;

	GEM_BUG_ON(i915_active_is_idle(ref));

	/*
	 * Transfer the list of preallocated barriers into the
	 * i915_active rbtree, but only as proto-nodes. They will be
	 * populated by i915_request_add_active_barriers() to point to the
	 * request that will eventually release them.
	 */
	llist_for_each_safe(pos, next, take_preallocated_barriers(ref)) {
		struct active_node *node = barrier_from_ll(pos);
		struct intel_engine_cs *engine = barrier_to_engine(node);
		struct rb_node **p, *parent;

		spin_lock_irqsave_nested(&ref->tree_lock, flags,
					 SINGLE_DEPTH_NESTING);
		parent = NULL;
		p = &ref->tree.rb_node;
		while (*p) {
			struct active_node *it;

			parent = *p;

			it = rb_entry(parent, struct active_node, node);
			if (it->timeline < node->timeline)
				p = &parent->rb_right;
			else
				p = &parent->rb_left;
		}
		rb_link_node(&node->node, parent, p);
		rb_insert_color(&node->node, &ref->tree);
		spin_unlock_irqrestore(&ref->tree_lock, flags);

		GEM_BUG_ON(!intel_engine_pm_is_awake(engine));
		llist_add(barrier_to_ll(node), &engine->barrier_tasks);
		intel_engine_pm_put_delay(engine, 1);
	}
}

static struct dma_fence **ll_to_fence_slot(struct llist_node *node)
{
	return __active_fence_slot(&barrier_from_ll(node)->base);
}

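/*
 * Attach all pending barrier tasks for this engine to @rq, a request on the
 * engine's kernel context timeline. When @rq signals, each barrier fires
 * node_retire(), dropping the i915_active reference it holds.
 */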
void i915_request_add_active_barriers(struct i915_request *rq)
{
	struct intel_engine_cs *engine = rq->engine;
	struct llist_node *node, *next;
	unsigned long flags;

	GEM_BUG_ON(!intel_context_is_barrier(rq->context));
	GEM_BUG_ON(intel_engine_is_virtual(engine));
	GEM_BUG_ON(i915_request_timeline(rq) != engine->kernel_context->timeline);

	node = llist_del_all(&engine->barrier_tasks);
	if (!node)
		return;
	/*
	 * Attach the list of proto-fences to the in-flight request such
	 * that the fence slot is updated and the barrier nodes are retired
	 * when the request itself completes.
	 */
	spin_lock_irqsave(&rq->lock, flags);
	llist_for_each_safe(node, next, node) {
		/* serialise with reuse_idle_barrier */
		smp_store_mb(*ll_to_fence_slot(node), &rq->fence);
		list_add_tail((struct list_head *)node, &rq->fence.cb_list);
	}
	spin_unlock_irqrestore(&rq->lock, flags);
}

/**
 * __i915_active_fence_set: Update the last active fence along its timeline
 * @active: the active tracker
 * @fence: the new fence (under construction)
 *
 * Records the new @fence as the last active fence along its timeline in
 * this active tracker, moving the tracking callbacks from the previous
 * fence onto this one. Returns the previous fence (if not already completed),
 * which the caller must ensure is executed before the new fence. To ensure
 * that the order of fences within the timeline of the i915_active_fence is
 * understood, it should be locked by the caller.
 */
struct dma_fence *
__i915_active_fence_set(struct i915_active_fence *active,
			struct dma_fence *fence)
{
	struct dma_fence *prev;
	unsigned long flags;

	if (fence == rcu_access_pointer(active->fence))
		return fence;

	GEM_BUG_ON(test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags));

	/*
	 * Consider that we have two threads arriving (A and B), with
	 * C already resident as the active->fence.
	 *
	 * A does the xchg first, and so it sees C or NULL depending
	 * on the timing of the interrupt handler. If it is NULL, the
	 * previous fence must have been signaled and we know that
	 * we are first on the timeline. If it is still present,
	 * we acquire the lock on that fence and serialise with the
	 * interrupt handler, in the process removing it from any future
	 * interrupt callback. A then returns the previous fence to the
	 * caller.
	 *
	 * As B is second, it sees A as the previous fence and so waits for
	 * it to complete its transition and takes over the occupancy for
	 * itself -- remembering that it needs to wait on A before executing.
	 */
	spin_lock_irqsave(fence->lock, flags);
	prev = xchg(__active_fence_slot(active), fence);
	if (prev) {
		GEM_BUG_ON(prev == fence);
		spin_lock_nested(prev->lock, SINGLE_DEPTH_NESTING);
		__list_del_entry(&active->cb.node);
		spin_unlock(prev->lock);
	}
	list_add_tail(&active->cb.node, &fence->cb_list);
	spin_unlock_irqrestore(fence->lock, flags);

	return prev;
}

int i915_active_fence_set(struct i915_active_fence *active,
			  struct i915_request *rq)
{
	struct dma_fence *fence;
	int err = 0;

	/* Must maintain timeline ordering wrt previous active requests */
	rcu_read_lock();
	fence = __i915_active_fence_set(active, &rq->fence);
	if (fence) /* but the previous fence may not belong to that timeline! */
		fence = dma_fence_get_rcu(fence);
	rcu_read_unlock();
	if (fence) {
		err = i915_request_await_dma_fence(rq, fence);
		dma_fence_put(fence);
	}

	return err;
}

void i915_active_noop(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	active_fence_cb(fence, cb);
}

struct auto_active {
	struct i915_active base;
	struct kref ref;
};

struct i915_active *i915_active_get(struct i915_active *ref)
{
	struct auto_active *aa = container_of(ref, typeof(*aa), base);

	kref_get(&aa->ref);
	return &aa->base;
}

static void auto_release(struct kref *ref)
{
	struct auto_active *aa = container_of(ref, typeof(*aa), ref);

	i915_active_fini(&aa->base);
	kfree(aa);
}

void i915_active_put(struct i915_active *ref)
{
	struct auto_active *aa = container_of(ref, typeof(*aa), base);

	kref_put(&aa->ref, auto_release);
}

static int auto_active(struct i915_active *ref)
{
	i915_active_get(ref);
	return 0;
}

static void auto_retire(struct i915_active *ref)
{
	i915_active_put(ref);
}

struct i915_active *i915_active_create(void)
{
	struct auto_active *aa;

	aa = kmalloc(sizeof(*aa), GFP_KERNEL);
	if (!aa)
		return NULL;

	kref_init(&aa->ref);
	i915_active_init(&aa->base, auto_active, auto_retire, 0);

	return &aa->base;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_active.c"
#endif

void i915_active_module_exit(void)
{
	kmem_cache_destroy(slab_cache);
}

int __init i915_active_module_init(void)
{
	slab_cache = KMEM_CACHE(active_node, SLAB_HWCACHE_ALIGN);
	if (!slab_cache)
		return -ENOMEM;

	return 0;
}