/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#include <linux/debugobjects.h>

#include "gt/intel_context.h"
#include "gt/intel_engine_heartbeat.h"
#include "gt/intel_engine_pm.h"
#include "gt/intel_ring.h"

#include "i915_drv.h"
#include "i915_active.h"
#include "i915_globals.h"

/*
 * Active refs memory management
 *
 * To be more economical with memory, we reap all the i915_active trees as
 * they idle (when we know the active requests are inactive) and allocate the
 * nodes from a local slab cache to hopefully reduce the fragmentation.
 */
static struct i915_global_active {
	struct i915_global base;
	struct kmem_cache *slab_cache;
} global;

struct active_node {
	struct rb_node node;
	struct i915_active_fence base;
	struct i915_active *ref;
	u64 timeline;
};

#define fetch_node(x) rb_entry(READ_ONCE(x), typeof(struct active_node), node)

static inline struct active_node *
node_from_active(struct i915_active_fence *active)
{
	return container_of(active, struct active_node, base);
}

#define take_preallocated_barriers(x) llist_del_all(&(x)->preallocated_barriers)

/*
 * A barrier is a proto-node whose fence slot holds an error pointer instead
 * of a real fence; the callback list entry is then reused to stash the llist
 * link and the owning engine until the barrier is bound to a request.
 */
static inline bool is_barrier(const struct i915_active_fence *active)
{
	return IS_ERR(rcu_access_pointer(active->fence));
}

static inline struct llist_node *barrier_to_ll(struct active_node *node)
{
	GEM_BUG_ON(!is_barrier(&node->base));
	return (struct llist_node *)&node->base.cb.node;
}

static inline struct intel_engine_cs *
__barrier_to_engine(struct active_node *node)
{
	return (struct intel_engine_cs *)READ_ONCE(node->base.cb.node.prev);
}

static inline struct intel_engine_cs *
barrier_to_engine(struct active_node *node)
{
	GEM_BUG_ON(!is_barrier(&node->base));
	return __barrier_to_engine(node);
}

static inline struct active_node *barrier_from_ll(struct llist_node *x)
{
	return container_of((struct list_head *)x,
			    struct active_node, base.cb.node);
}

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM) && IS_ENABLED(CONFIG_DEBUG_OBJECTS)

static void *active_debug_hint(void *addr)
{
	struct i915_active *ref = addr;

	return (void *)ref->active ?: (void *)ref->retire ?: (void *)ref;
}

static const struct debug_obj_descr active_debug_desc = {
	.name = "i915_active",
	.debug_hint = active_debug_hint,
};

static void debug_active_init(struct i915_active *ref)
{
	debug_object_init(ref, &active_debug_desc);
}

static void debug_active_activate(struct i915_active *ref)
{
	lockdep_assert_held(&ref->tree_lock);
	if (!atomic_read(&ref->count))
		debug_object_activate(ref, &active_debug_desc);
}

static void debug_active_deactivate(struct i915_active *ref)
{
	lockdep_assert_held(&ref->tree_lock);
	if (!atomic_read(&ref->count))
		debug_object_deactivate(ref, &active_debug_desc);
}

static void debug_active_fini(struct i915_active *ref)
{
	debug_object_free(ref, &active_debug_desc);
}

static void debug_active_assert(struct i915_active *ref)
{
	debug_object_assert_init(ref, &active_debug_desc);
}

#else

static inline void debug_active_init(struct i915_active *ref) { }
static inline void debug_active_activate(struct i915_active *ref) { }
static inline void debug_active_deactivate(struct i915_active *ref) { }
static inline void debug_active_fini(struct i915_active *ref) { }
static inline void debug_active_assert(struct i915_active *ref) { }

#endif

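/*
 * Called once the last reference to the i915_active is dropped: pull the
 * remaining nodes out of the rbtree (keeping only the most-recently-used
 * node as a cache for quick reuse), invoke the optional retire callback,
 * wake any waiters and return the idle nodes to the slab cache.
 */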
static void
__active_retire(struct i915_active *ref)
{
	struct rb_root root = RB_ROOT;
	struct active_node *it, *n;
	unsigned long flags;

	GEM_BUG_ON(i915_active_is_idle(ref));

	/* return the unused nodes to our slabcache -- flushing the allocator */
	if (!atomic_dec_and_lock_irqsave(&ref->count, &ref->tree_lock, flags))
		return;

	GEM_BUG_ON(rcu_access_pointer(ref->excl.fence));
	debug_active_deactivate(ref);

	/* Even if we have not used the cache, we may still have a barrier */
	if (!ref->cache)
		ref->cache = fetch_node(ref->tree.rb_node);

	/* Keep the MRU cached node for reuse */
	if (ref->cache) {
		/* Discard all other nodes in the tree */
		rb_erase(&ref->cache->node, &ref->tree);
		root = ref->tree;

		/* Rebuild the tree with only the cached node */
		rb_link_node(&ref->cache->node, NULL, &ref->tree.rb_node);
		rb_insert_color(&ref->cache->node, &ref->tree);
		GEM_BUG_ON(ref->tree.rb_node != &ref->cache->node);

		/* Make the cached node available for reuse with any timeline */
		ref->cache->timeline = 0; /* needs cmpxchg(u64) */
	}

	spin_unlock_irqrestore(&ref->tree_lock, flags);

	/* After the final retire, the entire struct may be freed */
	if (ref->retire)
		ref->retire(ref);

	/* ... except if you wait on it, you must manage your own references! */
	wake_up_var(ref);

	/* Finally free the discarded nodes of the old tree */
	rbtree_postorder_for_each_entry_safe(it, n, &root, node) {
		GEM_BUG_ON(i915_active_fence_isset(&it->base));
		kmem_cache_free(global.slab_cache, it);
	}
}

static void
active_work(struct work_struct *wrk)
{
	struct i915_active *ref = container_of(wrk, typeof(*ref), work);

	GEM_BUG_ON(!atomic_read(&ref->count));
	if (atomic_add_unless(&ref->count, -1, 1))
		return;

	__active_retire(ref);
}

static void
active_retire(struct i915_active *ref)
{
	GEM_BUG_ON(!atomic_read(&ref->count));
	if (atomic_add_unless(&ref->count, -1, 1))
		return;

	if (ref->flags & I915_ACTIVE_RETIRE_SLEEPS) {
		queue_work(system_unbound_wq, &ref->work);
		return;
	}

	__active_retire(ref);
}

static inline struct dma_fence **
__active_fence_slot(struct i915_active_fence *active)
{
	return (struct dma_fence ** __force)&active->fence;
}

static inline bool
active_fence_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	struct i915_active_fence *active =
		container_of(cb, typeof(*active), cb);

	return cmpxchg(__active_fence_slot(active), fence, NULL) == fence;
}

static void
node_retire(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	if (active_fence_cb(fence, cb))
		active_retire(container_of(cb, struct active_node, base.cb)->ref);
}

static void
excl_retire(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	if (active_fence_cb(fence, cb))
		active_retire(container_of(cb, struct i915_active, excl.cb));
}

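/*
 * Find the node tracking @idx, first checking the single-entry MRU cache
 * and only then walking the rbtree. The walk is performed without the
 * tree_lock, so a concurrent rotation may cause us to miss the target;
 * callers must be prepared to retry under the lock.
 */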
static struct active_node *__active_lookup(struct i915_active *ref, u64 idx)
{
	struct active_node *it;

	GEM_BUG_ON(idx == 0); /* 0 is the unordered timeline, rsvd for cache */

	/*
	 * We track the most recently used timeline to skip a rbtree search
	 * for the common case, under typical loads we never need the rbtree
	 * at all. We can reuse the last slot if it is empty, that is
	 * after the current slot has been retired, but only if it has the
	 * same timeline: to reuse a cached slot, it must belong to the
	 * same client.
	 */
	it = READ_ONCE(ref->cache);
	if (it) {
		u64 cached = READ_ONCE(it->timeline);

		/* Once claimed, this slot will only belong to this idx */
		if (cached == idx)
			return it;

		/*
		 * An unclaimed cache [.timeline=0] can only be claimed once.
		 *
		 * If the value is already non-zero, some other thread has
		 * claimed the cache and we know that it does not match our
		 * idx. If, and only if, the timeline is currently zero is it
		 * worth competing to claim it atomically for ourselves (for
		 * only the winner of that race will cmpxchg return the old
		 * value of 0).
		 */
		if (!cached && !cmpxchg64(&it->timeline, 0, idx))
			return it;
	}

	BUILD_BUG_ON(offsetof(typeof(*it), node)); /* so fetch_node(NULL) is NULL */

	/* While active, the tree can only be built; not destroyed */
	GEM_BUG_ON(i915_active_is_idle(ref));

	it = fetch_node(ref->tree.rb_node);
	while (it) {
		if (it->timeline < idx) {
			it = fetch_node(it->node.rb_right);
		} else if (it->timeline > idx) {
			it = fetch_node(it->node.rb_left);
		} else {
			WRITE_ONCE(ref->cache, it);
			break;
		}
	}

	/* NB: If the tree rotated beneath us, we may miss our target. */
	return it;
}

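/*
 * Look up, or allocate and insert, the tracking node for @idx. Allocation
 * happens outside the tree_lock; if another thread beats us to the insert,
 * the preallocated node is simply returned to the slab.
 */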
static struct i915_active_fence *
active_instance(struct i915_active *ref, u64 idx)
{
	struct active_node *node, *prealloc;
	struct rb_node **p, *parent;

	node = __active_lookup(ref, idx);
	if (likely(node))
		return &node->base;

	/* Preallocate a replacement, just in case */
	prealloc = kmem_cache_alloc(global.slab_cache, GFP_KERNEL);
	if (!prealloc)
		return NULL;

	spin_lock_irq(&ref->tree_lock);
	GEM_BUG_ON(i915_active_is_idle(ref));

	parent = NULL;
	p = &ref->tree.rb_node;
	while (*p) {
		parent = *p;

		node = rb_entry(parent, struct active_node, node);
		if (node->timeline == idx) {
			kmem_cache_free(global.slab_cache, prealloc);
			goto out;
		}

		if (node->timeline < idx)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}

	node = prealloc;
	__i915_active_fence_init(&node->base, NULL, node_retire);
	node->ref = ref;
	node->timeline = idx;

	rb_link_node(&node->node, parent, p);
	rb_insert_color(&node->node, &ref->tree);

out:
	WRITE_ONCE(ref->cache, node);
	spin_unlock_irq(&ref->tree_lock);

	return &node->base;
}

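/*
 * Set up an i915_active: the optional @active callback runs on first
 * acquire, and @retire runs once the last reference is dropped (deferred
 * to a worker if the callback was marked as able to sleep).
 */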
void __i915_active_init(struct i915_active *ref,
			int (*active)(struct i915_active *ref),
			void (*retire)(struct i915_active *ref),
			struct lock_class_key *mkey,
			struct lock_class_key *wkey)
{
	unsigned long bits;

	debug_active_init(ref);

	ref->flags = 0;
	ref->active = active;
	ref->retire = ptr_unpack_bits(retire, &bits, 2);
	if (bits & I915_ACTIVE_MAY_SLEEP)
		ref->flags |= I915_ACTIVE_RETIRE_SLEEPS;

	spin_lock_init(&ref->tree_lock);
	ref->tree = RB_ROOT;
	ref->cache = NULL;

	init_llist_head(&ref->preallocated_barriers);
	atomic_set(&ref->count, 0);
	__mutex_init(&ref->mutex, "i915_active", mkey);
	__i915_active_fence_init(&ref->excl, NULL, excl_retire);
	INIT_WORK(&ref->work, active_work);
#if IS_ENABLED(CONFIG_LOCKDEP)
	lockdep_init_map(&ref->work.lockdep_map, "i915_active.work", wkey, 0);
#endif
}

static bool ____active_del_barrier(struct i915_active *ref,
				   struct active_node *node,
				   struct intel_engine_cs *engine)
{
	struct llist_node *head = NULL, *tail = NULL;
	struct llist_node *pos, *next;

	GEM_BUG_ON(node->timeline != engine->kernel_context->timeline->fence_context);

	/*
	 * Rebuild the llist excluding our node. We may perform this
	 * outside of the kernel_context timeline mutex and so someone
	 * else may be manipulating the engine->barrier_tasks, in
	 * which case either we or they will be upset :)
	 *
	 * A second __active_del_barrier() will report failure to claim
	 * the active_node and the caller will just shrug and know not to
	 * claim ownership of its node.
	 *
	 * A concurrent i915_request_add_active_barriers() will miss adding
	 * any of the tasks, but we will try again on the next -- and since
	 * we are actively using the barrier, we know that there will be
	 * at least another opportunity when we idle.
	 */
	llist_for_each_safe(pos, next, llist_del_all(&engine->barrier_tasks)) {
		if (node == barrier_from_ll(pos)) {
			node = NULL;
			continue;
		}

		pos->next = head;
		head = pos;
		if (!tail)
			tail = pos;
	}
	if (head)
		llist_add_batch(head, tail, &engine->barrier_tasks);

	return !node;
}

static bool
__active_del_barrier(struct i915_active *ref, struct active_node *node)
{
	return ____active_del_barrier(ref, node, barrier_to_engine(node));
}

static bool
replace_barrier(struct i915_active *ref, struct i915_active_fence *active)
{
	if (!is_barrier(active)) /* proto-node used by our idle barrier? */
		return false;

	/*
	 * This slot is currently claimed by a pending idle-barrier on the
	 * kernel_context timeline; reclaim it from the engine's barrier
	 * list so that the caller can substitute a real fence.
	 */
	__active_del_barrier(ref, node_from_active(active));
	return true;
}

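/*
 * Track @fence as the most recent activity on timeline @idx of this
 * i915_active. If the slot currently holds a preallocated idle barrier,
 * the barrier is cancelled and replaced by the real fence.
 */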
int i915_active_ref(struct i915_active *ref, u64 idx, struct dma_fence *fence)
{
	struct i915_active_fence *active;
	int err;

	/* Prevent reaping in case we malloc/wait while building the tree */
	err = i915_active_acquire(ref);
	if (err)
		return err;

	active = active_instance(ref, idx);
	if (!active) {
		err = -ENOMEM;
		goto out;
	}

	if (replace_barrier(ref, active)) {
		RCU_INIT_POINTER(active->fence, NULL);
		atomic_dec(&ref->count);
	}
	if (!__i915_active_fence_set(active, fence))
		__i915_active_acquire(ref);

out:
	i915_active_release(ref);
	return err;
}

static struct dma_fence *
__i915_active_set_fence(struct i915_active *ref,
			struct i915_active_fence *active,
			struct dma_fence *fence)
{
	struct dma_fence *prev;

	if (replace_barrier(ref, active)) {
		RCU_INIT_POINTER(active->fence, fence);
		return NULL;
	}

	rcu_read_lock();
	prev = __i915_active_fence_set(active, fence);
	if (prev)
		prev = dma_fence_get_rcu(prev);
	else
		__i915_active_acquire(ref);
	rcu_read_unlock();

	return prev;
}

static struct i915_active_fence *
__active_fence(struct i915_active *ref, u64 idx)
{
	struct active_node *it;

	it = __active_lookup(ref, idx);
	if (unlikely(!it)) { /* contention with a parallel tree builder! */
		spin_lock_irq(&ref->tree_lock);
		it = __active_lookup(ref, idx);
		spin_unlock_irq(&ref->tree_lock);
	}
	GEM_BUG_ON(!it); /* slot must be preallocated */

	return &it->base;
}

struct dma_fence *
__i915_active_ref(struct i915_active *ref, u64 idx, struct dma_fence *fence)
{
	/* We expect the caller to manage the exclusive timeline ordering */
	return __i915_active_set_fence(ref, __active_fence(ref, idx), fence);
}

struct dma_fence *
i915_active_set_exclusive(struct i915_active *ref, struct dma_fence *f)
{
	/* We expect the caller to manage the exclusive timeline ordering */
	return __i915_active_set_fence(ref, &ref->excl, f);
}

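/*
 * Take a reference only if the i915_active is already non-idle; used to
 * peek at in-flight trackers without forcing an activation.
 */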
bool i915_active_acquire_if_busy(struct i915_active *ref)
{
	debug_active_assert(ref);
	return atomic_add_unless(&ref->count, 1, 0);
}

static void __i915_active_activate(struct i915_active *ref)
{
	spin_lock_irq(&ref->tree_lock);
	if (!atomic_fetch_inc(&ref->count))
		debug_active_activate(ref);
	spin_unlock_irq(&ref->tree_lock);
}

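/*
 * Acquire a reference, invoking the optional ref->active() callback on the
 * idle-to-active transition. The callback is serialised under ref->mutex
 * so that it only runs once per activation.
 */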
int i915_active_acquire(struct i915_active *ref)
{
	int err;

	if (i915_active_acquire_if_busy(ref))
		return 0;

	if (!ref->active) {
		__i915_active_activate(ref);
		return 0;
	}

	err = mutex_lock_interruptible(&ref->mutex);
	if (err)
		return err;

	if (likely(!i915_active_acquire_if_busy(ref))) {
		err = ref->active(ref);
		if (!err)
			__i915_active_activate(ref);
	}

	mutex_unlock(&ref->mutex);

	return err;
}

int i915_active_acquire_for_context(struct i915_active *ref, u64 idx)
{
	struct i915_active_fence *active;
	int err;

	err = i915_active_acquire(ref);
	if (err)
		return err;

	active = active_instance(ref, idx);
	if (!active) {
		i915_active_release(ref);
		return -ENOMEM;
	}

	return 0;
}

void i915_active_release(struct i915_active *ref)
{
	debug_active_assert(ref);
	active_retire(ref);
}

static void enable_signaling(struct i915_active_fence *active)
{
	struct dma_fence *fence;

	if (unlikely(is_barrier(active)))
		return;

	fence = i915_active_fence_get(active);
	if (!fence)
		return;

	dma_fence_enable_sw_signaling(fence);
	dma_fence_put(fence);
}

static int flush_barrier(struct active_node *it)
{
	struct intel_engine_cs *engine;

	if (likely(!is_barrier(&it->base)))
		return 0;

	engine = __barrier_to_engine(it);
	smp_rmb(); /* serialise with add_active_barriers */
	if (!is_barrier(&it->base))
		return 0;

	return intel_engine_flush_barriers(engine);
}

static int flush_lazy_signals(struct i915_active *ref)
{
	struct active_node *it, *n;
	int err = 0;

	enable_signaling(&ref->excl);
	rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
		err = flush_barrier(it);
		if (err)
			break;

		enable_signaling(&it->base);
	}

	return err;
}

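/*
 * Wait until the i915_active is idle. We first flush lazy signaling so
 * that all tracked fences are guaranteed to signal, then sleep on the
 * idle transition and finally flush any deferred retirement work.
 */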
int __i915_active_wait(struct i915_active *ref, int state)
{
	might_sleep();

	/* Kick any outstanding fences so they will signal and let us idle */
	if (i915_active_acquire_if_busy(ref)) {
		int err;

		err = flush_lazy_signals(ref);
		i915_active_release(ref);
		if (err)
			return err;

		if (___wait_var_event(ref, i915_active_is_idle(ref),
				      state, 0, 0, schedule()))
			return -EINTR;
	}

	/*
	 * After the wait is complete, the caller may free the active.
	 * We have to flush any concurrent retirement before returning.
	 */
	flush_work(&ref->work);
	return 0;
}

static int __await_active(struct i915_active_fence *active,
			  int (*fn)(void *arg, struct dma_fence *fence),
			  void *arg)
{
	struct dma_fence *fence;

	if (is_barrier(active))
		return 0;

	fence = i915_active_fence_get(active);
	if (fence) {
		int err;

		err = fn(arg, fence);
		dma_fence_put(fence);
		if (err < 0)
			return err;
	}

	return 0;
}

struct wait_barrier {
	struct wait_queue_entry base;
	struct i915_active *ref;
};

static int
barrier_wake(wait_queue_entry_t *wq, unsigned int mode, int flags, void *key)
{
	struct wait_barrier *wb = container_of(wq, typeof(*wb), base);

	if (i915_active_is_idle(wb->ref)) {
		list_del(&wq->entry);
		i915_sw_fence_complete(wq->private);
		kfree(wq);
	}

	return 0;
}

static int __await_barrier(struct i915_active *ref, struct i915_sw_fence *fence)
{
	struct wait_barrier *wb;

	wb = kmalloc(sizeof(*wb), GFP_KERNEL);
	if (unlikely(!wb))
		return -ENOMEM;

	GEM_BUG_ON(i915_active_is_idle(ref));
	if (!i915_sw_fence_await(fence)) {
		kfree(wb);
		return -EINVAL;
	}

	wb->base.flags = 0;
	wb->base.func = barrier_wake;
	wb->base.private = fence;
	wb->ref = ref;

	add_wait_queue(__var_waitqueue(ref), &wb->base);
	return 0;
}

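/*
 * Ask @fn to wait upon the currently tracked fences: the exclusive fence,
 * every per-timeline fence, and/or the final idle point, depending on the
 * I915_ACTIVE_AWAIT_* flags supplied by the caller.
 */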
static int await_active(struct i915_active *ref,
			unsigned int flags,
			int (*fn)(void *arg, struct dma_fence *fence),
			void *arg, struct i915_sw_fence *barrier)
{
	int err = 0;

	if (!i915_active_acquire_if_busy(ref))
		return 0;

	if (flags & I915_ACTIVE_AWAIT_EXCL &&
	    rcu_access_pointer(ref->excl.fence)) {
		err = __await_active(&ref->excl, fn, arg);
		if (err)
			goto out;
	}

	if (flags & I915_ACTIVE_AWAIT_ACTIVE) {
		struct active_node *it, *n;

		rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
			err = __await_active(&it->base, fn, arg);
			if (err)
				goto out;
		}
	}

	if (flags & I915_ACTIVE_AWAIT_BARRIER) {
		err = flush_lazy_signals(ref);
		if (err)
			goto out;

		err = __await_barrier(ref, barrier);
		if (err)
			goto out;
	}

out:
	i915_active_release(ref);
	return err;
}

static int rq_await_fence(void *arg, struct dma_fence *fence)
{
	return i915_request_await_dma_fence(arg, fence);
}

int i915_request_await_active(struct i915_request *rq,
			      struct i915_active *ref,
			      unsigned int flags)
{
	return await_active(ref, flags, rq_await_fence, rq, &rq->submit);
}

static int sw_await_fence(void *arg, struct dma_fence *fence)
{
	return i915_sw_fence_await_dma_fence(arg, fence, 0,
					     GFP_NOWAIT | __GFP_NOWARN);
}

int i915_sw_fence_await_active(struct i915_sw_fence *fence,
			       struct i915_active *ref,
			       unsigned int flags)
{
	return await_active(ref, flags, sw_await_fence, fence, fence);
}

void i915_active_fini(struct i915_active *ref)
{
	debug_active_fini(ref);
	GEM_BUG_ON(atomic_read(&ref->count));
	GEM_BUG_ON(work_pending(&ref->work));
	mutex_destroy(&ref->mutex);

	if (ref->cache)
		kmem_cache_free(global.slab_cache, ref->cache);
}

static inline bool is_idle_barrier(struct active_node *node, u64 idx)
{
	return node->timeline == idx && !i915_active_fence_isset(&node->base);
}

static struct active_node *reuse_idle_barrier(struct i915_active *ref, u64 idx)
{
	struct rb_node *prev, *p;

	if (RB_EMPTY_ROOT(&ref->tree))
		return NULL;

	GEM_BUG_ON(i915_active_is_idle(ref));

	/*
	 * Try to reuse any existing barrier nodes already allocated for this
	 * i915_active, due to overlapping active phases there is likely a
	 * node kept alive (as we reuse before parking). We prefer to reuse
	 * completely idle barriers (less hassle in manipulating the llists),
	 * but otherwise any will do.
	 */
	if (ref->cache && is_idle_barrier(ref->cache, idx)) {
		p = &ref->cache->node;
		goto match;
	}

	prev = NULL;
	p = ref->tree.rb_node;
	while (p) {
		struct active_node *node =
			rb_entry(p, struct active_node, node);

		if (is_idle_barrier(node, idx))
			goto match;

		prev = p;
		if (node->timeline < idx)
			p = READ_ONCE(p->rb_right);
		else
			p = READ_ONCE(p->rb_left);
	}

	/*
	 * No quick match, but we did find the leftmost rb_node for the
	 * kernel_context. Walk the rb_tree in-order to see if there were
	 * any idle-barriers on this timeline that we missed, or just use
	 * the first pending barrier.
	 */
	for (p = prev; p; p = rb_next(p)) {
		struct active_node *node =
			rb_entry(p, struct active_node, node);
		struct intel_engine_cs *engine;

		if (node->timeline > idx)
			break;

		if (node->timeline < idx)
			continue;

		if (is_idle_barrier(node, idx))
			goto match;

		/*
		 * The list of pending barriers is protected by the
		 * kernel_context timeline, which notably we do not hold
		 * here. i915_request_add_active_barriers() may consume
		 * the barrier before we claim it, so we have to check
		 * for success.
		 */
		engine = __barrier_to_engine(node);
		smp_rmb(); /* serialise with add_active_barriers */
		if (is_barrier(&node->base) &&
		    ____active_del_barrier(ref, node, engine))
			goto match;
	}

	return NULL;

match:
	spin_lock_irq(&ref->tree_lock);
	rb_erase(p, &ref->tree); /* Hide from waits and sibling allocations */
	if (p == &ref->cache->node)
		WRITE_ONCE(ref->cache, NULL);
	spin_unlock_irq(&ref->tree_lock);

	return rb_entry(p, struct active_node, node);
}

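/*
 * Preallocate one idle-barrier node per physical engine backing @engine
 * (a virtual engine has several siblings). The nodes are stashed on
 * ref->preallocated_barriers until i915_active_acquire_barrier() commits
 * them into the tree and onto each engine's barrier_tasks list.
 */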
int i915_active_acquire_preallocate_barrier(struct i915_active *ref,
					    struct intel_engine_cs *engine)
{
	intel_engine_mask_t tmp, mask = engine->mask;
	struct llist_node *first = NULL, *last = NULL;
	struct intel_gt *gt = engine->gt;

	GEM_BUG_ON(i915_active_is_idle(ref));

	/* Wait until the previous preallocation is completed */
	while (!llist_empty(&ref->preallocated_barriers))
		cond_resched();

	/*
	 * Preallocate a node for each physical engine supporting the target
	 * engine (remember virtual engines have more than one sibling).
	 * We can then use the preallocated nodes in
	 * i915_active_acquire_barrier()
	 */
	GEM_BUG_ON(!mask);
	for_each_engine_masked(engine, gt, mask, tmp) {
		u64 idx = engine->kernel_context->timeline->fence_context;
		struct llist_node *prev = first;
		struct active_node *node;

		rcu_read_lock();
		node = reuse_idle_barrier(ref, idx);
		rcu_read_unlock();
		if (!node) {
			node = kmem_cache_alloc(global.slab_cache, GFP_KERNEL);
			if (!node)
				goto unwind;

			RCU_INIT_POINTER(node->base.fence, NULL);
			node->base.cb.func = node_retire;
			node->timeline = idx;
			node->ref = ref;
		}

		if (!i915_active_fence_isset(&node->base)) {
			/*
			 * Mark this as being *our* unconnected proto-node.
			 *
			 * Since this node is not in any list, and we have
			 * decoupled it from the rbtree, we can reuse the
			 * request to indicate this is an idle-barrier node
			 * and then we can use the rcu-protected pointer to
			 * the engine to determine the idleness of an engine.
			 */
			RCU_INIT_POINTER(node->base.fence, ERR_PTR(-EAGAIN));
			node->base.cb.node.prev = (void *)engine;
			__i915_active_acquire(ref);
		}
		GEM_BUG_ON(rcu_access_pointer(node->base.fence) != ERR_PTR(-EAGAIN));

		GEM_BUG_ON(barrier_to_engine(node) != engine);
		first = barrier_to_ll(node);
		first->next = prev;
		if (!last)
			last = first;
		intel_engine_pm_get(engine);
	}

	GEM_BUG_ON(!llist_empty(&ref->preallocated_barriers));
	llist_add_batch(first, last, &ref->preallocated_barriers);

	return 0;

unwind:
	while (first) {
		struct active_node *node = barrier_from_ll(first);

		first = first->next;

		atomic_dec(&ref->count);
		intel_engine_pm_put(barrier_to_engine(node));

		kmem_cache_free(global.slab_cache, node);
	}
	return -ENOMEM;
}

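/*
 * Transfer the preallocated barrier nodes into the i915_active rbtree as
 * proto-nodes and queue them on each engine's barrier_tasks list, where
 * i915_request_add_active_barriers() will later bind them to the request
 * that finally releases them.
 */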
void i915_active_acquire_barrier(struct i915_active *ref)
{
	struct llist_node *pos, *next;
	unsigned long flags;

	GEM_BUG_ON(i915_active_is_idle(ref));

	llist_for_each_safe(pos, next, take_preallocated_barriers(ref)) {
		struct active_node *node = barrier_from_ll(pos);
		struct intel_engine_cs *engine = barrier_to_engine(node);
		struct rb_node **p, *parent;

		spin_lock_irqsave_nested(&ref->tree_lock, flags,
					 SINGLE_DEPTH_NESTING);
		parent = NULL;
		p = &ref->tree.rb_node;
		while (*p) {
			struct active_node *it;

			parent = *p;

			it = rb_entry(parent, struct active_node, node);
			if (it->timeline < node->timeline)
				p = &parent->rb_right;
			else
				p = &parent->rb_left;
		}
		rb_link_node(&node->node, parent, p);
		rb_insert_color(&node->node, &ref->tree);
		spin_unlock_irqrestore(&ref->tree_lock, flags);

		GEM_BUG_ON(!intel_engine_pm_is_awake(engine));
		llist_add(barrier_to_ll(node), &engine->barrier_tasks);
		intel_engine_pm_put_delay(engine, 1);
	}
}

static struct dma_fence **ll_to_fence_slot(struct llist_node *node)
{
	return __active_fence_slot(&barrier_from_ll(node)->base);
}

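/*
 * Attach every barrier queued on this engine to @rq, so that each barrier's
 * reference on its i915_active is released (via node_retire) when the
 * request's fence signals. The request must be on the engine's
 * kernel_context timeline.
 */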
void i915_request_add_active_barriers(struct i915_request *rq)
{
	struct intel_engine_cs *engine = rq->engine;
	struct llist_node *node, *next;
	unsigned long flags;

	GEM_BUG_ON(!intel_context_is_barrier(rq->context));
	GEM_BUG_ON(intel_engine_is_virtual(engine));
	GEM_BUG_ON(i915_request_timeline(rq) != engine->kernel_context->timeline);

	node = llist_del_all(&engine->barrier_tasks);
	if (!node)
		return;

	/*
	 * Attach the list of proto-fences to the in-flight request such
	 * that the PF will be released when the CB is.
	 */
	spin_lock_irqsave(&rq->lock, flags);
	llist_for_each_safe(node, next, node) {
		/* serialise with reuse_idle_barrier */
		smp_store_mb(*ll_to_fence_slot(node), &rq->fence);
		list_add_tail((struct list_head *)node, &rq->fence.cb_list);
	}
	spin_unlock_irqrestore(&rq->lock, flags);
}

/*
 * __i915_active_fence_set: Update the last active fence along its timeline
 * @active: the active tracker
 * @fence: the new fence (under construction)
 *
 * Records the new @fence as the last active fence along its timeline in
 * this active tracker, moving the tracking callbacks from the previous
 * fence onto this one. Returns the previous fence (if not already completed),
 * which the caller must ensure is executed before the new fence. To ensure
 * that the order of fences within the timeline of the i915_active_fence is
 * understood, it should be locked by the caller.
 */
struct dma_fence *
__i915_active_fence_set(struct i915_active_fence *active,
			struct dma_fence *fence)
{
	struct dma_fence *prev;
	unsigned long flags;

	if (fence == rcu_access_pointer(active->fence))
		return fence;

	GEM_BUG_ON(test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags));

	/*
	 * Move the tracker's callback from the previous fence (if any) onto
	 * the new fence. We hold the new fence's lock across the xchg and
	 * the list manipulation, and take the previous fence's lock (nested)
	 * to unhook the callback from its cb_list, so that a concurrent
	 * signal/callback on the old fence and other callers updating the
	 * same timeline observe a consistent transition.
	 */
	spin_lock_irqsave(fence->lock, flags);
	prev = xchg(__active_fence_slot(active), fence);
	if (prev) {
		GEM_BUG_ON(prev == fence);
		spin_lock_nested(prev->lock, SINGLE_DEPTH_NESTING);
		__list_del_entry(&active->cb.node);
		spin_unlock(prev->lock);
	}
	list_add_tail(&active->cb.node, &fence->cb_list);
	spin_unlock_irqrestore(fence->lock, flags);

	return prev;
}

int i915_active_fence_set(struct i915_active_fence *active,
			  struct i915_request *rq)
{
	struct dma_fence *fence;
	int err = 0;

	/* Must maintain timeline ordering wrt previous active requests */
	rcu_read_lock();
	fence = __i915_active_fence_set(active, &rq->fence);
	if (fence)
		fence = dma_fence_get_rcu(fence);
	rcu_read_unlock();
	if (fence) {
		err = i915_request_await_dma_fence(rq, fence);
		dma_fence_put(fence);
	}

	return err;
}

void i915_active_noop(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	active_fence_cb(fence, cb);
}

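/*
 * An auto_active wraps an i915_active with a kref so that it manages its
 * own lifetime: the tracker takes a reference to itself while active and
 * drops it again upon retirement.
 */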
struct auto_active {
	struct i915_active base;
	struct kref ref;
};

struct i915_active *i915_active_get(struct i915_active *ref)
{
	struct auto_active *aa = container_of(ref, typeof(*aa), base);

	kref_get(&aa->ref);
	return &aa->base;
}

static void auto_release(struct kref *ref)
{
	struct auto_active *aa = container_of(ref, typeof(*aa), ref);

	i915_active_fini(&aa->base);
	kfree(aa);
}

void i915_active_put(struct i915_active *ref)
{
	struct auto_active *aa = container_of(ref, typeof(*aa), base);

	kref_put(&aa->ref, auto_release);
}

static int auto_active(struct i915_active *ref)
{
	i915_active_get(ref);
	return 0;
}

__i915_active_call static void
auto_retire(struct i915_active *ref)
{
	i915_active_put(ref);
}

struct i915_active *i915_active_create(void)
{
	struct auto_active *aa;

	aa = kmalloc(sizeof(*aa), GFP_KERNEL);
	if (!aa)
		return NULL;

	kref_init(&aa->ref);
	i915_active_init(&aa->base, auto_active, auto_retire);

	return &aa->base;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_active.c"
#endif

static void i915_global_active_shrink(void)
{
	kmem_cache_shrink(global.slab_cache);
}

static void i915_global_active_exit(void)
{
	kmem_cache_destroy(global.slab_cache);
}

static struct i915_global_active global = { {
	.shrink = i915_global_active_shrink,
	.exit = i915_global_active_exit,
} };

int __init i915_global_active_init(void)
{
	global.slab_cache = KMEM_CACHE(active_node, SLAB_HWCACHE_ALIGN);
	if (!global.slab_cache)
		return -ENOMEM;

	i915_global_register(&global.base);
	return 0;
}