1
2
3
4
5
6
7
8
9
10
11
12#include <linux/export.h>
13#include <linux/slab.h>
14#include <linux/dma-fence-array.h>
15
/* Driver name reported for every array fence. */
static const char *dma_fence_array_get_driver_name(struct dma_fence *fence)
{
	return "dma_fence_array";
}
20
/* An array aggregates fences from many contexts, so it has no timeline. */
static const char *dma_fence_array_get_timeline_name(struct dma_fence *fence)
{
	return "unbound";
}
25
/*
 * Deferred signalling, run from irq_work context once the last pending
 * member fence has completed (queued by dma_fence_array_cb_func()).
 * Signals the array fence, then drops the array reference that the
 * callback transferred to this work item. The put must come after the
 * signal: it may free the array.
 */
static void irq_dma_fence_array_work(struct irq_work *wrk)
{
	struct dma_fence_array *array = container_of(wrk, typeof(*array), work);

	dma_fence_signal(&array->base);
	dma_fence_put(&array->base);
}
33
/*
 * Completion callback attached to each member fence by
 * dma_fence_array_enable_signaling(). When the last pending member
 * completes, the array reference held for this callback is handed over
 * to the irq work that signals the array (signalling is deferred via
 * irq_work rather than done here directly); otherwise the reference is
 * simply dropped.
 */
static void dma_fence_array_cb_func(struct dma_fence *f,
				    struct dma_fence_cb *cb)
{
	struct dma_fence_array_cb *array_cb =
		container_of(cb, struct dma_fence_array_cb, cb);
	struct dma_fence_array *array = array_cb->array;

	if (atomic_dec_and_test(&array->num_pending))
		irq_work_queue(&array->work);
	else
		dma_fence_put(&array->base);
}
46
/*
 * Hook up dma_fence_array_cb_func() to every member fence. The callback
 * slots live directly behind the dma_fence_array allocation (see
 * dma_fence_array_create(), which sizes the allocation accordingly).
 */
static bool dma_fence_array_enable_signaling(struct dma_fence *fence)
{
	struct dma_fence_array *array = to_dma_fence_array(fence);
	struct dma_fence_array_cb *cb = (void *)(&array[1]);
	unsigned i;

	for (i = 0; i < array->num_fences; ++i) {
		cb[i].array = array;
		/*
		 * As we may report that the fence is signaled before all
		 * callbacks are complete, we need to take an additional
		 * reference count on the array so that we do not free it too
		 * early. The core fence handling will only hold the reference
		 * until we signal the array as complete (but that is now
		 * insufficient).
		 */
		dma_fence_get(&array->base);
		if (dma_fence_add_callback(array->fences[i], &cb[i].cb,
					   dma_fence_array_cb_func)) {
			/* Member already signaled: drop the ref we just took. */
			dma_fence_put(&array->base);
			if (atomic_dec_and_test(&array->num_pending))
				return false;
		}
	}

	return true;
}
74
75static bool dma_fence_array_signaled(struct dma_fence *fence)
76{
77 struct dma_fence_array *array = to_dma_fence_array(fence);
78
79 return atomic_read(&array->num_pending) <= 0;
80}
81
82static void dma_fence_array_release(struct dma_fence *fence)
83{
84 struct dma_fence_array *array = to_dma_fence_array(fence);
85 unsigned i;
86
87 for (i = 0; i < array->num_fences; ++i)
88 dma_fence_put(array->fences[i]);
89
90 kfree(array->fences);
91 dma_fence_free(fence);
92}
93
/* Fence ops implementing the aggregate (array) fence. */
const struct dma_fence_ops dma_fence_array_ops = {
	.get_driver_name = dma_fence_array_get_driver_name,
	.get_timeline_name = dma_fence_array_get_timeline_name,
	.enable_signaling = dma_fence_array_enable_signaling,
	.signaled = dma_fence_array_signaled,
	.release = dma_fence_array_release,
};
EXPORT_SYMBOL(dma_fence_array_ops);
102
/**
 * dma_fence_array_create - Create a custom fence array
 * @num_fences:		[in]	number of fences to add in the array
 * @fences:		[in]	array containing the fences
 * @context:		[in]	fence context to use
 * @seqno:		[in]	sequence number to use
 * @signal_on_any:	[in]	signal on any fence in the array
 *
 * Allocate a dma_fence_array object and initialize the base fence with
 * dma_fence_init(). Returns NULL on allocation failure.
 *
 * The caller should allocate the @fences array with @num_fences entries
 * and fill it with the fences it wants to add to the object. Ownership
 * of the array is taken: dma_fence_array_release() does dma_fence_put()
 * on each entry and kfree()s the array.
 *
 * If @signal_on_any is true the array signals as soon as any member
 * fence signals (num_pending starts at 1); otherwise it signals only
 * once all member fences have signaled.
 */
struct dma_fence_array *dma_fence_array_create(int num_fences,
					       struct dma_fence **fences,
					       u64 context, unsigned seqno,
					       bool signal_on_any)
{
	struct dma_fence_array *array;
	size_t size = sizeof(*array);

	/* Allocate the callback slots directly behind the array struct;
	 * enable_signaling addresses them as (void *)(&array[1]). */
	size += num_fences * sizeof(struct dma_fence_array_cb);
	array = kzalloc(size, GFP_KERNEL);
	if (!array)
		return NULL;

	spin_lock_init(&array->lock);
	dma_fence_init(&array->base, &dma_fence_array_ops, &array->lock,
		       context, seqno);
	init_irq_work(&array->work, irq_dma_fence_array_work);

	array->num_fences = num_fences;
	atomic_set(&array->num_pending, signal_on_any ? 1 : num_fences);
	array->fences = fences;

	return array;
}
EXPORT_SYMBOL(dma_fence_array_create);
148
149
150
151
152
153
154
155
156
157
158bool dma_fence_match_context(struct dma_fence *fence, u64 context)
159{
160 struct dma_fence_array *array = to_dma_fence_array(fence);
161 unsigned i;
162
163 if (!dma_fence_is_array(fence))
164 return fence->context == context;
165
166 for (i = 0; i < array->num_fences; i++) {
167 if (array->fences[i]->context != context)
168 return false;
169 }
170
171 return true;
172}
173EXPORT_SYMBOL(dma_fence_match_context);
174