/*
 * dma-fence-array: aggregate fences to be waited together
 *
 * Copyright (C) 2016 Collabora Ltd
 * Copyright (C) 2016 Advanced Micro Devices, Inc.
 * Authors:
 *	Gustavo Padovan <gustavo@padovan.org>
 *	Christian König <christian.koenig@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#include <linux/export.h>
#include <linux/slab.h>
#include <linux/dma-fence-array.h>

static const char *dma_fence_array_get_driver_name(struct dma_fence *fence)
{
	return "dma_fence_array";
}

static const char *dma_fence_array_get_timeline_name(struct dma_fence *fence)
{
	return "unbound";
}

static void irq_dma_fence_array_work(struct irq_work *wrk)
{
	struct dma_fence_array *array = container_of(wrk, typeof(*array), work);

	dma_fence_signal(&array->base);
	dma_fence_put(&array->base);
}

static void dma_fence_array_cb_func(struct dma_fence *f,
				    struct dma_fence_cb *cb)
{
	struct dma_fence_array_cb *array_cb =
		container_of(cb, struct dma_fence_array_cb, cb);
	struct dma_fence_array *array = array_cb->array;

	if (atomic_dec_and_test(&array->num_pending))
		irq_work_queue(&array->work);
	else
		dma_fence_put(&array->base);
}

static bool dma_fence_array_enable_signaling(struct dma_fence *fence)
{
	struct dma_fence_array *array = to_dma_fence_array(fence);
	struct dma_fence_array_cb *cb = (void *)(&array[1]);
	unsigned i;

	for (i = 0; i < array->num_fences; ++i) {
		cb[i].array = array;

		/*
		 * As we may report that the fence is signaled before all
		 * callbacks are complete, we need to take an additional
		 * reference count on the array so that we do not free it too
		 * early. The core fence handling will only hold the reference
		 * until we signal the array as complete (but that is now
		 * insufficient).
		 */
		dma_fence_get(&array->base);
		if (dma_fence_add_callback(array->fences[i], &cb[i].cb,
					   dma_fence_array_cb_func)) {
			dma_fence_put(&array->base);
			if (atomic_dec_and_test(&array->num_pending))
				return false;
		}
	}

	return true;
}

static bool dma_fence_array_signaled(struct dma_fence *fence)
{
	struct dma_fence_array *array = to_dma_fence_array(fence);

	return atomic_read(&array->num_pending) <= 0;
}

static void dma_fence_array_release(struct dma_fence *fence)
{
	struct dma_fence_array *array = to_dma_fence_array(fence);
	unsigned i;

	for (i = 0; i < array->num_fences; ++i)
		dma_fence_put(array->fences[i]);

	kfree(array->fences);
	dma_fence_free(fence);
}

const struct dma_fence_ops dma_fence_array_ops = {
	.get_driver_name = dma_fence_array_get_driver_name,
	.get_timeline_name = dma_fence_array_get_timeline_name,
	.enable_signaling = dma_fence_array_enable_signaling,
	.signaled = dma_fence_array_signaled,
	.wait = dma_fence_default_wait,
	.release = dma_fence_array_release,
};
EXPORT_SYMBOL(dma_fence_array_ops);

/**
 * dma_fence_array_create - Create a custom fence array
 * @num_fences:		[in]	number of fences to add in the array
 * @fences:		[in]	array containing the fences
 * @context:		[in]	fence context to use
 * @seqno:		[in]	sequence number to use
 * @signal_on_any:	[in]	signal on any fence in the array
 *
 * Allocate a dma_fence_array object and initialize the base fence with
 * dma_fence_init().
 * In case of error it returns NULL.
 *
 * The caller should allocate the fences array with num_fences size
 * and fill it with the fences it wants to add to the object. Ownership of this
 * array is taken and dma_fence_put() is used on each fence on release.
 *
 * If @signal_on_any is true the fence array signals if any fence in the array
 * signals, otherwise it signals when all fences in the array signal.
 */
struct dma_fence_array *dma_fence_array_create(int num_fences,
					       struct dma_fence **fences,
					       u64 context, unsigned seqno,
					       bool signal_on_any)
{
	struct dma_fence_array *array;
	size_t size = sizeof(*array);

	/* Allocate the callback structures behind the array. */
	size += num_fences * sizeof(struct dma_fence_array_cb);
	array = kzalloc(size, GFP_KERNEL);
	if (!array)
		return NULL;

	spin_lock_init(&array->lock);
	dma_fence_init(&array->base, &dma_fence_array_ops, &array->lock,
		       context, seqno);
	init_irq_work(&array->work, irq_dma_fence_array_work);

	array->num_fences = num_fences;
	atomic_set(&array->num_pending, signal_on_any ? 1 : num_fences);
	array->fences = fences;

	return array;
}
EXPORT_SYMBOL(dma_fence_array_create);
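
/*
 * Usage sketch (illustrative addition, not part of the original file):
 * building an array fence over two fences "a" and "b" so that a single
 * wait covers both. The variables and error handling here are hypothetical;
 * dma_fence_array_create(), dma_fence_context_alloc() and the dma_fence
 * refcounting helpers are the real API. Note the array takes ownership of
 * the fences table and of one reference per fence, so on failure the
 * caller must undo both itself.
 *
 *	struct dma_fence **fences;
 *	struct dma_fence_array *array;
 *	long ret;
 *
 *	fences = kmalloc_array(2, sizeof(*fences), GFP_KERNEL);
 *	if (!fences)
 *		return -ENOMEM;
 *
 *	fences[0] = dma_fence_get(a);
 *	fences[1] = dma_fence_get(b);
 *
 *	array = dma_fence_array_create(2, fences,
 *				       dma_fence_context_alloc(1), 1,
 *				       false);
 *	if (!array) {
 *		dma_fence_put(fences[0]);
 *		dma_fence_put(fences[1]);
 *		kfree(fences);
 *		return -ENOMEM;
 *	}
 *
 *	ret = dma_fence_wait(&array->base, false);
 *	dma_fence_put(&array->base);
 */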

/**
 * dma_fence_match_context - Check if all fences are from the given context
 * @fence:		[in]	fence or fence array
 * @context:		[in]	fence context to check all fences against
 *
 * Checks the provided fence or, for a fence array, all fences in the array
 * against the given context. Returns false if any fence is from a different
 * context.
 */
bool dma_fence_match_context(struct dma_fence *fence, u64 context)
{
	struct dma_fence_array *array = to_dma_fence_array(fence);
	unsigned i;

	if (!dma_fence_is_array(fence))
		return fence->context == context;

	for (i = 0; i < array->num_fences; i++) {
		if (array->fences[i]->context != context)
			return false;
	}

	return true;
}
EXPORT_SYMBOL(dma_fence_match_context);
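
/*
 * Usage sketch (illustrative addition, not part of the original file):
 * skipping a wait when every fence already belongs to the caller's own
 * timeline. "fence" and "my_context" are hypothetical driver state;
 * dma_fence_match_context() and dma_fence_wait() are the real API.
 *
 *	if (!dma_fence_match_context(fence, my_context))
 *		dma_fence_wait(fence, false);
 */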