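/*
 * Fence mechanism for dma-buf and to allow for asynchronous dma access.
 */
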
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/atomic.h>
#include <linux/dma-fence.h>

#define CREATE_TRACE_POINTS
#include <trace/events/dma_fence.h>

EXPORT_TRACEPOINT_SYMBOL(dma_fence_annotate_wait_on);
EXPORT_TRACEPOINT_SYMBOL(dma_fence_emit);

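/*
 * Fence context counter: each execution context should have its own
 * fence context, this allows checking if a later fence operation can be
 * optimized (we need to be sure no fence modifications happen on this
 * context).
 */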
static atomic64_t dma_fence_context_counter = ATOMIC64_INIT(0);

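/**
 * dma_fence_context_alloc - allocate an array of fence contexts
 * @num: amount of contexts to allocate
 *
 * This function reserves @num consecutive fence contexts and returns the
 * first index of the allocated range. The fence context is used for setting
 * fence->context to a unique number by passing it to dma_fence_init().
 */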
u64 dma_fence_context_alloc(unsigned num)
{
	BUG_ON(!num);
	return atomic64_add_return(num, &dma_fence_context_counter) - num;
}
EXPORT_SYMBOL(dma_fence_context_alloc);

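/**
 * dma_fence_signal_locked - signal completion of a fence
 * @fence: the fence to signal
 *
 * Signal completion for software callbacks on a fence, this will unblock
 * dma_fence_wait() calls and run all the callbacks added with
 * dma_fence_add_callback(). Can be called multiple times, but since a fence
 * can only go from unsignaled to signaled state, it will only be effective
 * the first time; later calls return -EINVAL.
 *
 * Unlike dma_fence_signal(), this function must be called with @fence->lock
 * held.
 */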
int dma_fence_signal_locked(struct dma_fence *fence)
{
	struct dma_fence_cb *cur, *tmp;
	int ret = 0;

	if (WARN_ON(!fence))
		return -EINVAL;

	lockdep_assert_held(fence->lock);

	if (!ktime_to_ns(fence->timestamp)) {
		fence->timestamp = ktime_get();
		smp_mb__before_atomic();
	}

	if (test_and_set_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
		ret = -EINVAL;

		/*
		 * we might have raced with the unlocked dma_fence_signal,
		 * still run through all callbacks
		 */
	} else
		trace_dma_fence_signaled(fence);

	list_for_each_entry_safe(cur, tmp, &fence->cb_list, node) {
		list_del_init(&cur->node);
		cur->func(fence, cur);
	}
	return ret;
}
EXPORT_SYMBOL(dma_fence_signal_locked);

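/**
 * dma_fence_signal - signal completion of a fence
 * @fence: the fence to signal
 *
 * Signal completion for software callbacks on a fence, this will unblock
 * dma_fence_wait() calls and run all the callbacks added with
 * dma_fence_add_callback(). Can be called multiple times, but since a fence
 * can only go from unsignaled to signaled state, it will only be effective
 * the first time. Returns -EINVAL if @fence is NULL or already signaled.
 */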
int dma_fence_signal(struct dma_fence *fence)
{
	unsigned long flags;

	if (!fence)
		return -EINVAL;

	if (!ktime_to_ns(fence->timestamp)) {
		fence->timestamp = ktime_get();
		smp_mb__before_atomic();
	}

	if (test_and_set_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		return -EINVAL;

	trace_dma_fence_signaled(fence);

	if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags)) {
		struct dma_fence_cb *cur, *tmp;

		spin_lock_irqsave(fence->lock, flags);
		list_for_each_entry_safe(cur, tmp, &fence->cb_list, node) {
			list_del_init(&cur->node);
			cur->func(fence, cur);
		}
		spin_unlock_irqrestore(fence->lock, flags);
	}
	return 0;
}
EXPORT_SYMBOL(dma_fence_signal);

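/**
 * dma_fence_wait_timeout - sleep until the fence gets signaled
 * or until timeout elapses
 * @fence: the fence to wait on
 * @intr: if true, do an interruptible wait
 * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
 *
 * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or the
 * remaining timeout in jiffies on success. Other error values may be
 * returned on custom implementations.
 *
 * Performs a synchronous wait on this fence. It is assumed the caller
 * directly or indirectly holds a reference to the fence, otherwise the
 * fence might be freed before return, resulting in undefined behavior.
 */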
signed long
dma_fence_wait_timeout(struct dma_fence *fence, bool intr, signed long timeout)
{
	signed long ret;

	if (WARN_ON(timeout < 0))
		return -EINVAL;

	trace_dma_fence_wait_start(fence);
	ret = fence->ops->wait(fence, intr, timeout);
	trace_dma_fence_wait_end(fence);
	return ret;
}
EXPORT_SYMBOL(dma_fence_wait_timeout);

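/**
 * dma_fence_release - default release function for fences
 * @kref: &struct dma_fence.refcount
 *
 * Called on the last dma_fence_put(). Invokes the fence's
 * &dma_fence_ops.release callback if one is provided, otherwise frees the
 * fence with dma_fence_free(). Do not call directly, use dma_fence_put().
 */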
void dma_fence_release(struct kref *kref)
{
	struct dma_fence *fence =
		container_of(kref, struct dma_fence, refcount);

	trace_dma_fence_destroy(fence);

	BUG_ON(!list_empty(&fence->cb_list));

	if (fence->ops->release)
		fence->ops->release(fence);
	else
		dma_fence_free(fence);
}
EXPORT_SYMBOL(dma_fence_release);

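/**
 * dma_fence_free - default release function for &struct dma_fence
 * @fence: fence to release
 *
 * Frees @fence with kfree_rcu(), so the memory stays valid for concurrent
 * RCU readers until a grace period has elapsed.
 */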
void dma_fence_free(struct dma_fence *fence)
{
	kfree_rcu(fence, rcu);
}
EXPORT_SYMBOL(dma_fence_free);

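/**
 * dma_fence_enable_sw_signaling - enable signaling on fence
 * @fence: the fence to enable
 *
 * This will request for sw signaling to be enabled, to make the fence
 * complete as soon as possible. If the fence's enable_signaling callback
 * fails, the fence is signaled immediately.
 */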
void dma_fence_enable_sw_signaling(struct dma_fence *fence)
{
	unsigned long flags;

	if (!test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
			      &fence->flags) &&
	    !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
		trace_dma_fence_enable_signal(fence);

		spin_lock_irqsave(fence->lock, flags);

		if (!fence->ops->enable_signaling(fence))
			dma_fence_signal_locked(fence);

		spin_unlock_irqrestore(fence->lock, flags);
	}
}
EXPORT_SYMBOL(dma_fence_enable_sw_signaling);

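/**
 * dma_fence_add_callback - add a callback to be called when the fence
 * is signaled
 * @fence: the fence to wait on
 * @cb: the callback to register
 * @func: the function to call
 *
 * @cb will be initialized by dma_fence_add_callback(), no initialization
 * by the caller is required. Any number of callbacks can be registered
 * to a fence, but a callback can only be registered to one fence at a
 * time.
 *
 * Note that the callback can be called from an atomic context. If the
 * fence is already signaled, this function will return -ENOENT (and
 * *not* call the callback).
 */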
int dma_fence_add_callback(struct dma_fence *fence, struct dma_fence_cb *cb,
			   dma_fence_func_t func)
{
	unsigned long flags;
	int ret = 0;
	bool was_set;

	if (WARN_ON(!fence || !func))
		return -EINVAL;

	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
		INIT_LIST_HEAD(&cb->node);
		return -ENOENT;
	}

	spin_lock_irqsave(fence->lock, flags);

	was_set = test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
				   &fence->flags);

	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		ret = -ENOENT;
	else if (!was_set) {
		trace_dma_fence_enable_signal(fence);

		if (!fence->ops->enable_signaling(fence)) {
			dma_fence_signal_locked(fence);
			ret = -ENOENT;
		}
	}

	if (!ret) {
		cb->func = func;
		list_add_tail(&cb->node, &fence->cb_list);
	} else
		INIT_LIST_HEAD(&cb->node);
	spin_unlock_irqrestore(fence->lock, flags);

	return ret;
}
EXPORT_SYMBOL(dma_fence_add_callback);

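/**
 * dma_fence_remove_callback - remove a callback from the signaling list
 * @fence: the fence to wait on
 * @cb: the callback to remove
 *
 * Remove a previously queued callback from the fence. This function returns
 * true if the callback is successfully removed, or false if the fence has
 * already been signaled.
 *
 * *WARNING*:
 * Cancelling a callback should only be done if you really know what you're
 * doing, since deadlocks and race conditions could occur all too easily. For
 * this reason, it should only ever be done on hardware lockup recovery,
 * with a reference held to the fence.
 */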
bool
dma_fence_remove_callback(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	unsigned long flags;
	bool ret;

	spin_lock_irqsave(fence->lock, flags);

	ret = !list_empty(&cb->node);
	if (ret)
		list_del_init(&cb->node);

	spin_unlock_irqrestore(fence->lock, flags);

	return ret;
}
EXPORT_SYMBOL(dma_fence_remove_callback);

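/*
 * Callback/task pair used by the default wait implementations to wake up
 * the waiting task when the fence signals.
 */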
struct default_wait_cb {
	struct dma_fence_cb base;
	struct task_struct *task;
};

static void
dma_fence_default_wait_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	struct default_wait_cb *wait =
		container_of(cb, struct default_wait_cb, base);

	wake_up_state(wait->task, TASK_NORMAL);
}

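/**
 * dma_fence_default_wait - default sleep until the fence gets signaled
 * or until timeout elapses
 * @fence: the fence to wait on
 * @intr: if true, do an interruptible wait
 * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
 *
 * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or the
 * remaining timeout in jiffies on success. If @timeout is zero the value
 * one is returned if the fence is already signaled, for consistency with
 * other functions taking a jiffies timeout.
 */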
signed long
dma_fence_default_wait(struct dma_fence *fence, bool intr, signed long timeout)
{
	struct default_wait_cb cb;
	unsigned long flags;
	signed long ret = timeout ? timeout : 1;
	bool was_set;

	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		return ret;

	spin_lock_irqsave(fence->lock, flags);

	if (intr && signal_pending(current)) {
		ret = -ERESTARTSYS;
		goto out;
	}

	was_set = test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
				   &fence->flags);

	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		goto out;

	if (!was_set) {
		trace_dma_fence_enable_signal(fence);

		if (!fence->ops->enable_signaling(fence)) {
			dma_fence_signal_locked(fence);
			goto out;
		}
	}

	cb.base.func = dma_fence_default_wait_cb;
	cb.task = current;
	list_add(&cb.base.node, &fence->cb_list);

	while (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags) && ret > 0) {
		if (intr)
			__set_current_state(TASK_INTERRUPTIBLE);
		else
			__set_current_state(TASK_UNINTERRUPTIBLE);
		spin_unlock_irqrestore(fence->lock, flags);

		ret = schedule_timeout(ret);

		spin_lock_irqsave(fence->lock, flags);
		if (ret > 0 && intr && signal_pending(current))
			ret = -ERESTARTSYS;
	}

	if (!list_empty(&cb.base.node))
		list_del(&cb.base.node);
	__set_current_state(TASK_RUNNING);

out:
	spin_unlock_irqrestore(fence->lock, flags);
	return ret;
}
EXPORT_SYMBOL(dma_fence_default_wait);

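/*
 * Returns true if any fence in the array has signaled, storing the index
 * of the first signaled fence in @idx when @idx is non-NULL.
 */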
static bool
dma_fence_test_signaled_any(struct dma_fence **fences, uint32_t count,
			    uint32_t *idx)
{
	int i;

	for (i = 0; i < count; ++i) {
		struct dma_fence *fence = fences[i];
		if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
			if (idx)
				*idx = i;
			return true;
		}
	}
	return false;
}

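/**
 * dma_fence_wait_any_timeout - sleep until any fence gets signaled
 * or until timeout elapses
 * @fences: array of fences to wait on
 * @count: number of fences to wait on
 * @intr: if true, do an interruptible wait
 * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
 * @idx: the first signaled fence index, meaningful only on positive return
 *
 * Returns -EINVAL on custom fence wait implementation, -ERESTARTSYS if
 * interrupted, 0 if the wait timed out, or the remaining timeout in jiffies
 * on success.
 *
 * Synchronous waits for the first fence in the array to be signaled. The
 * caller needs to hold a reference to all fences in the array, otherwise a
 * fence might be freed before return, resulting in undefined behavior.
 */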
signed long
dma_fence_wait_any_timeout(struct dma_fence **fences, uint32_t count,
			   bool intr, signed long timeout, uint32_t *idx)
{
	struct default_wait_cb *cb;
	signed long ret = timeout;
	unsigned i;

	if (WARN_ON(!fences || !count || timeout < 0))
		return -EINVAL;

	if (timeout == 0) {
		for (i = 0; i < count; ++i)
			if (dma_fence_is_signaled(fences[i])) {
				if (idx)
					*idx = i;
				return 1;
			}

		return 0;
	}

	cb = kcalloc(count, sizeof(struct default_wait_cb), GFP_KERNEL);
	if (cb == NULL) {
		ret = -ENOMEM;
		goto err_free_cb;
	}

	for (i = 0; i < count; ++i) {
		struct dma_fence *fence = fences[i];

		if (fence->ops->wait != dma_fence_default_wait) {
			ret = -EINVAL;
			goto fence_rm_cb;
		}

		cb[i].task = current;
		if (dma_fence_add_callback(fence, &cb[i].base,
					   dma_fence_default_wait_cb)) {
			/* This fence is already signaled */
			if (idx)
				*idx = i;
			goto fence_rm_cb;
		}
	}

	while (ret > 0) {
		if (intr)
			set_current_state(TASK_INTERRUPTIBLE);
		else
			set_current_state(TASK_UNINTERRUPTIBLE);

		if (dma_fence_test_signaled_any(fences, count, idx))
			break;

		ret = schedule_timeout(ret);

		if (ret > 0 && intr && signal_pending(current))
			ret = -ERESTARTSYS;
	}

	__set_current_state(TASK_RUNNING);

fence_rm_cb:
	while (i-- > 0)
		dma_fence_remove_callback(fences[i], &cb[i].base);

err_free_cb:
	kfree(cb);

	return ret;
}
EXPORT_SYMBOL(dma_fence_wait_any_timeout);

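/**
 * dma_fence_init - Initialize a custom fence
 * @fence: the fence to initialize
 * @ops: the dma_fence_ops for operations on this fence
 * @lock: the irqsafe spinlock to use for locking this fence
 * @context: the execution context this fence is run on
 * @seqno: a linear increasing sequence number for this context
 *
 * Initializes an allocated fence. @context and @seqno are used for easy
 * comparison between fences, allowing to check which fence is later by
 * simply using dma_fence_later().
 */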
void
dma_fence_init(struct dma_fence *fence, const struct dma_fence_ops *ops,
	       spinlock_t *lock, u64 context, unsigned seqno)
{
	BUG_ON(!lock);
	BUG_ON(!ops || !ops->wait || !ops->enable_signaling ||
	       !ops->get_driver_name || !ops->get_timeline_name);

	kref_init(&fence->refcount);
	fence->ops = ops;
	INIT_LIST_HEAD(&fence->cb_list);
	fence->lock = lock;
	fence->context = context;
	fence->seqno = seqno;
	fence->flags = 0UL;

	trace_dma_fence_init(fence);
}
EXPORT_SYMBOL(dma_fence_init);