// SPDX-License-Identifier: GPL-2.0-only
/*
 * Fence mechanism for dma-buf and to allow for asynchronous dma access
 *
 * Copyright (C) 2012 Canonical Ltd
 * Copyright (C) 2012 Texas Instruments
 *
 * Authors:
 * Rob Clark <robdclark@gmail.com>
 * Maarten Lankhorst <maarten.lankhorst@canonical.com>
 */
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/atomic.h>
#include <linux/dma-fence.h>
#include <linux/sched/signal.h>

#define CREATE_TRACE_POINTS
#include <trace/events/dma_fence.h>

EXPORT_TRACEPOINT_SYMBOL(dma_fence_emit);
EXPORT_TRACEPOINT_SYMBOL(dma_fence_enable_signal);
EXPORT_TRACEPOINT_SYMBOL(dma_fence_signaled);

static DEFINE_SPINLOCK(dma_fence_stub_lock);
static struct dma_fence dma_fence_stub;

/*
 * fence context counter: each execution context should have its own
 * fence context, this allows checking if fences belong to the same
 * context or not. One device can have multiple separate contexts,
 * and they're used if some engine can run independently of another.
 */
static atomic64_t dma_fence_context_counter = ATOMIC64_INIT(1);

/**
 * DOC: DMA fences overview
 *
 * DMA fences, represented by &struct dma_fence, are the kernel internal
 * synchronization primitive for DMA operations like GPU rendering, video
 * encoding/decoding, or displaying buffers on a screen.
 *
 * A fence is initialized using dma_fence_init() and completed using
 * dma_fence_signal(). Fences are associated with a context, allocated
 * through dma_fence_context_alloc(), and all fences on the same context
 * are fully ordered.
 *
 * Since the purpose of fences is to facilitate cross-device and
 * cross-application synchronization, there are multiple ways to use one:
 *
 * - Individual fences can be exposed as a &sync_file, accessed as a file
 *   descriptor from userspace, created by calling sync_file_create(). This
 *   is called explicit fencing, since userspace passes around explicit
 *   synchronization points.
 *
 * - Some subsystems also have their own explicit fencing primitives, like
 *   &drm_syncobj. Compared to &sync_file, a &drm_syncobj allows the
 *   underlying fence to be updated.
 *
 * - Then there's also implicit fencing, where the synchronization points
 *   are implicitly passed around as part of shared &dma_buf instances.
 *   Such implicit fences are stored in &struct reservation_object through
 *   the &dma_buf.resv pointer.
 */
static const char *dma_fence_stub_get_name(struct dma_fence *fence)
{
	return "stub";
}

static const struct dma_fence_ops dma_fence_stub_ops = {
	.get_driver_name = dma_fence_stub_get_name,
	.get_timeline_name = dma_fence_stub_get_name,
};

/**
 * dma_fence_get_stub - return a signaled fence
 *
 * Return a stub fence which is already signaled.
 */
struct dma_fence *dma_fence_get_stub(void)
{
	spin_lock(&dma_fence_stub_lock);
	if (!dma_fence_stub.ops) {
		dma_fence_init(&dma_fence_stub,
			       &dma_fence_stub_ops,
			       &dma_fence_stub_lock,
			       0, 0);
		dma_fence_signal_locked(&dma_fence_stub);
	}
	spin_unlock(&dma_fence_stub_lock);

	return dma_fence_get(&dma_fence_stub);
}
EXPORT_SYMBOL(dma_fence_get_stub);
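
/*
 * Usage sketch (illustrative, not part of the upstream file): the stub is
 * useful when an interface must return a fence but there is nothing left
 * to wait for, e.g. when a caller asks for the fence of an idle object:
 *
 *	struct dma_fence *fence = dma_fence_get_stub();
 *
 *	// hand the already-signaled fence to the caller, which treats it
 *	// like any other fence and eventually drops its reference
 *	dma_fence_put(fence);
 */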

/**
 * dma_fence_context_alloc - allocate an array of fence contexts
 * @num: amount of contexts to allocate
 *
 * This function will return the first index of the number of fence contexts
 * allocated. The fence context is used for setting &dma_fence.context to a
 * unique number by passing the context to dma_fence_init().
 */
u64 dma_fence_context_alloc(unsigned num)
{
	WARN_ON(!num);
	return atomic64_add_return(num, &dma_fence_context_counter) - num;
}
EXPORT_SYMBOL(dma_fence_context_alloc);
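
/*
 * Usage sketch (illustrative; the "ring" structure is hypothetical): a
 * driver with several independent engines typically allocates one context
 * per engine at init time and numbers the fences it emits on each:
 *
 *	ring->fence_context = dma_fence_context_alloc(1);
 *	...
 *	dma_fence_init(&f->base, &ring_fence_ops, &ring->fence_lock,
 *		       ring->fence_context, ++ring->fence_seqno);
 */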

/**
 * dma_fence_signal_locked - signal completion of a fence
 * @fence: the fence to signal
 *
 * Signal completion for software callbacks on a fence, this will unblock
 * dma_fence_wait() calls and run all the callbacks added with
 * dma_fence_add_callback(). Can be called multiple times, but since a fence
 * can only go from the unsignaled to the signaled state and not back, it
 * will only be effective the first time.
 *
 * Unlike dma_fence_signal(), this function must be called with
 * &dma_fence.lock held.
 *
 * Returns 0 on success and a negative error value when @fence has been
 * signalled already.
 */
int dma_fence_signal_locked(struct dma_fence *fence)
{
	struct dma_fence_cb *cur, *tmp;
	int ret = 0;

	if (WARN_ON(!fence))
		return -EINVAL;

	lockdep_assert_held(fence->lock);

	if (test_and_set_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
		ret = -EINVAL;

		/*
		 * we might have raced with the unlocked dma_fence_signal,
		 * still run through all callbacks
		 */
	} else {
		fence->timestamp = ktime_get();
		set_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags);
		trace_dma_fence_signaled(fence);
	}

	list_for_each_entry_safe(cur, tmp, &fence->cb_list, node) {
		list_del_init(&cur->node);
		cur->func(fence, cur);
	}
	return ret;
}
EXPORT_SYMBOL(dma_fence_signal_locked);
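
/*
 * Usage sketch (illustrative): code that already holds the fence lock,
 * e.g. while walking per-context state protected by it, must use the
 * _locked variant:
 *
 *	spin_lock_irqsave(fence->lock, flags);
 *	dma_fence_signal_locked(fence);
 *	spin_unlock_irqrestore(fence->lock, flags);
 */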

/**
 * dma_fence_signal - signal completion of a fence
 * @fence: the fence to signal
 *
 * Signal completion for software callbacks on a fence, this will unblock
 * dma_fence_wait() calls and run all the callbacks added with
 * dma_fence_add_callback(). Can be called multiple times, but since a fence
 * can only go from the unsignaled to the signaled state and not back, it
 * will only be effective the first time.
 *
 * Returns 0 on success and a negative error value when @fence has been
 * signalled already.
 */
int dma_fence_signal(struct dma_fence *fence)
{
	unsigned long flags;

	if (!fence)
		return -EINVAL;

	if (test_and_set_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		return -EINVAL;

	fence->timestamp = ktime_get();
	set_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags);
	trace_dma_fence_signaled(fence);

	if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags)) {
		struct dma_fence_cb *cur, *tmp;

		spin_lock_irqsave(fence->lock, flags);
		list_for_each_entry_safe(cur, tmp, &fence->cb_list, node) {
			list_del_init(&cur->node);
			cur->func(fence, cur);
		}
		spin_unlock_irqrestore(fence->lock, flags);
	}
	return 0;
}
EXPORT_SYMBOL(dma_fence_signal);
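
/*
 * Usage sketch (illustrative; "my_ring" and the handler are hypothetical):
 * a completion interrupt is the typical signaling site. dma_fence_signal()
 * takes the fence lock itself, so the handler must not already hold it:
 *
 *	static irqreturn_t my_irq_handler(int irq, void *data)
 *	{
 *		struct my_ring *ring = data;
 *
 *		dma_fence_signal(ring->active_fence);
 *		return IRQ_HANDLED;
 *	}
 */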

/**
 * dma_fence_wait_timeout - sleep until the fence gets signaled
 * or until timeout elapses
 * @fence: the fence to wait on
 * @intr: if true, do an interruptible wait
 * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
 *
 * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or the
 * remaining timeout in jiffies on success. Other error values may be
 * returned if the fence is invalid.
 *
 * Performs a synchronous wait on this fence. It is assumed the caller
 * directly or indirectly holds a reference to the fence, otherwise the
 * fence might be freed before return, resulting in undefined behavior.
 *
 * See also dma_fence_wait() and dma_fence_wait_any_timeout().
 */
signed long
dma_fence_wait_timeout(struct dma_fence *fence, bool intr, signed long timeout)
{
	signed long ret;

	if (WARN_ON(timeout < 0))
		return -EINVAL;

	trace_dma_fence_wait_start(fence);
	if (fence->ops->wait)
		ret = fence->ops->wait(fence, intr, timeout);
	else
		ret = dma_fence_default_wait(fence, intr, timeout);
	trace_dma_fence_wait_end(fence);
	return ret;
}
EXPORT_SYMBOL(dma_fence_wait_timeout);
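
/*
 * Usage sketch (illustrative): a bounded, interruptible wait. Zero means
 * the timeout expired, a negative value is an error and a positive value
 * is the remaining time in jiffies:
 *
 *	signed long ret;
 *
 *	ret = dma_fence_wait_timeout(fence, true, msecs_to_jiffies(100));
 *	if (ret == 0)
 *		return -ETIMEDOUT;
 *	else if (ret < 0)
 *		return ret;	// e.g. -ERESTARTSYS
 */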

/**
 * dma_fence_release - default release function for fences
 * @kref: &dma_fence.refcount
 *
 * This is the default release function for &dma_fence. Drivers shouldn't
 * call this directly, but instead call dma_fence_put().
 */
void dma_fence_release(struct kref *kref)
{
	struct dma_fence *fence =
		container_of(kref, struct dma_fence, refcount);

	trace_dma_fence_destroy(fence);

	if (WARN(!list_empty(&fence->cb_list),
		 "Fence %s:%s:%llx:%llx released with pending signals!\n",
		 fence->ops->get_driver_name(fence),
		 fence->ops->get_timeline_name(fence),
		 fence->context, fence->seqno)) {
		unsigned long flags;

		/*
		 * Failed to signal before release, likely a refcounting
		 * issue.
		 *
		 * This should never happen, but if it does make sure that
		 * we don't leave chains dangling. We set the error flag
		 * first so that the callbacks know this signal is due to
		 * an error.
		 */
		spin_lock_irqsave(fence->lock, flags);
		fence->error = -EDEADLK;
		dma_fence_signal_locked(fence);
		spin_unlock_irqrestore(fence->lock, flags);
	}

	if (fence->ops->release)
		fence->ops->release(fence);
	else
		dma_fence_free(fence);
}
EXPORT_SYMBOL(dma_fence_release);

/**
 * dma_fence_free - default release function for fences
 * @fence: fence to release
 *
 * This is the default implementation for &dma_fence_ops.release. It calls
 * kfree_rcu() on @fence.
 */
void dma_fence_free(struct dma_fence *fence)
{
	kfree_rcu(fence, rcu);
}
EXPORT_SYMBOL(dma_fence_free);

/**
 * dma_fence_enable_sw_signaling - enable signaling on fence
 * @fence: the fence to enable
 *
 * This will request for sw signaling to be enabled, to make the fence
 * complete as soon as possible. This calls &dma_fence_ops.enable_signaling
 * internally.
 */
void dma_fence_enable_sw_signaling(struct dma_fence *fence)
{
	unsigned long flags;

	if (!test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
			      &fence->flags) &&
	    !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags) &&
	    fence->ops->enable_signaling) {
		trace_dma_fence_enable_signal(fence);

		spin_lock_irqsave(fence->lock, flags);

		if (!fence->ops->enable_signaling(fence))
			dma_fence_signal_locked(fence);

		spin_unlock_irqrestore(fence->lock, flags);
	}
}
EXPORT_SYMBOL(dma_fence_enable_sw_signaling);
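
/*
 * Usage sketch (illustrative): code that polls a fence, without blocking
 * on it right away, can opt in to signaling up front so the fence
 * completes as soon as possible, then check it later:
 *
 *	dma_fence_enable_sw_signaling(fence);
 *	...
 *	done = dma_fence_is_signaled(fence);
 */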

/**
 * dma_fence_add_callback - add a callback to be called when the fence
 * is signaled
 * @fence: the fence to wait on
 * @cb: the callback to register
 * @func: the function to call
 *
 * @cb will be initialized by dma_fence_add_callback(), no initialization
 * by the caller is required. Any number of callbacks can be registered
 * to a fence, but a callback can only be registered to one fence at a time.
 *
 * Note that the callback can be called from an atomic context. If the
 * fence is already signaled, this function will return -ENOENT (and
 * *not* call the callback).
 *
 * Add a software callback to the fence. The same restrictions apply to
 * the refcount as for dma_fence_wait(), however the caller doesn't need
 * to keep a refcount to the fence afterwards: when software access is
 * enabled, the creator of the fence is required to keep the fence alive
 * until after it signals with dma_fence_signal().
 *
 * Returns 0 in case of success, -ENOENT if the fence is already signaled
 * and -EINVAL in case of error.
 */
int dma_fence_add_callback(struct dma_fence *fence, struct dma_fence_cb *cb,
			   dma_fence_func_t func)
{
	unsigned long flags;
	int ret = 0;
	bool was_set;

	if (WARN_ON(!fence || !func))
		return -EINVAL;

	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
		INIT_LIST_HEAD(&cb->node);
		return -ENOENT;
	}

	spin_lock_irqsave(fence->lock, flags);

	was_set = test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
				   &fence->flags);

	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
		ret = -ENOENT;
	} else if (!was_set && fence->ops->enable_signaling) {
		trace_dma_fence_enable_signal(fence);

		if (!fence->ops->enable_signaling(fence)) {
			dma_fence_signal_locked(fence);
			ret = -ENOENT;
		}
	}

	if (!ret) {
		cb->func = func;
		list_add_tail(&cb->node, &fence->cb_list);
	} else {
		INIT_LIST_HEAD(&cb->node);
	}
	spin_unlock_irqrestore(fence->lock, flags);

	return ret;
}
EXPORT_SYMBOL(dma_fence_add_callback);
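
/*
 * Usage sketch (illustrative; the "my_job" structure is hypothetical):
 * embed a &struct dma_fence_cb in the object that waits, and remember
 * that the callback may run from irq context:
 *
 *	static void my_job_cb(struct dma_fence *f, struct dma_fence_cb *cb)
 *	{
 *		struct my_job *job = container_of(cb, struct my_job, cb);
 *
 *		queue_work(system_wq, &job->work);
 *	}
 *
 *	err = dma_fence_add_callback(fence, &job->cb, my_job_cb);
 *	if (err == -ENOENT)
 *		schedule_work(&job->work);	// already signaled
 */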

/**
 * dma_fence_get_status - returns the status upon completion
 * @fence: the dma_fence to query
 *
 * This wraps dma_fence_get_status_locked() to return the error status
 * condition on a signaled fence. See dma_fence_get_status_locked() for
 * more details.
 *
 * Returns 0 if the fence has not yet been signaled, 1 if the fence has
 * been signaled without an error condition, or a negative error code
 * if the fence has been completed in error.
 */
int dma_fence_get_status(struct dma_fence *fence)
{
	unsigned long flags;
	int status;

	spin_lock_irqsave(fence->lock, flags);
	status = dma_fence_get_status_locked(fence);
	spin_unlock_irqrestore(fence->lock, flags);

	return status;
}
EXPORT_SYMBOL(dma_fence_get_status);
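
/*
 * Usage sketch (illustrative; "dev" and "done" are hypothetical caller
 * state): distinguishing "pending", "signaled" and "signaled with error":
 *
 *	int status = dma_fence_get_status(fence);
 *
 *	if (status < 0)
 *		dev_err(dev, "fence completed with error %d\n", status);
 *	else if (status == 1)
 *		complete(&done);	// signaled successfully
 *	// status == 0: still pending
 */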

/**
 * dma_fence_remove_callback - remove a callback from the signaling list
 * @fence: the fence to wait on
 * @cb: the callback to remove
 *
 * Remove a previously queued callback from the fence. This function returns
 * true if the callback is successfully removed, or false if the fence has
 * already been signaled.
 *
 * *WARNING*:
 * Cancelling a callback should only be done if you really know what you're
 * doing, since deadlocks and race conditions could occur all too easily.
 * For this reason, it should only ever be done on hardware lockup recovery,
 * with a reference held to the fence.
 *
 * Behaviour is undefined if @cb has not been added to @fence with
 * dma_fence_add_callback() beforehand. This function can be called from
 * atomic context.
 */
bool
dma_fence_remove_callback(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	unsigned long flags;
	bool ret;

	spin_lock_irqsave(fence->lock, flags);

	ret = !list_empty(&cb->node);
	if (ret)
		list_del_init(&cb->node);

	spin_unlock_irqrestore(fence->lock, flags);

	return ret;
}
EXPORT_SYMBOL(dma_fence_remove_callback);
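
/*
 * Usage sketch (illustrative, continuing the hypothetical "my_job" example
 * above): cancelling a pending callback, e.g. on job teardown. If removal
 * fails the callback has already run (or is running) and the enclosing
 * object must stay alive until it has finished:
 *
 *	if (!dma_fence_remove_callback(fence, &job->cb))
 *		flush_work(&job->work);
 */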

struct default_wait_cb {
	struct dma_fence_cb base;
	struct task_struct *task;
};

static void
dma_fence_default_wait_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	struct default_wait_cb *wait =
		container_of(cb, struct default_wait_cb, base);

	wake_up_state(wait->task, TASK_NORMAL);
}

/**
 * dma_fence_default_wait - default sleep until the fence gets signaled
 * or until timeout elapses
 * @fence: the fence to wait on
 * @intr: if true, do an interruptible wait
 * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
 *
 * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or the
 * remaining timeout in jiffies on success. If timeout is zero the value
 * one is returned if the fence is already signaled, for consistency with
 * other functions taking a jiffies timeout.
 */
signed long
dma_fence_default_wait(struct dma_fence *fence, bool intr, signed long timeout)
{
	struct default_wait_cb cb;
	unsigned long flags;
	signed long ret = timeout ? timeout : 1;
	bool was_set;

	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		return ret;

	spin_lock_irqsave(fence->lock, flags);

	if (intr && signal_pending(current)) {
		ret = -ERESTARTSYS;
		goto out;
	}

	was_set = test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
				   &fence->flags);

	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		goto out;

	if (!was_set && fence->ops->enable_signaling) {
		trace_dma_fence_enable_signal(fence);

		if (!fence->ops->enable_signaling(fence)) {
			dma_fence_signal_locked(fence);
			goto out;
		}
	}

	if (!timeout) {
		ret = 0;
		goto out;
	}

	cb.base.func = dma_fence_default_wait_cb;
	cb.task = current;
	list_add(&cb.base.node, &fence->cb_list);

	while (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags) && ret > 0) {
		if (intr)
			__set_current_state(TASK_INTERRUPTIBLE);
		else
			__set_current_state(TASK_UNINTERRUPTIBLE);
		spin_unlock_irqrestore(fence->lock, flags);

		ret = schedule_timeout(ret);

		spin_lock_irqsave(fence->lock, flags);
		if (ret > 0 && intr && signal_pending(current))
			ret = -ERESTARTSYS;
	}

	if (!list_empty(&cb.base.node))
		list_del(&cb.base.node);
	__set_current_state(TASK_RUNNING);

out:
	spin_unlock_irqrestore(fence->lock, flags);
	return ret;
}
EXPORT_SYMBOL(dma_fence_default_wait);

static bool
dma_fence_test_signaled_any(struct dma_fence **fences, uint32_t count,
			    uint32_t *idx)
{
	int i;

	for (i = 0; i < count; ++i) {
		struct dma_fence *fence = fences[i];

		if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
			if (idx)
				*idx = i;
			return true;
		}
	}
	return false;
}

/**
 * dma_fence_wait_any_timeout - sleep until any fence gets signaled
 * or until timeout elapses
 * @fences: array of fences to wait on
 * @count: number of fences to wait on
 * @intr: if true, do an interruptible wait
 * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
 * @idx: used to store the first signaled fence index, meaningful only on
 *	positive return
 *
 * Returns -EINVAL on custom fence wait implementation, -ERESTARTSYS if
 * interrupted, 0 if the wait timed out, or the remaining timeout in
 * jiffies on success.
 *
 * Synchronously waits for the first fence in the array to be signaled.
 * The caller needs to hold a reference to all fences in the array,
 * otherwise a fence might be freed before return, resulting in undefined
 * behavior.
 *
 * See also dma_fence_wait() and dma_fence_wait_timeout().
 */
signed long
dma_fence_wait_any_timeout(struct dma_fence **fences, uint32_t count,
			   bool intr, signed long timeout, uint32_t *idx)
{
	struct default_wait_cb *cb;
	signed long ret = timeout;
	unsigned i;

	if (WARN_ON(!fences || !count || timeout < 0))
		return -EINVAL;

	if (timeout == 0) {
		for (i = 0; i < count; ++i)
			if (dma_fence_is_signaled(fences[i])) {
				if (idx)
					*idx = i;
				return 1;
			}

		return 0;
	}

	cb = kcalloc(count, sizeof(struct default_wait_cb), GFP_KERNEL);
	if (cb == NULL) {
		ret = -ENOMEM;
		goto err_free_cb;
	}

	for (i = 0; i < count; ++i) {
		struct dma_fence *fence = fences[i];

		cb[i].task = current;
		if (dma_fence_add_callback(fence, &cb[i].base,
					   dma_fence_default_wait_cb)) {
			/* This fence is already signaled */
			if (idx)
				*idx = i;
			goto fence_rm_cb;
		}
	}

	while (ret > 0) {
		if (intr)
			set_current_state(TASK_INTERRUPTIBLE);
		else
			set_current_state(TASK_UNINTERRUPTIBLE);

		if (dma_fence_test_signaled_any(fences, count, idx))
			break;

		ret = schedule_timeout(ret);

		if (ret > 0 && intr && signal_pending(current))
			ret = -ERESTARTSYS;
	}

	__set_current_state(TASK_RUNNING);

fence_rm_cb:
	while (i-- > 0)
		dma_fence_remove_callback(fences[i], &cb[i].base);

err_free_cb:
	kfree(cb);

	return ret;
}
EXPORT_SYMBOL(dma_fence_wait_any_timeout);
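
/*
 * Usage sketch (illustrative): waiting for whichever of several fences
 * signals first, with @idx reporting the winner on a positive return:
 *
 *	uint32_t first;
 *	signed long ret;
 *
 *	ret = dma_fence_wait_any_timeout(fences, count, true,
 *					 MAX_SCHEDULE_TIMEOUT, &first);
 *	if (ret > 0)
 *		winner = fences[first];
 */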

/**
 * dma_fence_init - Initialize a custom fence.
 * @fence: the fence to initialize
 * @ops: the dma_fence_ops for operations on this fence
 * @lock: the irqsafe spinlock to use for locking this fence
 * @context: the execution context this fence is run on
 * @seqno: a linearly increasing sequence number for this context
 *
 * Initializes an allocated fence, the caller doesn't have to keep its
 * refcount after committing with this fence, but it will need to hold a
 * refcount again if &dma_fence_ops.enable_signaling gets called.
 *
 * context and seqno are used for easy comparison between fences, allowing
 * to check which fence is later by simply using dma_fence_later().
 */
void
dma_fence_init(struct dma_fence *fence, const struct dma_fence_ops *ops,
	       spinlock_t *lock, u64 context, u64 seqno)
{
	BUG_ON(!lock);
	BUG_ON(!ops || !ops->get_driver_name || !ops->get_timeline_name);

	kref_init(&fence->refcount);
	fence->ops = ops;
	INIT_LIST_HEAD(&fence->cb_list);
	fence->lock = lock;
	fence->context = context;
	fence->seqno = seqno;
	fence->flags = 0UL;
	fence->error = 0;

	trace_dma_fence_init(fence);
}
EXPORT_SYMBOL(dma_fence_init);
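
/*
 * Usage sketch (illustrative; the "example_" names are hypothetical): a
 * minimal fence implementation only needs the two name callbacks, just
 * like the stub fence above; every other &dma_fence_ops hook is optional:
 *
 *	static const char *example_name(struct dma_fence *f)
 *	{
 *		return "example";
 *	}
 *
 *	static const struct dma_fence_ops example_ops = {
 *		.get_driver_name = example_name,
 *		.get_timeline_name = example_name,
 *	};
 *
 *	dma_fence_init(fence, &example_ops, &example_lock, context, ++seqno);
 */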