#include <linux/atomic.h>
#include <linux/firmware.h>
#include <linux/kref.h>
#include <linux/sched/signal.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/wait.h>

#include <drm/drm_debugfs.h>
#include <drm/drm_device.h>
#include <drm/drm_file.h>

#include "radeon.h"
#include "radeon_reg.h"
#include "radeon_trace.h"
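
/**
 * radeon_fence_write - write a fence value
 *
 * @rdev: radeon_device pointer
 * @seq: sequence number to write
 * @ring: ring index the fence is associated with
 *
 * Writes a fence value to memory if writeback is enabled, or to a
 * scratch register otherwise (all asics).
 */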
static void radeon_fence_write(struct radeon_device *rdev, u32 seq, int ring)
{
	struct radeon_fence_driver *drv = &rdev->fence_drv[ring];

	if (likely(rdev->wb.enabled || !drv->scratch_reg)) {
		if (drv->cpu_addr) {
			*drv->cpu_addr = cpu_to_le32(seq);
		}
	} else {
		WREG32(drv->scratch_reg, seq);
	}
}
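
/**
 * radeon_fence_read - read a fence value
 *
 * @rdev: radeon_device pointer
 * @ring: ring index the fence is associated with
 *
 * Reads a fence value from memory or a scratch register (all asics),
 * falling back to the last known sequence number if no CPU address
 * is available.
 * Returns the value of the fence read from memory or register.
 */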
static u32 radeon_fence_read(struct radeon_device *rdev, int ring)
{
	struct radeon_fence_driver *drv = &rdev->fence_drv[ring];
	u32 seq = 0;

	if (likely(rdev->wb.enabled || !drv->scratch_reg)) {
		if (drv->cpu_addr) {
			seq = le32_to_cpu(*drv->cpu_addr);
		} else {
			seq = lower_32_bits(atomic64_read(&drv->last_seq));
		}
	} else {
		seq = RREG32(drv->scratch_reg);
	}
	return seq;
}
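
/**
 * radeon_fence_schedule_check - schedule lockup check
 *
 * @rdev: radeon_device pointer
 * @ring: ring index we should work with
 *
 * Queues a delayed work item to check for lockups.
 */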
static void radeon_fence_schedule_check(struct radeon_device *rdev, int ring)
{
	/*
	 * Do not reset the timer here with mod_delayed_work;
	 * re-arming it from here can livelock in an interaction
	 * with TTM delayed destroy.
	 */
	queue_delayed_work(system_power_efficient_wq,
			   &rdev->fence_drv[ring].lockup_work,
			   RADEON_FENCE_JIFFIES_TIMEOUT);
}
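
/**
 * radeon_fence_emit - emit a fence on the requested ring
 *
 * @rdev: radeon_device pointer
 * @fence: radeon fence object
 * @ring: ring index the fence is associated with
 *
 * Emits a fence command on the requested ring (all asics).
 * Returns 0 on success, -ENOMEM on failure.
 */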
int radeon_fence_emit(struct radeon_device *rdev,
		      struct radeon_fence **fence,
		      int ring)
{
	u64 seq;

	/* we are protected by the ring emission mutex */
	*fence = kmalloc(sizeof(struct radeon_fence), GFP_KERNEL);
	if ((*fence) == NULL) {
		return -ENOMEM;
	}
	(*fence)->rdev = rdev;
	(*fence)->seq = seq = ++rdev->fence_drv[ring].sync_seq[ring];
	(*fence)->ring = ring;
	(*fence)->is_vm_update = false;
	dma_fence_init(&(*fence)->base, &radeon_fence_ops,
		       &rdev->fence_queue.lock,
		       rdev->fence_context + ring,
		       seq);
	radeon_fence_ring_emit(rdev, ring, *fence);
	trace_radeon_fence_emit(rdev->ddev, ring, (*fence)->seq);
	radeon_fence_schedule_check(rdev, ring);
	return 0;
}
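
/**
 * radeon_fence_check_signaled - callback from fence_queue
 *
 * This function is called with the fence_queue lock held, which is
 * also used for the fence locking itself, so unlocked variants are
 * used for fence_signal and remove_wait_queue.
 */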
static int radeon_fence_check_signaled(wait_queue_entry_t *wait,
				       unsigned int mode, int flags, void *key)
{
	struct radeon_fence *fence;
	u64 seq;

	fence = container_of(wait, struct radeon_fence, fence_wake);

	/*
	 * We cannot use radeon_fence_process here because we're already
	 * in the waitqueue, in a call from wake_up_all.
	 */
	seq = atomic64_read(&fence->rdev->fence_drv[fence->ring].last_seq);
	if (seq >= fence->seq) {
		int ret = dma_fence_signal_locked(&fence->base);

		if (!ret)
			DMA_FENCE_TRACE(&fence->base, "signaled from irq context\n");
		else
			DMA_FENCE_TRACE(&fence->base, "was already signaled\n");

		radeon_irq_kms_sw_irq_put(fence->rdev, fence->ring);
		__remove_wait_queue(&fence->rdev->fence_queue, &fence->fence_wake);
		dma_fence_put(&fence->base);
	} else
		DMA_FENCE_TRACE(&fence->base, "pending\n");
	return 0;
}
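
/**
 * radeon_fence_activity - check for fence activity
 *
 * @rdev: radeon_device pointer
 * @ring: ring index the fence is associated with
 *
 * Checks the current fence value and calculates the last signalled
 * fence value. Returns true if activity occurred on the ring and the
 * fence_queue should be woken up.
 */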
static bool radeon_fence_activity(struct radeon_device *rdev, int ring)
{
	uint64_t seq, last_seq, last_emitted;
	unsigned count_loop = 0;
	bool wake = false;

	/* Note there is a scenario where this loops forever, but it is
	 * very unlikely: another process would have to update last_seq
	 * between our atomic read and the xchg below, and do so with a
	 * strictly higher value on every iteration. In practice that
	 * requires this process to be preempted at exactly the wrong
	 * moment more than 10 times in a row.
	 */
	last_seq = atomic64_read(&rdev->fence_drv[ring].last_seq);
	do {
		last_emitted = rdev->fence_drv[ring].sync_seq[ring];
		seq = radeon_fence_read(rdev, ring);
		seq |= last_seq & 0xffffffff00000000LL;
		if (seq < last_seq) {
			seq &= 0xffffffff;
			seq |= last_emitted & 0xffffffff00000000LL;
		}

		if (seq <= last_seq || seq > last_emitted) {
			break;
		}
		/* The sequence advanced, so remember to wake up waiters
		 * even if we end up leaving the loop early.
		 */
		wake = true;
		last_seq = seq;
		if ((count_loop++) > 10) {
			/* We looped too many times; give up and accept
			 * that last_seq may lag slightly behind the
			 * sequence the hw has actually signaled.
			 */
			break;
		}
	} while (atomic64_xchg(&rdev->fence_drv[ring].last_seq, seq) > seq);

	if (seq < last_emitted)
		radeon_fence_schedule_check(rdev, ring);

	return wake;
}
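
/**
 * radeon_fence_check_lockup - check for hardware lockup
 *
 * @work: delayed work item
 *
 * Checks for fence activity and, if there is none, probes the
 * hardware to see if a lockup occurred.
 */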
static void radeon_fence_check_lockup(struct work_struct *work)
{
	struct radeon_fence_driver *fence_drv;
	struct radeon_device *rdev;
	int ring;

	fence_drv = container_of(work, struct radeon_fence_driver,
				 lockup_work.work);
	rdev = fence_drv->rdev;
	ring = fence_drv - &rdev->fence_drv[0];

	if (!down_read_trylock(&rdev->exclusive_lock)) {
		/* just reschedule the check if a reset is in progress */
		radeon_fence_schedule_check(rdev, ring);
		return;
	}

	if (fence_drv->delayed_irq && rdev->ddev->irq_enabled) {
		unsigned long irqflags;

		fence_drv->delayed_irq = false;
		spin_lock_irqsave(&rdev->irq.lock, irqflags);
		radeon_irq_set(rdev);
		spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
	}

	if (radeon_fence_activity(rdev, ring))
		wake_up_all(&rdev->fence_queue);

	else if (radeon_ring_is_lockup(rdev, ring, &rdev->ring[ring])) {
		/* good news, we believe it's a lockup */
		dev_warn(rdev->dev, "GPU lockup (current fence id "
			 "0x%016llx last fence id 0x%016llx on ring %d)\n",
			 (uint64_t)atomic64_read(&fence_drv->last_seq),
			 fence_drv->sync_seq[ring], ring);

		/* remember that we need a reset */
		rdev->needs_reset = true;
		wake_up_all(&rdev->fence_queue);
	}
	up_read(&rdev->exclusive_lock);
}
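
/**
 * radeon_fence_process - process a fence
 *
 * @rdev: radeon_device pointer
 * @ring: ring index the fence is associated with
 *
 * Checks the current fence value and wakes the fence queue if the
 * sequence number has increased (all asics).
 */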
void radeon_fence_process(struct radeon_device *rdev, int ring)
{
	if (radeon_fence_activity(rdev, ring))
		wake_up_all(&rdev->fence_queue);
}
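
/**
 * radeon_fence_seq_signaled - check if a fence sequence number has signaled
 *
 * @rdev: radeon device pointer
 * @seq: sequence number
 * @ring: ring index the fence is associated with
 *
 * Check if the last signaled fence sequence number is >= the requested
 * sequence number (all asics).
 * Returns true if the fence has signaled (current fence value is >=
 * the requested value), false otherwise. Helper function for
 * radeon_fence_signaled().
 */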
static bool radeon_fence_seq_signaled(struct radeon_device *rdev,
				      u64 seq, unsigned ring)
{
	if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) {
		return true;
	}

	/* poll new last sequence at least once */
	radeon_fence_process(rdev, ring);
	if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) {
		return true;
	}
	return false;
}
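
/**
 * radeon_fence_is_signaled - dma_fence_ops signaled callback
 *
 * @f: fence to check
 *
 * Checks whether the fence sequence number has already passed,
 * polling for new activity if the exclusive lock can be taken.
 */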
static bool radeon_fence_is_signaled(struct dma_fence *f)
{
	struct radeon_fence *fence = to_radeon_fence(f);
	struct radeon_device *rdev = fence->rdev;
	unsigned ring = fence->ring;
	u64 seq = fence->seq;

	if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) {
		return true;
	}

	if (down_read_trylock(&rdev->exclusive_lock)) {
		radeon_fence_process(rdev, ring);
		up_read(&rdev->exclusive_lock);

		if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) {
			return true;
		}
	}
	return false;
}
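
/**
 * radeon_fence_enable_signaling - enable signalling on fence
 *
 * @f: fence
 *
 * This function is called with the fence_queue lock held and adds a
 * callback to fence_queue that checks if this fence is signaled, and
 * if so it signals the fence and removes itself.
 */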
static bool radeon_fence_enable_signaling(struct dma_fence *f)
{
	struct radeon_fence *fence = to_radeon_fence(f);
	struct radeon_device *rdev = fence->rdev;

	if (atomic64_read(&rdev->fence_drv[fence->ring].last_seq) >= fence->seq)
		return false;

	if (down_read_trylock(&rdev->exclusive_lock)) {
		radeon_irq_kms_sw_irq_get(rdev, fence->ring);

		if (radeon_fence_activity(rdev, fence->ring))
			wake_up_all_locked(&rdev->fence_queue);

		/* did fence get signaled after we enabled the sw irq? */
		if (atomic64_read(&rdev->fence_drv[fence->ring].last_seq) >= fence->seq) {
			radeon_irq_kms_sw_irq_put(rdev, fence->ring);
			up_read(&rdev->exclusive_lock);
			return false;
		}

		up_read(&rdev->exclusive_lock);
	} else {
		/* we're probably in a lockup, lets not fiddle too much */
		if (radeon_irq_kms_sw_irq_get_delayed(rdev, fence->ring))
			rdev->fence_drv[fence->ring].delayed_irq = true;
		radeon_fence_schedule_check(rdev, fence->ring);
	}

	fence->fence_wake.flags = 0;
	fence->fence_wake.private = NULL;
	fence->fence_wake.func = radeon_fence_check_signaled;
	__add_wait_queue(&rdev->fence_queue, &fence->fence_wake);
	dma_fence_get(f);

	DMA_FENCE_TRACE(&fence->base, "armed on ring %i!\n", fence->ring);
	return true;
}
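
/**
 * radeon_fence_signaled - check if a fence has signaled
 *
 * @fence: radeon fence object
 *
 * Check if the requested fence has signaled (all asics).
 * Returns true if the fence has signaled or false if it has not.
 */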
bool radeon_fence_signaled(struct radeon_fence *fence)
{
	if (!fence)
		return true;

	if (radeon_fence_seq_signaled(fence->rdev, fence->seq, fence->ring)) {
		int ret;

		ret = dma_fence_signal(&fence->base);
		if (!ret)
			DMA_FENCE_TRACE(&fence->base, "signaled from radeon_fence_signaled\n");
		return true;
	}
	return false;
}
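
/**
 * radeon_fence_any_seq_signaled - check if any sequence number is signaled
 *
 * @rdev: radeon device pointer
 * @seq: sequence numbers, indexed by ring id; zero means "skip this ring"
 *
 * Check if the last signaled fence sequence number is >= the requested
 * sequence number (all asics).
 * Returns true if any requested sequence has signaled, false otherwise.
 * Helper function for radeon_fence_wait_seq_timeout().
 */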
static bool radeon_fence_any_seq_signaled(struct radeon_device *rdev, u64 *seq)
{
	unsigned i;

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		if (seq[i] && radeon_fence_seq_signaled(rdev, seq[i], i))
			return true;
	}
	return false;
}
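
/**
 * radeon_fence_wait_seq_timeout - wait for specific sequence numbers
 *
 * @rdev: radeon device pointer
 * @target_seq: sequence number(s) we want to wait for, indexed by ring id
 * @intr: use interruptible sleep
 * @timeout: maximum time to wait, or MAX_SCHEDULE_TIMEOUT for infinite wait
 *
 * Wait for the requested sequence number(s) to be written by any ring
 * (all asics). @intr selects whether to use interruptible (true) or
 * non-interruptible (false) sleep when waiting. Helper function for
 * the radeon_fence_wait_*() family.
 * Returns the remaining time if the sequence number has passed, 0 when
 * the wait timed out, or an error for all other cases.
 * -EDEADLK is returned when a GPU lockup has been detected.
 */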
static long radeon_fence_wait_seq_timeout(struct radeon_device *rdev,
					  u64 *target_seq, bool intr,
					  long timeout)
{
	long r;
	int i;

	if (radeon_fence_any_seq_signaled(rdev, target_seq))
		return timeout;

	/* enable IRQs and tracing */
	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		if (!target_seq[i])
			continue;

		trace_radeon_fence_wait_begin(rdev->ddev, i, target_seq[i]);
		radeon_irq_kms_sw_irq_get(rdev, i);
	}

	if (intr) {
		r = wait_event_interruptible_timeout(rdev->fence_queue, (
			radeon_fence_any_seq_signaled(rdev, target_seq)
			|| rdev->needs_reset), timeout);
	} else {
		r = wait_event_timeout(rdev->fence_queue, (
			radeon_fence_any_seq_signaled(rdev, target_seq)
			|| rdev->needs_reset), timeout);
	}

	if (rdev->needs_reset)
		r = -EDEADLK;

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		if (!target_seq[i])
			continue;

		radeon_irq_kms_sw_irq_put(rdev, i);
		trace_radeon_fence_wait_end(rdev->ddev, i, target_seq[i]);
	}

	return r;
}
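
/**
 * radeon_fence_wait_timeout - wait for a fence to signal with timeout
 *
 * @fence: radeon fence object
 * @intr: use interruptible sleep
 * @timeout: maximum time to wait, or MAX_SCHEDULE_TIMEOUT for infinite wait
 *
 * Wait for the requested fence to signal (all asics).
 * @intr selects whether to use interruptible (true) or non-interruptible
 * (false) sleep when waiting for the fence.
 * Returns the remaining time if the fence has passed, 0 when the wait
 * timed out, or an error for all other cases.
 */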
long radeon_fence_wait_timeout(struct radeon_fence *fence, bool intr, long timeout)
{
	uint64_t seq[RADEON_NUM_RINGS] = {};
	long r;
	int r_sig;

	/*
	 * This function should not be called on !radeon fences.
	 * If this is the case, it would mean this function can
	 * also be called on radeon fences belonging to another card.
	 * exclusive_lock is not held in that case.
	 */
	if (WARN_ON_ONCE(!to_radeon_fence(&fence->base)))
		return dma_fence_wait(&fence->base, intr);

	seq[fence->ring] = fence->seq;
	r = radeon_fence_wait_seq_timeout(fence->rdev, seq, intr, timeout);
	if (r <= 0) {
		return r;
	}

	r_sig = dma_fence_signal(&fence->base);
	if (!r_sig)
		DMA_FENCE_TRACE(&fence->base, "signaled from fence_wait\n");
	return r;
}
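
/**
 * radeon_fence_wait - wait for a fence to signal
 *
 * @fence: radeon fence object
 * @intr: use interruptible sleep
 *
 * Wait for the requested fence to signal (all asics).
 * @intr selects whether to use interruptible (true) or non-interruptible
 * (false) sleep when waiting for the fence.
 * Returns 0 if the fence has passed, an error for all other cases.
 */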
int radeon_fence_wait(struct radeon_fence *fence, bool intr)
{
	long r = radeon_fence_wait_timeout(fence, intr, MAX_SCHEDULE_TIMEOUT);

	if (r > 0) {
		return 0;
	} else {
		return r;
	}
}
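
/**
 * radeon_fence_wait_any - wait for a fence to signal on any ring
 *
 * @rdev: radeon device pointer
 * @fences: radeon fence object(s), indexed by ring id
 * @intr: use interruptible sleep
 *
 * Wait for any requested fence to signal (all asics).
 * @intr selects whether to use interruptible (true) or non-interruptible
 * (false) sleep when waiting for the fences.
 * Returns 0 if any fence has passed, an error for all other cases.
 */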
int radeon_fence_wait_any(struct radeon_device *rdev,
			  struct radeon_fence **fences,
			  bool intr)
{
	uint64_t seq[RADEON_NUM_RINGS];
	unsigned i, num_rings = 0;
	long r;

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		seq[i] = 0;

		if (!fences[i]) {
			continue;
		}

		seq[i] = fences[i]->seq;
		++num_rings;
	}

	/* nothing to wait for ? */
	if (num_rings == 0)
		return -ENOENT;

	r = radeon_fence_wait_seq_timeout(rdev, seq, intr, MAX_SCHEDULE_TIMEOUT);
	if (r < 0) {
		return r;
	}
	return 0;
}
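
/**
 * radeon_fence_wait_next - wait for the next fence to signal
 *
 * @rdev: radeon device pointer
 * @ring: ring index the fence is associated with
 *
 * Wait for the next fence on the requested ring to signal (all asics).
 * Returns 0 if the next fence has passed, an error for all other cases.
 * Caller must hold the ring lock.
 */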
int radeon_fence_wait_next(struct radeon_device *rdev, int ring)
{
	uint64_t seq[RADEON_NUM_RINGS] = {};
	long r;

	seq[ring] = atomic64_read(&rdev->fence_drv[ring].last_seq) + 1ULL;
	if (seq[ring] >= rdev->fence_drv[ring].sync_seq[ring]) {
		/* nothing to wait for, last_seq is already
		 * the last emitted fence
		 */
		return -ENOENT;
	}
	r = radeon_fence_wait_seq_timeout(rdev, seq, false, MAX_SCHEDULE_TIMEOUT);
	if (r < 0)
		return r;
	return 0;
}
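
/**
 * radeon_fence_wait_empty - wait for all fences to signal
 *
 * @rdev: radeon device pointer
 * @ring: ring index the fence is associated with
 *
 * Wait for all fences on the requested ring to signal (all asics).
 * Returns 0 once the ring is idle (other wait errors are only logged);
 * -EDEADLK if a GPU lockup was detected. Caller must hold the ring lock.
 */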
int radeon_fence_wait_empty(struct radeon_device *rdev, int ring)
{
	uint64_t seq[RADEON_NUM_RINGS] = {};
	long r;

	seq[ring] = rdev->fence_drv[ring].sync_seq[ring];
	if (!seq[ring])
		return 0;

	r = radeon_fence_wait_seq_timeout(rdev, seq, false, MAX_SCHEDULE_TIMEOUT);
	if (r < 0) {
		if (r == -EDEADLK)
			return -EDEADLK;

		dev_err(rdev->dev, "error waiting for ring[%d] to become idle (%ld)\n",
			ring, r);
	}
	return 0;
}
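
/**
 * radeon_fence_ref - take a ref on a fence
 *
 * @fence: radeon fence object
 *
 * Take a reference on a fence (all asics).
 * Returns the fence.
 */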
struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence)
{
	dma_fence_get(&fence->base);
	return fence;
}
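
/**
 * radeon_fence_unref - remove a ref on a fence
 *
 * @fence: radeon fence object
 *
 * Remove a reference on a fence (all asics) and clear the pointer.
 */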
void radeon_fence_unref(struct radeon_fence **fence)
{
	struct radeon_fence *tmp = *fence;

	*fence = NULL;
	if (tmp) {
		dma_fence_put(&tmp->base);
	}
}
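
/**
 * radeon_fence_count_emitted - get the count of emitted fences
 *
 * @rdev: radeon device pointer
 * @ring: ring index the fence is associated with
 *
 * Get the number of fences emitted but not yet signaled on the
 * requested ring (all asics).
 * Returns the number of outstanding fences, capped at 0x10000000.
 */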
unsigned radeon_fence_count_emitted(struct radeon_device *rdev, int ring)
{
	uint64_t emitted;

	/* We are not protected by the ring lock when reading the last
	 * sequence, but it's ok to report a slightly wrong fence count here.
	 */
	radeon_fence_process(rdev, ring);
	emitted = rdev->fence_drv[ring].sync_seq[ring]
		- atomic64_read(&rdev->fence_drv[ring].last_seq);
	/* to avoid 32 bits warp around */
	if (emitted > 0x10000000) {
		emitted = 0x10000000;
	}
	return (unsigned)emitted;
}
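
/**
 * radeon_fence_need_sync - do we need a semaphore
 *
 * @fence: radeon fence object
 * @dst_ring: which ring to check against
 *
 * Check if the fence needs to be synced against another ring
 * (all asics). If so, we need to emit a semaphore.
 * Returns true if we need to sync with another ring, false if not.
 */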
bool radeon_fence_need_sync(struct radeon_fence *fence, int dst_ring)
{
	struct radeon_fence_driver *fdrv;

	if (!fence) {
		return false;
	}

	if (fence->ring == dst_ring) {
		return false;
	}

	/* we are protected by the ring mutex */
	fdrv = &fence->rdev->fence_drv[dst_ring];
	if (fence->seq <= fdrv->sync_seq[fence->ring]) {
		return false;
	}

	return true;
}
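
/**
 * radeon_fence_note_sync - record the sync point
 *
 * @fence: radeon fence object
 * @dst_ring: which ring to check against
 *
 * Note the sequence number at which point the fence will
 * be synced with the requested ring (all asics).
 */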
void radeon_fence_note_sync(struct radeon_fence *fence, int dst_ring)
{
	struct radeon_fence_driver *dst, *src;
	unsigned i;

	if (!fence) {
		return;
	}

	if (fence->ring == dst_ring) {
		return;
	}

	/* we are protected by the ring mutex */
	src = &fence->rdev->fence_drv[fence->ring];
	dst = &fence->rdev->fence_drv[dst_ring];
	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		if (i == dst_ring) {
			continue;
		}
		dst->sync_seq[i] = max(dst->sync_seq[i], src->sync_seq[i]);
	}
}
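
/**
 * radeon_fence_driver_start_ring - make the fence driver
 * ready for use on the requested ring.
 *
 * @rdev: radeon device pointer
 * @ring: ring index to start the fence driver on
 *
 * Make the fence driver ready for processing (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has.
 * Returns 0 for success, errors for failure.
 */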
int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring)
{
	uint64_t index;
	int r;

	radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
	if (rdev->wb.use_event || !radeon_ring_supports_scratch_reg(rdev, &rdev->ring[ring])) {
		rdev->fence_drv[ring].scratch_reg = 0;
		if (ring != R600_RING_TYPE_UVD_INDEX) {
			index = R600_WB_EVENT_OFFSET + ring * 4;
			rdev->fence_drv[ring].cpu_addr = &rdev->wb.wb[index/4];
			rdev->fence_drv[ring].gpu_addr = rdev->wb.gpu_addr +
							 index;

		} else {
			/* put fence directly behind firmware */
			index = ALIGN(rdev->uvd_fw->size, 8);
			rdev->fence_drv[ring].cpu_addr = rdev->uvd.cpu_addr + index;
			rdev->fence_drv[ring].gpu_addr = rdev->uvd.gpu_addr + index;
		}

	} else {
		r = radeon_scratch_get(rdev, &rdev->fence_drv[ring].scratch_reg);
		if (r) {
			dev_err(rdev->dev, "fence failed to get scratch register\n");
			return r;
		}
		index = RADEON_WB_SCRATCH_OFFSET +
			rdev->fence_drv[ring].scratch_reg -
			rdev->scratch.reg_base;
		rdev->fence_drv[ring].cpu_addr = &rdev->wb.wb[index/4];
		rdev->fence_drv[ring].gpu_addr = rdev->wb.gpu_addr + index;
	}
	radeon_fence_write(rdev, atomic64_read(&rdev->fence_drv[ring].last_seq), ring);
	rdev->fence_drv[ring].initialized = true;
	dev_info(rdev->dev, "fence driver on ring %d use gpu addr 0x%016llx\n",
		 ring, rdev->fence_drv[ring].gpu_addr);
	return 0;
}
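
/**
 * radeon_fence_driver_init_ring - init the fence driver
 * for the requested ring.
 *
 * @rdev: radeon device pointer
 * @ring: ring index to start the fence driver on
 *
 * Init the fence driver for the requested ring (all asics).
 * Helper function for radeon_fence_driver_init().
 */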
static void radeon_fence_driver_init_ring(struct radeon_device *rdev, int ring)
{
	int i;

	rdev->fence_drv[ring].scratch_reg = -1;
	rdev->fence_drv[ring].cpu_addr = NULL;
	rdev->fence_drv[ring].gpu_addr = 0;
	for (i = 0; i < RADEON_NUM_RINGS; ++i)
		rdev->fence_drv[ring].sync_seq[i] = 0;
	atomic64_set(&rdev->fence_drv[ring].last_seq, 0);
	rdev->fence_drv[ring].initialized = false;
	INIT_DELAYED_WORK(&rdev->fence_drv[ring].lockup_work,
			  radeon_fence_check_lockup);
	rdev->fence_drv[ring].rdev = rdev;
}
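
/**
 * radeon_fence_driver_init - init the fence driver
 * for all possible rings.
 *
 * @rdev: radeon device pointer
 *
 * Init the fence driver for all possible rings (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has using
 * radeon_fence_driver_start_ring().
 * Returns 0 for success.
 */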
int radeon_fence_driver_init(struct radeon_device *rdev)
{
	int ring;

	init_waitqueue_head(&rdev->fence_queue);
	for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
		radeon_fence_driver_init_ring(rdev, ring);
	}
	if (radeon_debugfs_fence_init(rdev)) {
		dev_err(rdev->dev, "fence debugfs file creation failed\n");
	}
	return 0;
}
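
/**
 * radeon_fence_driver_fini - tear down the fence driver
 * for all possible rings.
 *
 * @rdev: radeon device pointer
 *
 * Tear down the fence driver for all possible rings (all asics).
 */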
void radeon_fence_driver_fini(struct radeon_device *rdev)
{
	int ring, r;

	mutex_lock(&rdev->ring_lock);
	for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
		if (!rdev->fence_drv[ring].initialized)
			continue;
		r = radeon_fence_wait_empty(rdev, ring);
		if (r) {
			/* no need to trigger GPU reset as we are unloading */
			radeon_fence_driver_force_completion(rdev, ring);
		}
		cancel_delayed_work_sync(&rdev->fence_drv[ring].lockup_work);
		wake_up_all(&rdev->fence_queue);
		radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
		rdev->fence_drv[ring].initialized = false;
	}
	mutex_unlock(&rdev->ring_lock);
}
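
/**
 * radeon_fence_driver_force_completion - force all fence waiters to complete
 *
 * @rdev: radeon device pointer
 * @ring: the ring to complete
 *
 * In case of GPU reset failure make sure no process keeps waiting on a
 * fence that will never complete.
 */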
void radeon_fence_driver_force_completion(struct radeon_device *rdev, int ring)
{
	if (rdev->fence_drv[ring].initialized) {
		radeon_fence_write(rdev, rdev->fence_drv[ring].sync_seq[ring], ring);
		cancel_delayed_work_sync(&rdev->fence_drv[ring].lockup_work);
	}
}
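
/*
 * Fence debugfs
 */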
#if defined(CONFIG_DEBUG_FS)
static int radeon_debugfs_fence_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	int i, j;

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		if (!rdev->fence_drv[i].initialized)
			continue;

		radeon_fence_process(rdev, i);

		seq_printf(m, "--- ring %d ---\n", i);
		seq_printf(m, "Last signaled fence 0x%016llx\n",
			   (unsigned long long)atomic64_read(&rdev->fence_drv[i].last_seq));
		seq_printf(m, "Last emitted 0x%016llx\n",
			   rdev->fence_drv[i].sync_seq[i]);

		for (j = 0; j < RADEON_NUM_RINGS; ++j) {
			if (i != j && rdev->fence_drv[j].initialized)
				seq_printf(m, "Last sync to ring %d 0x%016llx\n",
					   j, rdev->fence_drv[i].sync_seq[j]);
		}
	}
	return 0;
}
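
/*
 * Manually trigger a GPU reset at the next fence wait.
 */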
static int radeon_debugfs_gpu_reset(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;

	down_read(&rdev->exclusive_lock);
	seq_printf(m, "%d\n", rdev->needs_reset);
	rdev->needs_reset = true;
	wake_up_all(&rdev->fence_queue);
	up_read(&rdev->exclusive_lock);

	return 0;
}

static struct drm_info_list radeon_debugfs_fence_list[] = {
	{"radeon_fence_info", &radeon_debugfs_fence_info, 0, NULL},
	{"radeon_gpu_reset", &radeon_debugfs_gpu_reset, 0, NULL}
};
#endif
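
/**
 * radeon_debugfs_fence_init - register the fence debugfs files
 *
 * @rdev: radeon device pointer
 *
 * Registers the fence info and GPU reset debugfs files when
 * CONFIG_DEBUG_FS is enabled.
 */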
int radeon_debugfs_fence_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, radeon_debugfs_fence_list,
					ARRAY_SIZE(radeon_debugfs_fence_list));
#else
	return 0;
#endif
}

static const char *radeon_fence_get_driver_name(struct dma_fence *fence)
{
	return "radeon";
}

static const char *radeon_fence_get_timeline_name(struct dma_fence *f)
{
	struct radeon_fence *fence = to_radeon_fence(f);

	switch (fence->ring) {
	case RADEON_RING_TYPE_GFX_INDEX: return "radeon.gfx";
	case CAYMAN_RING_TYPE_CP1_INDEX: return "radeon.cp1";
	case CAYMAN_RING_TYPE_CP2_INDEX: return "radeon.cp2";
	case R600_RING_TYPE_DMA_INDEX: return "radeon.dma";
	case CAYMAN_RING_TYPE_DMA1_INDEX: return "radeon.dma1";
	case R600_RING_TYPE_UVD_INDEX: return "radeon.uvd";
	case TN_RING_TYPE_VCE1_INDEX: return "radeon.vce1";
	case TN_RING_TYPE_VCE2_INDEX: return "radeon.vce2";
	default: WARN_ON_ONCE(1); return "radeon.unk";
	}
}

static inline bool radeon_test_signaled(struct radeon_fence *fence)
{
	return test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->base.flags);
}

struct radeon_wait_cb {
	struct dma_fence_cb base;
	struct task_struct *task;
};

static void
radeon_fence_wait_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	struct radeon_wait_cb *wait =
		container_of(cb, struct radeon_wait_cb, base);

	wake_up_process(wait->task);
}
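
/**
 * radeon_fence_default_wait - dma_fence_ops wait callback
 *
 * @f: fence to wait on
 * @intr: use interruptible sleep
 * @t: maximum time to wait
 *
 * Sleeps until the fence callback wakes us, the timeout expires, a
 * signal arrives (when @intr is set), or a GPU lockup forces -EDEADLK.
 */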
static signed long radeon_fence_default_wait(struct dma_fence *f, bool intr,
					     signed long t)
{
	struct radeon_fence *fence = to_radeon_fence(f);
	struct radeon_device *rdev = fence->rdev;
	struct radeon_wait_cb cb;

	cb.task = current;

	if (dma_fence_add_callback(f, &cb.base, radeon_fence_wait_cb))
		return t;

	while (t > 0) {
		if (intr)
			set_current_state(TASK_INTERRUPTIBLE);
		else
			set_current_state(TASK_UNINTERRUPTIBLE);

		/*
		 * radeon_test_signaled must be called after
		 * set_current_state to prevent a race with wake_up_process
		 */
		if (radeon_test_signaled(fence))
			break;

		if (rdev->needs_reset) {
			t = -EDEADLK;
			break;
		}

		t = schedule_timeout(t);

		if (t > 0 && intr && signal_pending(current))
			t = -ERESTARTSYS;
	}

	__set_current_state(TASK_RUNNING);
	dma_fence_remove_callback(f, &cb.base);

	return t;
}

const struct dma_fence_ops radeon_fence_ops = {
	.get_driver_name = radeon_fence_get_driver_name,
	.get_timeline_name = radeon_fence_get_timeline_name,
	.enable_signaling = radeon_fence_enable_signaling,
	.signaled = radeon_fence_is_signaled,
	.wait = radeon_fence_default_wait,
	.release = NULL,
};