#include <linux/seq_file.h>
#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/firmware.h>
#include <drm/drmP.h>
#include "radeon_reg.h"
#include "radeon.h"
#include "radeon_trace.h"

/*
 * Fences
 * Fences mark a point in time in the command stream.  They can be used to
 * verify that command execution has reached that point.  A fence is emitted
 * as a command that, when executed by the GPU, writes a monotonically
 * increasing sequence number to a driver-owned location (writeback memory
 * or a scratch register).  Comparing the last value the hardware wrote with
 * the value a fence carries tells whether that fence has signaled.
 */

/**
 * radeon_fence_write - write a fence value
 *
 * @rdev: radeon_device pointer
 * @seq: sequence number to write
 * @ring: ring index the fence is associated with
 *
 * Writes a fence value to memory or a scratch register (all asics).
 */
static void radeon_fence_write(struct radeon_device *rdev, u32 seq, int ring)
{
	struct radeon_fence_driver *drv = &rdev->fence_drv[ring];
	if (likely(rdev->wb.enabled || !drv->scratch_reg)) {
		if (drv->cpu_addr) {
			*drv->cpu_addr = cpu_to_le32(seq);
		}
	} else {
		WREG32(drv->scratch_reg, seq);
	}
}

/**
 * radeon_fence_read - read a fence value
 *
 * @rdev: radeon_device pointer
 * @ring: ring index the fence is associated with
 *
 * Reads a fence value from memory or a scratch register (all asics).
 * Returns the value of the fence read from memory.
 */
static u32 radeon_fence_read(struct radeon_device *rdev, int ring)
{
	struct radeon_fence_driver *drv = &rdev->fence_drv[ring];
	u32 seq = 0;

	if (likely(rdev->wb.enabled || !drv->scratch_reg)) {
		if (drv->cpu_addr) {
			seq = le32_to_cpu(*drv->cpu_addr);
		} else {
			seq = lower_32_bits(atomic64_read(&drv->last_seq));
		}
	} else {
		seq = RREG32(drv->scratch_reg);
	}
	return seq;
}

/**
 * radeon_fence_emit - emit a fence on the requested ring
 *
 * @rdev: radeon_device pointer
 * @fence: radeon fence object
 * @ring: ring index the fence is associated with
 *
 * Emits a fence command on the requested ring (all asics).
 * Returns 0 on success, -ENOMEM on failure.
 */
int radeon_fence_emit(struct radeon_device *rdev,
		      struct radeon_fence **fence,
		      int ring)
{
	/* we are protected by the ring emission mutex */
	*fence = kmalloc(sizeof(struct radeon_fence), GFP_KERNEL);
	if ((*fence) == NULL) {
		return -ENOMEM;
	}
	kref_init(&((*fence)->kref));
	(*fence)->rdev = rdev;
	(*fence)->seq = ++rdev->fence_drv[ring].sync_seq[ring];
	(*fence)->ring = ring;
	radeon_fence_ring_emit(rdev, ring, *fence);
	trace_radeon_fence_emit(rdev->ddev, (*fence)->seq);
	return 0;
}
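
/*
 * Minimal usage sketch: the typical emit/wait/unref life cycle of a fence
 * as seen from a caller.  The ring index and the surrounding error handling
 * are illustrative assumptions, not requirements imposed by
 * radeon_fence_emit() itself (callers are expected to hold the ring
 * emission mutex when emitting, see the comment above).
 *
 *	struct radeon_fence *fence;
 *	int r;
 *
 *	r = radeon_fence_emit(rdev, &fence, RADEON_RING_TYPE_GFX_INDEX);
 *	if (r)
 *		return r;
 *	r = radeon_fence_wait(fence, false);	// block until the GPU passes it
 *	radeon_fence_unref(&fence);		// drop our reference
 *	return r;
 */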

/**
 * radeon_fence_process - process a fence
 *
 * @rdev: radeon_device pointer
 * @ring: ring index the fence is associated with
 *
 * Checks the current fence value and wakes the fence queue
 * if the sequence number has increased (all asics).
 */
void radeon_fence_process(struct radeon_device *rdev, int ring)
{
	uint64_t seq, last_seq, last_emitted;
	unsigned count_loop = 0;
	bool wake = false;

	/* Note there is a scenario here for an infinite loop but it's
	 * very unlikely to happen.  For it to happen, the current polling
	 * process needs to be interrupted by another process and another
	 * process needs to update the last_seq between the atomic read and
	 * xchg of the current process.
	 *
	 * Moreover, for this to go into an infinite loop there need to be
	 * continuously new fences signaled, ie. radeon_fence_read needs
	 * to return a different value each time for both the currently
	 * polling process and the other process that xchgs the last_seq,
	 * and the value the other process sets as last_seq must be higher
	 * than the seq value we just read.  Which means that the current
	 * process needs to be interrupted after radeon_fence_read and
	 * before the atomic xchg.
	 *
	 * To be even more safe we count the number of times we loop and
	 * we bail after 10 loops, just accepting the fact that we might
	 * have temporarily set last_seq not to the true real last
	 * seq but to an older one.
	 */
	last_seq = atomic64_read(&rdev->fence_drv[ring].last_seq);
	do {
		last_emitted = rdev->fence_drv[ring].sync_seq[ring];
		seq = radeon_fence_read(rdev, ring);
		seq |= last_seq & 0xffffffff00000000LL;
		if (seq < last_seq) {
			seq &= 0xffffffff;
			seq |= last_emitted & 0xffffffff00000000LL;
		}

		if (seq <= last_seq || seq > last_emitted) {
			break;
		}

		/* If we loop over we don't want to return without
		 * checking if a fence is signaled, as it means that the
		 * seq we just read is different from the previous one.
		 */
		wake = true;
		last_seq = seq;
		if ((count_loop++) > 10) {
			/* We looped over too many times, leave with the
			 * fact that we might have set an older fence
			 * seq than the current real last seq as signaled
			 * by the hw.
			 */
			break;
		}
	} while (atomic64_xchg(&rdev->fence_drv[ring].last_seq, seq) > seq);

	if (wake) {
		rdev->fence_drv[ring].last_activity = jiffies;
		wake_up_all(&rdev->fence_queue);
	}
}
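
/*
 * Worked example of the 64-bit sequence reconstruction above, with made-up
 * numbers: if last_seq is 0x0000000100000005 and the hardware has since
 * written fences up to ...00000009, radeon_fence_read() returns 0x00000009
 * and OR-ing in the upper half of last_seq gives 0x0000000100000009.  If
 * the lower 32 bits wrapped instead (read 0x00000002 while last_seq is
 * 0x00000001fffffffe), the first guess 0x0000000100000002 is below
 * last_seq, so the code re-bases on the upper half of last_emitted and
 * gets 0x0000000200000002.  This is unambiguous as long as fewer than 2^32
 * fences are outstanding between last_seq and last_emitted, which the
 * "<= last_seq or > last_emitted" sanity check enforces.
 */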

/**
 * radeon_fence_destroy - destroy a fence
 *
 * @kref: fence kref
 *
 * Frees the fence object when the last reference is dropped (all asics).
 */
static void radeon_fence_destroy(struct kref *kref)
{
	struct radeon_fence *fence;

	fence = container_of(kref, struct radeon_fence, kref);
	kfree(fence);
}

/**
 * radeon_fence_seq_signaled - check if a fence sequence number has signaled
 *
 * @rdev: radeon device pointer
 * @seq: sequence number
 * @ring: ring index the fence is associated with
 *
 * Check if the last signaled fence sequence number is >= the requested
 * sequence number (all asics).
 * Returns true if the fence has signaled (current fence value
 * is >= requested value) or false if it has not (current fence
 * value is < the requested value).  Helper function for
 * radeon_fence_signaled().
 */
static bool radeon_fence_seq_signaled(struct radeon_device *rdev,
				      u64 seq, unsigned ring)
{
	if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) {
		return true;
	}
	/* poll new last sequence at least once */
	radeon_fence_process(rdev, ring);
	if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) {
		return true;
	}
	return false;
}

/**
 * radeon_fence_signaled - check if a fence has signaled
 *
 * @fence: radeon fence object
 *
 * Check if the requested fence has signaled (all asics).
 * Returns true if the fence has signaled or false if it has not.
 */
bool radeon_fence_signaled(struct radeon_fence *fence)
{
	if (!fence) {
		return true;
	}
	if (fence->seq == RADEON_FENCE_SIGNALED_SEQ) {
		return true;
	}
	if (radeon_fence_seq_signaled(fence->rdev, fence->seq, fence->ring)) {
		fence->seq = RADEON_FENCE_SIGNALED_SEQ;
		return true;
	}
	return false;
}

/**
 * radeon_fence_wait_seq - wait for a specific sequence number
 *
 * @rdev: radeon device pointer
 * @target_seq: sequence number we want to wait for
 * @ring: ring index the fence is associated with
 * @intr: use interruptible sleep
 * @lock_ring: whether the ring should be locked or not
 *
 * Wait for the requested sequence number to be written (all asics).
 * @intr selects whether to use interruptible (true) or non-interruptible
 * (false) sleep when waiting for the sequence number.  Helper function
 * for radeon_fence_wait(), et al.
 * Returns 0 if the sequence number has passed, error for all other cases.
 * -EDEADLK is returned when a GPU lockup has been detected and the ring is
 * marked as not ready so no further jobs get scheduled until a successful
 * reset.
 */
static int radeon_fence_wait_seq(struct radeon_device *rdev, u64 target_seq,
				 unsigned ring, bool intr, bool lock_ring)
{
	unsigned long timeout, last_activity;
	uint64_t seq;
	unsigned i;
	bool signaled;
	int r;

	while (target_seq > atomic64_read(&rdev->fence_drv[ring].last_seq)) {
		if (!rdev->ring[ring].ready) {
			return -EBUSY;
		}

		timeout = jiffies - RADEON_FENCE_JIFFIES_TIMEOUT;
		if (time_after(rdev->fence_drv[ring].last_activity, timeout)) {
			/* the normal case, timeout is somewhere before last_activity */
			timeout = rdev->fence_drv[ring].last_activity - timeout;
		} else {
			/* either jiffies wrapped around, or no fence was signaled in the last 500ms;
			 * anyway we will just wait for the minimum amount and then check for a lockup
			 */
			timeout = 1;
		}
		seq = atomic64_read(&rdev->fence_drv[ring].last_seq);
		/* save current last activity value, used to check for GPU lockups */
		last_activity = rdev->fence_drv[ring].last_activity;

		trace_radeon_fence_wait_begin(rdev->ddev, seq);
		radeon_irq_kms_sw_irq_get(rdev, ring);
		if (intr) {
			r = wait_event_interruptible_timeout(rdev->fence_queue,
				(signaled = radeon_fence_seq_signaled(rdev, target_seq, ring)),
				timeout);
		} else {
			r = wait_event_timeout(rdev->fence_queue,
				(signaled = radeon_fence_seq_signaled(rdev, target_seq, ring)),
				timeout);
		}
		radeon_irq_kms_sw_irq_put(rdev, ring);
		if (unlikely(r < 0)) {
			return r;
		}
		trace_radeon_fence_wait_end(rdev->ddev, seq);

		if (unlikely(!signaled)) {
			/* we were interrupted for some reason and fence
			 * isn't signaled yet, resume waiting */
			if (r) {
				continue;
			}

			/* check if sequence value has changed since last_activity */
			if (seq != atomic64_read(&rdev->fence_drv[ring].last_seq)) {
				continue;
			}

			if (lock_ring) {
				mutex_lock(&rdev->ring_lock);
			}

			/* test if somebody else has already decided that this is a lockup */
			if (last_activity != rdev->fence_drv[ring].last_activity) {
				if (lock_ring) {
					mutex_unlock(&rdev->ring_lock);
				}
				continue;
			}

			if (radeon_ring_is_lockup(rdev, ring, &rdev->ring[ring])) {
				/* good news, we believe it's a lockup */
				dev_warn(rdev->dev, "GPU lockup (waiting for 0x%016llx last fence id 0x%016llx)\n",
					 target_seq, seq);

				/* change last activity so nobody else thinks there is a lockup */
				for (i = 0; i < RADEON_NUM_RINGS; ++i) {
					rdev->fence_drv[i].last_activity = jiffies;
				}

				/* mark the ring as not ready any more */
				rdev->ring[ring].ready = false;
				if (lock_ring) {
					mutex_unlock(&rdev->ring_lock);
				}
				return -EDEADLK;
			}

			if (lock_ring) {
				mutex_unlock(&rdev->ring_lock);
			}
		}
	}
	return 0;
}
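
/*
 * Worked example of the timeout computation above, with illustrative
 * jiffies values: RADEON_FENCE_JIFFIES_TIMEOUT is half a second worth of
 * jiffies, so with an assumed HZ of 250 it is 125.  If jiffies is 10125
 * and last_activity is 10100, then timeout = 10125 - 125 = 10000, which is
 * before last_activity, so the wait sleeps for 10100 - 10000 = 100 jiffies
 * before rechecking.  If last_activity is older than half a second (or
 * jiffies wrapped around), the code sleeps for the minimum of 1 jiffy and
 * then runs the lockup check.
 */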

/**
 * radeon_fence_wait - wait for a fence to signal
 *
 * @fence: radeon fence object
 * @intr: use interruptible sleep
 *
 * Wait for the requested fence to signal (all asics).
 * @intr selects whether to use interruptible (true) or non-interruptible
 * (false) sleep when waiting for the fence.
 * Returns 0 if the fence has passed, error for all other cases.
 */
int radeon_fence_wait(struct radeon_fence *fence, bool intr)
{
	int r;

	if (fence == NULL) {
		WARN(1, "Querying an invalid fence: %p!\n", fence);
		return -EINVAL;
	}

	r = radeon_fence_wait_seq(fence->rdev, fence->seq,
				  fence->ring, intr, true);
	if (r) {
		return r;
	}
	fence->seq = RADEON_FENCE_SIGNALED_SEQ;
	return 0;
}

static bool radeon_fence_any_seq_signaled(struct radeon_device *rdev, u64 *seq)
{
	unsigned i;

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		if (seq[i] && radeon_fence_seq_signaled(rdev, seq[i], i)) {
			return true;
		}
	}
	return false;
}

/**
 * radeon_fence_wait_any_seq - wait for a sequence number on any ring
 *
 * @rdev: radeon device pointer
 * @target_seq: sequence number(s) we want to wait for
 * @intr: use interruptible sleep
 *
 * Wait for the requested sequence number(s) to be written by any ring
 * (all asics).  The sequence number array is indexed by ring id.
 * @intr selects whether to use interruptible (true) or non-interruptible
 * (false) sleep when waiting for the sequence number.  Helper function
 * for radeon_fence_wait_any(), et al.
 * Returns 0 if the sequence number has passed, error for all other cases.
 */
static int radeon_fence_wait_any_seq(struct radeon_device *rdev,
				     u64 *target_seq, bool intr)
{
	unsigned long timeout, last_activity, tmp;
	unsigned i, ring = RADEON_NUM_RINGS;
	bool signaled;
	int r;

	for (i = 0, last_activity = 0; i < RADEON_NUM_RINGS; ++i) {
		if (!target_seq[i]) {
			continue;
		}

		/* use the most recent one as indicator */
		if (time_after(rdev->fence_drv[i].last_activity, last_activity)) {
			last_activity = rdev->fence_drv[i].last_activity;
		}

		/* For lockup detection just pick the lowest ring we are
		 * actively waiting for
		 */
		if (i < ring) {
			ring = i;
		}
	}

	/* nothing to wait for ? */
	if (ring == RADEON_NUM_RINGS) {
		return -ENOENT;
	}

	while (!radeon_fence_any_seq_signaled(rdev, target_seq)) {
		timeout = jiffies - RADEON_FENCE_JIFFIES_TIMEOUT;
		if (time_after(last_activity, timeout)) {
			/* the normal case, timeout is somewhere before last_activity */
			timeout = last_activity - timeout;
		} else {
			/* either jiffies wrapped around, or no fence was signaled in the last 500ms;
			 * anyway we will just wait for the minimum amount and then check for a lockup
			 */
			timeout = 1;
		}

		trace_radeon_fence_wait_begin(rdev->ddev, target_seq[ring]);
		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
			if (target_seq[i]) {
				radeon_irq_kms_sw_irq_get(rdev, i);
			}
		}
		if (intr) {
			r = wait_event_interruptible_timeout(rdev->fence_queue,
				(signaled = radeon_fence_any_seq_signaled(rdev, target_seq)),
				timeout);
		} else {
			r = wait_event_timeout(rdev->fence_queue,
				(signaled = radeon_fence_any_seq_signaled(rdev, target_seq)),
				timeout);
		}
		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
			if (target_seq[i]) {
				radeon_irq_kms_sw_irq_put(rdev, i);
			}
		}
		if (unlikely(r < 0)) {
			return r;
		}
		trace_radeon_fence_wait_end(rdev->ddev, target_seq[ring]);

		if (unlikely(!signaled)) {
			/* we were interrupted for some reason and fence
			 * isn't signaled yet, resume waiting */
			if (r) {
				continue;
			}

			mutex_lock(&rdev->ring_lock);
			for (i = 0, tmp = 0; i < RADEON_NUM_RINGS; ++i) {
				if (time_after(rdev->fence_drv[i].last_activity, tmp)) {
					tmp = rdev->fence_drv[i].last_activity;
				}
			}

			/* test if somebody else has already decided that this is a lockup */
			if (last_activity != tmp) {
				last_activity = tmp;
				mutex_unlock(&rdev->ring_lock);
				continue;
			}

			if (radeon_ring_is_lockup(rdev, ring, &rdev->ring[ring])) {
				/* good news, we believe it's a lockup */
				dev_warn(rdev->dev, "GPU lockup (waiting for 0x%016llx)\n",
					 target_seq[ring]);

				/* change last activity so nobody else thinks there is a lockup */
				for (i = 0; i < RADEON_NUM_RINGS; ++i) {
					rdev->fence_drv[i].last_activity = jiffies;
				}

				/* mark the ring as not ready any more */
				rdev->ring[ring].ready = false;
				mutex_unlock(&rdev->ring_lock);
				return -EDEADLK;
			}
			mutex_unlock(&rdev->ring_lock);
		}
	}
	return 0;
}

/**
 * radeon_fence_wait_any - wait for a fence to signal on any ring
 *
 * @rdev: radeon device pointer
 * @fences: radeon fence object(s)
 * @intr: use interruptible sleep
 *
 * Wait for any requested fence to signal (all asics).  The fence array
 * is indexed by ring id.  @intr selects whether to use interruptible
 * (true) or non-interruptible (false) sleep when waiting for the fences.
 * Returns 0 if any fence has passed, error for all other cases.
 */
int radeon_fence_wait_any(struct radeon_device *rdev,
			  struct radeon_fence **fences,
			  bool intr)
{
	uint64_t seq[RADEON_NUM_RINGS];
	unsigned i;
	int r;

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		seq[i] = 0;

		if (!fences[i]) {
			continue;
		}

		if (fences[i]->seq == RADEON_FENCE_SIGNALED_SEQ) {
			/* something was already signaled */
			return 0;
		}

		seq[i] = fences[i]->seq;
	}

	r = radeon_fence_wait_any_seq(rdev, seq, intr);
	if (r) {
		return r;
	}
	return 0;
}
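
/*
 * Usage sketch for radeon_fence_wait_any(), with assumed fences: a caller
 * holding one fence per ring of interest fills a RADEON_NUM_RINGS sized
 * array (NULL entries are skipped) and blocks until the first one signals.
 *
 *	struct radeon_fence *fences[RADEON_NUM_RINGS] = { NULL };
 *	int r;
 *
 *	fences[RADEON_RING_TYPE_GFX_INDEX] = gfx_fence;	// assumed fences
 *	fences[R600_RING_TYPE_DMA_INDEX] = dma_fence;
 *	r = radeon_fence_wait_any(rdev, fences, true);	// interruptible
 *	if (r)
 *		return r;	// -ENOENT if no fence was set, etc.
 */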

/**
 * radeon_fence_wait_next_locked - wait for the next fence to signal
 *
 * @rdev: radeon device pointer
 * @ring: ring index the fence is associated with
 *
 * Wait for the next fence on the requested ring to signal (all asics).
 * Returns 0 if the next fence has passed, error for all other cases.
 * Caller must hold the ring lock.
 */
int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring)
{
	uint64_t seq;

	seq = atomic64_read(&rdev->fence_drv[ring].last_seq) + 1ULL;
	if (seq >= rdev->fence_drv[ring].sync_seq[ring]) {
		/* nothing to wait for, last_seq is
		 * already the last emitted fence
		 */
		return -ENOENT;
	}
	return radeon_fence_wait_seq(rdev, seq, ring, false, false);
}

/**
 * radeon_fence_wait_empty_locked - wait for all fences to signal
 *
 * @rdev: radeon device pointer
 * @ring: ring index the fence is associated with
 *
 * Wait for all fences on the requested ring to signal (all asics).
 * Returns 0 if the fences have passed, error for all other cases.
 * Caller must hold the ring lock.
 */
int radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring)
{
	uint64_t seq = rdev->fence_drv[ring].sync_seq[ring];
	int r;

	r = radeon_fence_wait_seq(rdev, seq, ring, false, false);
	if (r) {
		if (r == -EDEADLK) {
			return -EDEADLK;
		}
		dev_err(rdev->dev, "error waiting for ring[%d] to become idle (%d)\n",
			ring, r);
	}
	return 0;
}

/**
 * radeon_fence_ref - take a ref on a fence
 *
 * @fence: radeon fence object
 *
 * Take a reference on a fence (all asics).
 * Returns the fence.
 */
struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence)
{
	kref_get(&fence->kref);
	return fence;
}

/**
 * radeon_fence_unref - remove a ref on a fence
 *
 * @fence: pointer to the fence pointer, cleared on return
 *
 * Remove a reference on a fence (all asics) and free it
 * when the last reference is dropped.
 */
void radeon_fence_unref(struct radeon_fence **fence)
{
	struct radeon_fence *tmp = *fence;

	*fence = NULL;
	if (tmp) {
		kref_put(&tmp->kref, radeon_fence_destroy);
	}
}

/**
 * radeon_fence_count_emitted - get the count of emitted fences
 *
 * @rdev: radeon device pointer
 * @ring: ring index the fence is associated with
 *
 * Get the number of fences emitted on the requested ring (all asics)
 * that have not signaled yet.
 * Returns the number of outstanding fences on the ring.
 */
unsigned radeon_fence_count_emitted(struct radeon_device *rdev, int ring)
{
	uint64_t emitted;

	/* We are not protected by the ring lock when reading the last sequence,
	 * but it's ok to report a slightly wrong fence count here.
	 */
	radeon_fence_process(rdev, ring);
	emitted = rdev->fence_drv[ring].sync_seq[ring]
		- atomic64_read(&rdev->fence_drv[ring].last_seq);
	/* to avoid 32 bit wrap around */
	if (emitted > 0x10000000) {
		emitted = 0x10000000;
	}
	return (unsigned)emitted;
}

/**
 * radeon_fence_need_sync - do we need a semaphore
 *
 * @fence: radeon fence object
 * @dst_ring: which ring to check against
 *
 * Check if the fence needs to be synced against another ring
 * (all asics).  If so, we need to emit a semaphore.
 * Returns true if we need to sync with another ring, false if not.
 */
bool radeon_fence_need_sync(struct radeon_fence *fence, int dst_ring)
{
	struct radeon_fence_driver *fdrv;

	if (!fence) {
		return false;
	}

	if (fence->ring == dst_ring) {
		return false;
	}

	/* we are protected by the ring mutex */
	fdrv = &fence->rdev->fence_drv[dst_ring];
	if (fence->seq <= fdrv->sync_seq[fence->ring]) {
		return false;
	}

	return true;
}

/**
 * radeon_fence_note_sync - record the sync point
 *
 * @fence: radeon fence object
 * @dst_ring: which ring to sync
 *
 * Note the sequence number at which point the fence will
 * be synced with the requested ring (all asics).
 */
void radeon_fence_note_sync(struct radeon_fence *fence, int dst_ring)
{
	struct radeon_fence_driver *dst, *src;
	unsigned i;

	if (!fence) {
		return;
	}

	if (fence->ring == dst_ring) {
		return;
	}

	/* we are protected by the ring mutex */
	src = &fence->rdev->fence_drv[fence->ring];
	dst = &fence->rdev->fence_drv[dst_ring];
	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		if (i == dst_ring) {
			continue;
		}
		dst->sync_seq[i] = max(dst->sync_seq[i], src->sync_seq[i]);
	}
}
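
/*
 * Sketch of the inter-ring synchronization pattern these two helpers
 * support, loosely following what the semaphore code does elsewhere in the
 * driver (the semaphore emission step is elided since it is not a fixed
 * API of this file):
 *
 *	if (radeon_fence_need_sync(fence, dst_ring)) {
 *		// emit a semaphore wait on dst_ring against fence->ring,
 *		// then record that dst_ring is now synced this far
 *		radeon_fence_note_sync(fence, dst_ring);
 *	}
 *
 * Skipping the note_sync() call would be harmless but would cause
 * redundant semaphore waits the next time the same rings are synced.
 */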

/**
 * radeon_fence_driver_start_ring - make the fence driver
 * ready for use on the requested ring.
 *
 * @rdev: radeon device pointer
 * @ring: ring index to start the fence driver on
 *
 * Make the fence driver ready for processing (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has.
 * Returns 0 for success, errors for failure.
 */
int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring)
{
	uint64_t index;
	int r;

	radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
	if (rdev->wb.use_event ||
	    !radeon_ring_supports_scratch_reg(rdev, &rdev->ring[ring])) {
		rdev->fence_drv[ring].scratch_reg = 0;
		if (ring != R600_RING_TYPE_UVD_INDEX) {
			index = R600_WB_EVENT_OFFSET + ring * 4;
			rdev->fence_drv[ring].cpu_addr = &rdev->wb.wb[index/4];
			rdev->fence_drv[ring].gpu_addr = rdev->wb.gpu_addr +
							 index;

		} else {
			/* put fence directly behind firmware */
			index = ALIGN(rdev->uvd_fw->size, 8);
			rdev->fence_drv[ring].cpu_addr = rdev->uvd.cpu_addr + index;
			rdev->fence_drv[ring].gpu_addr = rdev->uvd.gpu_addr + index;
		}

	} else {
		r = radeon_scratch_get(rdev, &rdev->fence_drv[ring].scratch_reg);
		if (r) {
			dev_err(rdev->dev, "fence failed to get scratch register\n");
			return r;
		}
		index = RADEON_WB_SCRATCH_OFFSET +
			rdev->fence_drv[ring].scratch_reg -
			rdev->scratch.reg_base;
		rdev->fence_drv[ring].cpu_addr = &rdev->wb.wb[index/4];
		rdev->fence_drv[ring].gpu_addr = rdev->wb.gpu_addr + index;
	}
	radeon_fence_write(rdev, atomic64_read(&rdev->fence_drv[ring].last_seq), ring);
	rdev->fence_drv[ring].initialized = true;
	dev_info(rdev->dev, "fence driver on ring %d use gpu addr 0x%016llx and cpu addr 0x%p\n",
		 ring, rdev->fence_drv[ring].gpu_addr, rdev->fence_drv[ring].cpu_addr);
	return 0;
}
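
/*
 * Worked example of the writeback addressing above, using assumed values:
 * for a non-UVD ring, index = R600_WB_EVENT_OFFSET + ring * 4 is a byte
 * offset into the writeback page.  Since rdev->wb.wb is an array of u32,
 * the CPU pointer is &rdev->wb.wb[index / 4], while the GPU address is the
 * byte-based rdev->wb.gpu_addr + index.  Assuming R600_WB_EVENT_OFFSET is
 * 3072 and ring is 1, the fence value would live at byte offset 3076,
 * i.e. wb.wb[769].
 */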

/**
 * radeon_fence_driver_init_ring - init the fence driver
 * for the requested ring.
 *
 * @rdev: radeon device pointer
 * @ring: ring index to start the fence driver on
 *
 * Init the fence driver for the requested ring (all asics).
 * Helper function for radeon_fence_driver_init().
 */
static void radeon_fence_driver_init_ring(struct radeon_device *rdev, int ring)
{
	int i;

	rdev->fence_drv[ring].scratch_reg = -1;
	rdev->fence_drv[ring].cpu_addr = NULL;
	rdev->fence_drv[ring].gpu_addr = 0;
	for (i = 0; i < RADEON_NUM_RINGS; ++i)
		rdev->fence_drv[ring].sync_seq[i] = 0;
	atomic64_set(&rdev->fence_drv[ring].last_seq, 0);
	rdev->fence_drv[ring].last_activity = jiffies;
	rdev->fence_drv[ring].initialized = false;
}

/**
 * radeon_fence_driver_init - init the fence driver
 * for all possible rings.
 *
 * @rdev: radeon device pointer
 *
 * Init the fence driver for all possible rings (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has using
 * radeon_fence_driver_start_ring().
 * Returns 0 for success.
 */
int radeon_fence_driver_init(struct radeon_device *rdev)
{
	int ring;

	init_waitqueue_head(&rdev->fence_queue);
	for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
		radeon_fence_driver_init_ring(rdev, ring);
	}
	if (radeon_debugfs_fence_init(rdev)) {
		dev_err(rdev->dev, "fence debugfs file creation failed\n");
	}
	return 0;
}

/**
 * radeon_fence_driver_fini - tear down the fence driver
 * for all possible rings.
 *
 * @rdev: radeon device pointer
 *
 * Tear down the fence driver for all possible rings (all asics).
 */
void radeon_fence_driver_fini(struct radeon_device *rdev)
{
	int ring, r;

	mutex_lock(&rdev->ring_lock);
	for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
		if (!rdev->fence_drv[ring].initialized)
			continue;
		r = radeon_fence_wait_empty_locked(rdev, ring);
		if (r) {
			/* no need to trigger GPU reset as we are unloading */
			radeon_fence_driver_force_completion(rdev);
		}
		wake_up_all(&rdev->fence_queue);
		radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
		rdev->fence_drv[ring].initialized = false;
	}
	mutex_unlock(&rdev->ring_lock);
}

/**
 * radeon_fence_driver_force_completion - force all fence waiters to complete
 *
 * @rdev: radeon device pointer
 *
 * In case of GPU reset failure make sure no process keeps waiting on a
 * fence that will never complete: write the last emitted sequence number
 * as the signaled value for every initialized ring.
 */
void radeon_fence_driver_force_completion(struct radeon_device *rdev)
{
	int ring;

	for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
		if (!rdev->fence_drv[ring].initialized)
			continue;
		radeon_fence_write(rdev, rdev->fence_drv[ring].sync_seq[ring], ring);
	}
}

/*
 * Fence debugfs
 */
#if defined(CONFIG_DEBUG_FS)
static int radeon_debugfs_fence_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	int i, j;

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		if (!rdev->fence_drv[i].initialized)
			continue;

		seq_printf(m, "--- ring %d ---\n", i);
		seq_printf(m, "Last signaled fence 0x%016llx\n",
			   (unsigned long long)atomic64_read(&rdev->fence_drv[i].last_seq));
		seq_printf(m, "Last emitted 0x%016llx\n",
			   rdev->fence_drv[i].sync_seq[i]);

		for (j = 0; j < RADEON_NUM_RINGS; ++j) {
			if (i != j && rdev->fence_drv[j].initialized)
				seq_printf(m, "Last sync to ring %d 0x%016llx\n",
					   j, rdev->fence_drv[i].sync_seq[j]);
		}
	}
	return 0;
}

static struct drm_info_list radeon_debugfs_fence_list[] = {
	{"radeon_fence_info", &radeon_debugfs_fence_info, 0, NULL},
};
#endif

int radeon_debugfs_fence_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, radeon_debugfs_fence_list, 1);
#else
	return 0;
#endif
}
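
/*
 * The debugfs file registered above lives under the DRM debugfs root,
 * typically /sys/kernel/debug/dri/<minor>/radeon_fence_info (the exact
 * path depends on the kernel configuration), and prints per-ring state in
 * the format of the seq_printf() calls; a sketch with made-up numbers:
 *
 *	--- ring 0 ---
 *	Last signaled fence 0x0000000000001053
 *	Last emitted 0x0000000000001055
 *	Last sync to ring 3 0x0000000000000042
 */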