#include <linux/seq_file.h>
#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/firmware.h>
#include <linux/pm_runtime.h>

#include <drm/drm_drv.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
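
/*
 * Fences mark an event in the GPU's command pipeline and are used for
 * CPU/GPU synchronization.  Once the fence sequence number has been
 * written back by the ring, all work submitted before that fence is
 * known to have completed on the GPU.
 */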
struct amdgpu_fence {
        struct dma_fence base;

        /* ring on which this fence was emitted */
        struct amdgpu_ring *ring;
};

static struct kmem_cache *amdgpu_fence_slab;

int amdgpu_fence_slab_init(void)
{
        amdgpu_fence_slab = kmem_cache_create(
                "amdgpu_fence", sizeof(struct amdgpu_fence), 0,
                SLAB_HWCACHE_ALIGN, NULL);
        if (!amdgpu_fence_slab)
                return -ENOMEM;
        return 0;
}

void amdgpu_fence_slab_fini(void)
{
        /* wait for in-flight RCU frees of fences before destroying the slab */
        rcu_barrier();
        kmem_cache_destroy(amdgpu_fence_slab);
}

/* cast helper: returns NULL if @f is not an amdgpu fence */
static const struct dma_fence_ops amdgpu_fence_ops;
static inline struct amdgpu_fence *to_amdgpu_fence(struct dma_fence *f)
{
        struct amdgpu_fence *__f = container_of(f, struct amdgpu_fence, base);

        if (__f->base.ops == &amdgpu_fence_ops)
                return __f;

        return NULL;
}
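
/**
 * amdgpu_fence_write - write a fence value
 *
 * @ring: ring the fence is associated with
 * @seq: sequence number to write
 *
 * Writes a fence value to the ring's fence writeback location (all asics).
 */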
static void amdgpu_fence_write(struct amdgpu_ring *ring, u32 seq)
{
        struct amdgpu_fence_driver *drv = &ring->fence_drv;

        if (drv->cpu_addr)
                *drv->cpu_addr = cpu_to_le32(seq);
}
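
/**
 * amdgpu_fence_read - read a fence value
 *
 * @ring: ring the fence is associated with
 *
 * Reads the current fence value from the writeback location, falling back
 * to the last processed sequence number if no CPU address is set up.
 * Returns the fence value read.
 */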
static u32 amdgpu_fence_read(struct amdgpu_ring *ring)
{
        struct amdgpu_fence_driver *drv = &ring->fence_drv;
        u32 seq = 0;

        if (drv->cpu_addr)
                seq = le32_to_cpu(*drv->cpu_addr);
        else
                seq = atomic_read(&drv->last_seq);

        return seq;
}
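
/**
 * amdgpu_fence_emit - emit a fence on the requested ring
 *
 * @ring: ring the fence is associated with
 * @f: resulting fence object
 * @job: job the fence is embedded in, or NULL for a standalone fence
 * @flags: flags passed to the ring's fence emission
 *
 * Emits a fence command on the requested ring (all asics).
 * Returns 0 on success, -ENOMEM if allocation fails, or the error from
 * waiting for the fence that previously occupied the same slot.
 */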
int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f, struct amdgpu_job *job,
                      unsigned flags)
{
        struct amdgpu_device *adev = ring->adev;
        struct dma_fence *fence;
        struct amdgpu_fence *am_fence;
        struct dma_fence __rcu **ptr;
        uint32_t seq;
        int r;

        if (job == NULL) {
                /* create a separate hw fence */
                am_fence = kmem_cache_alloc(amdgpu_fence_slab, GFP_ATOMIC);
                if (am_fence == NULL)
                        return -ENOMEM;
                fence = &am_fence->base;
                am_fence->ring = ring;
        } else {
                /* use the fence embedded in the job */
                fence = &job->hw_fence;
        }

        seq = ++ring->fence_drv.sync_seq;
        if (job != NULL && job->job_run_counter) {
                /* reinit seq for resubmitted jobs */
                fence->seqno = seq;
        } else {
                dma_fence_init(fence, &amdgpu_fence_ops,
                               &ring->fence_drv.lock,
                               adev->fence_context + ring->idx,
                               seq);
        }

        if (job != NULL) {
                /* mark the fence as embedded in a job so it is kept
                 * alive together with the job it belongs to
                 */
                set_bit(AMDGPU_FENCE_FLAG_EMBED_IN_JOB_BIT, &fence->flags);
        }

        amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
                               seq, flags | AMDGPU_FENCE_FLAG_INT);
        pm_runtime_get_noresume(adev_to_drm(adev)->dev);
        ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
        if (unlikely(rcu_dereference_protected(*ptr, 1))) {
                struct dma_fence *old;

                rcu_read_lock();
                old = dma_fence_get_rcu_safe(ptr);
                rcu_read_unlock();

                if (old) {
                        r = dma_fence_wait(old, false);
                        dma_fence_put(old);
                        if (r)
                                return r;
                }
        }

        /* This function can't be called concurrently anyway, otherwise
         * emitting the fence would mess up the hardware ring buffer.
         */
        rcu_assign_pointer(*ptr, dma_fence_get(fence));

        *f = fence;

        return 0;
}
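
/**
 * amdgpu_fence_emit_polling - emit a fence on the requested ring
 *
 * @ring: ring the fence is associated with
 * @s: resulting sequence number
 * @timeout: the timeout for waiting in usecs
 *
 * Emits a fence command on the requested ring (all asics).
 * Used for unprocessed commands, polling only.
 * Returns 0 on success, -EINVAL if @s is NULL, or -ETIMEDOUT if the fence
 * previously occupying the slot did not signal in time.
 */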
int amdgpu_fence_emit_polling(struct amdgpu_ring *ring, uint32_t *s,
                              uint32_t timeout)
{
        uint32_t seq;
        signed long r;

        if (!s)
                return -EINVAL;

        seq = ++ring->fence_drv.sync_seq;
        r = amdgpu_fence_wait_polling(ring,
                                      seq - ring->fence_drv.num_fences_mask,
                                      timeout);
        if (r < 1)
                return -ETIMEDOUT;

        amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
                               seq, 0);

        *s = seq;

        return 0;
}
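
/**
 * amdgpu_fence_schedule_fallback - schedule fallback check
 *
 * @ring: pointer to struct amdgpu_ring
 *
 * Start a timer as fallback to our interrupts.
 */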
static void amdgpu_fence_schedule_fallback(struct amdgpu_ring *ring)
{
        mod_timer(&ring->fence_drv.fallback_timer,
                  jiffies + AMDGPU_FENCE_JIFFIES_TIMEOUT);
}
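
/**
 * amdgpu_fence_process - check for fence activity
 *
 * @ring: pointer to struct amdgpu_ring
 *
 * Reads the current fence value and signals every fence up to and
 * including it.  Also re-arms the fallback timer if fences are still
 * outstanding.
 *
 * Returns true if any fence was processed.
 */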
bool amdgpu_fence_process(struct amdgpu_ring *ring)
{
        struct amdgpu_fence_driver *drv = &ring->fence_drv;
        struct amdgpu_device *adev = ring->adev;
        uint32_t seq, last_seq;
        int r;

        do {
                last_seq = atomic_read(&ring->fence_drv.last_seq);
                seq = amdgpu_fence_read(ring);

        } while (atomic_cmpxchg(&drv->last_seq, last_seq, seq) != last_seq);

        if (del_timer(&ring->fence_drv.fallback_timer) &&
            seq != ring->fence_drv.sync_seq)
                amdgpu_fence_schedule_fallback(ring);

        if (unlikely(seq == last_seq))
                return false;

        last_seq &= drv->num_fences_mask;
        seq &= drv->num_fences_mask;

        do {
                struct dma_fence *fence, **ptr;

                ++last_seq;
                last_seq &= drv->num_fences_mask;
                ptr = &drv->fences[last_seq];

                /* take ownership of the fence stored in this slot */
                fence = rcu_dereference_protected(*ptr, 1);
                RCU_INIT_POINTER(*ptr, NULL);

                if (!fence)
                        continue;

                r = dma_fence_signal(fence);
                if (!r)
                        DMA_FENCE_TRACE(fence, "signaled from irq context\n");
                else
                        BUG();

                dma_fence_put(fence);
                pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
                pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
        } while (last_seq != seq);

        return true;
}
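
/**
 * amdgpu_fence_fallback - fallback for hardware interrupts
 *
 * @t: timer context used to obtain the ring
 *
 * Checks for fence activity in case the fence interrupt was disabled
 * or got lost, and warns if any pending work was found.
 */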
static void amdgpu_fence_fallback(struct timer_list *t)
{
        struct amdgpu_ring *ring = from_timer(ring, t,
                                              fence_drv.fallback_timer);

        if (amdgpu_fence_process(ring))
                DRM_WARN("Fence fallback timer expired on ring %s\n", ring->name);
}
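
/**
 * amdgpu_fence_wait_empty - wait for all fences to signal
 *
 * @ring: ring the fences are associated with
 *
 * Waits for the most recently emitted fence on the requested ring to
 * signal (all asics).
 * Returns 0 if the fences have passed, error for all other cases.
 */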
int amdgpu_fence_wait_empty(struct amdgpu_ring *ring)
{
        uint64_t seq = READ_ONCE(ring->fence_drv.sync_seq);
        struct dma_fence *fence, **ptr;
        int r;

        if (!seq)
                return 0;

        ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
        rcu_read_lock();
        fence = rcu_dereference(*ptr);
        if (!fence || !dma_fence_get_rcu(fence)) {
                rcu_read_unlock();
                return 0;
        }
        rcu_read_unlock();

        r = dma_fence_wait(fence, false);
        dma_fence_put(fence);
        return r;
}
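
/**
 * amdgpu_fence_wait_polling - busy wait for a given sequence number
 *
 * @ring: ring the fence is associated with
 * @wait_seq: sequence number to wait for
 * @timeout: the timeout for waiting in usecs
 *
 * Busy-waits in 5 usec steps until @wait_seq has signaled or the timeout
 * expires.  Returns the remaining time on success, 0 on timeout.
 */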
signed long amdgpu_fence_wait_polling(struct amdgpu_ring *ring,
                                      uint32_t wait_seq,
                                      signed long timeout)
{
        uint32_t seq;

        do {
                seq = amdgpu_fence_read(ring);
                udelay(5);
                timeout -= 5;
        } while ((int32_t)(wait_seq - seq) > 0 && timeout > 0);

        return timeout > 0 ? timeout : 0;
}
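
/**
 * amdgpu_fence_count_emitted - get the count of emitted fences
 *
 * @ring: ring the fences are associated with
 *
 * Returns the number of fences emitted on the requested ring that have
 * not yet signaled (all asics).  Used by power management to track
 * ring activity.
 */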
unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring)
{
        uint64_t emitted;

        /* We are not protected by the ring lock when reading the last
         * sequence, but it's ok to report a slightly wrong fence count here.
         */
        amdgpu_fence_process(ring);
        emitted = 0x100000000ull;
        emitted -= atomic_read(&ring->fence_drv.last_seq);
        emitted += READ_ONCE(ring->fence_drv.sync_seq);
        return lower_32_bits(emitted);
}
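
/**
 * amdgpu_fence_driver_start_ring - make the fence driver
 * ready for use on the requested ring.
 *
 * @ring: ring to start the fence driver on
 * @irq_src: interrupt source to use for this ring
 * @irq_type: interrupt type to use for this ring
 *
 * Sets up the fence writeback address and interrupt source so the ring
 * can start processing fences (all asics).  Not all asics have all rings,
 * so each asic will only start the fence driver on the rings it has.
 * Returns 0 for success.
 */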
int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
                                   struct amdgpu_irq_src *irq_src,
                                   unsigned irq_type)
{
        struct amdgpu_device *adev = ring->adev;
        uint64_t index;

        if (ring->funcs->type != AMDGPU_RING_TYPE_UVD) {
                ring->fence_drv.cpu_addr = &adev->wb.wb[ring->fence_offs];
                ring->fence_drv.gpu_addr = adev->wb.gpu_addr + (ring->fence_offs * 4);
        } else {
                /* put the fence directly behind the UVD firmware */
                index = ALIGN(adev->uvd.fw->size, 8);
                ring->fence_drv.cpu_addr = adev->uvd.inst[ring->me].cpu_addr + index;
                ring->fence_drv.gpu_addr = adev->uvd.inst[ring->me].gpu_addr + index;
        }
        amdgpu_fence_write(ring, atomic_read(&ring->fence_drv.last_seq));

        ring->fence_drv.irq_src = irq_src;
        ring->fence_drv.irq_type = irq_type;
        ring->fence_drv.initialized = true;

        DRM_DEV_DEBUG(adev->dev, "fence driver on ring %s use gpu addr 0x%016llx\n",
                      ring->name, ring->fence_drv.gpu_addr);
        return 0;
}
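
/**
 * amdgpu_fence_driver_init_ring - init the fence driver
 * for the requested ring.
 *
 * @ring: ring to init the fence driver on
 * @num_hw_submission: number of entries on the hardware queue (power of two)
 * @sched_score: optional score atomic shared with other schedulers
 *
 * Initializes the fence driver state for the requested ring, allocates the
 * fence slots and sets up the GPU scheduler unless the ring opts out.
 * Returns 0 for success, negative error code on failure.
 */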
int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
                                  unsigned num_hw_submission,
                                  atomic_t *sched_score)
{
        struct amdgpu_device *adev = ring->adev;
        long timeout;
        int r;

        if (!adev)
                return -EINVAL;

        if (!is_power_of_2(num_hw_submission))
                return -EINVAL;

        ring->fence_drv.cpu_addr = NULL;
        ring->fence_drv.gpu_addr = 0;
        ring->fence_drv.sync_seq = 0;
        atomic_set(&ring->fence_drv.last_seq, 0);
        ring->fence_drv.initialized = false;

        timer_setup(&ring->fence_drv.fallback_timer, amdgpu_fence_fallback, 0);

        ring->fence_drv.num_fences_mask = num_hw_submission * 2 - 1;
        spin_lock_init(&ring->fence_drv.lock);
        ring->fence_drv.fences = kcalloc(num_hw_submission * 2, sizeof(void *),
                                         GFP_KERNEL);
        if (!ring->fence_drv.fences)
                return -ENOMEM;

        /* No need to setup the GPU scheduler for rings that don't need it */
        if (ring->no_scheduler)
                return 0;

        switch (ring->funcs->type) {
        case AMDGPU_RING_TYPE_GFX:
                timeout = adev->gfx_timeout;
                break;
        case AMDGPU_RING_TYPE_COMPUTE:
                timeout = adev->compute_timeout;
                break;
        case AMDGPU_RING_TYPE_SDMA:
                timeout = adev->sdma_timeout;
                break;
        default:
                timeout = adev->video_timeout;
                break;
        }

        r = drm_sched_init(&ring->sched, &amdgpu_sched_ops,
                           num_hw_submission, amdgpu_job_hang_limit,
                           timeout, NULL, sched_score, ring->name);
        if (r) {
                DRM_ERROR("Failed to create scheduler on ring %s.\n",
                          ring->name);
                return r;
        }

        return 0;
}
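
/**
 * amdgpu_fence_driver_sw_init - software init for the fence driver
 *
 * @adev: amdgpu device pointer
 *
 * Software init hook for the fence driver; there is currently nothing
 * to do here, per-ring setup happens in amdgpu_fence_driver_init_ring().
 * Returns 0 for success.
 */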
int amdgpu_fence_driver_sw_init(struct amdgpu_device *adev)
{
        return 0;
}
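
/**
 * amdgpu_fence_driver_hw_fini - tear down the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Stops the schedulers, waits for (or force-completes) outstanding fences,
 * disables the fence interrupts and kills the fallback timers (all asics).
 */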
void amdgpu_fence_driver_hw_fini(struct amdgpu_device *adev)
{
        int i, r;

        for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
                struct amdgpu_ring *ring = adev->rings[i];

                if (!ring || !ring->fence_drv.initialized)
                        continue;

                if (!ring->no_scheduler)
                        drm_sched_stop(&ring->sched, NULL);

                /* You can't wait for HW to signal if it's gone */
                if (!drm_dev_is_unplugged(&adev->ddev))
                        r = amdgpu_fence_wait_empty(ring);
                else
                        r = -ENODEV;
                /* no need to trigger GPU reset as we are unloading */
                if (r)
                        amdgpu_fence_driver_force_completion(ring);

                if (ring->fence_drv.irq_src)
                        amdgpu_irq_put(adev, ring->fence_drv.irq_src,
                                       ring->fence_drv.irq_type);

                del_timer_sync(&ring->fence_drv.fallback_timer);
        }
}

void amdgpu_fence_driver_sw_fini(struct amdgpu_device *adev)
{
        unsigned int i, j;

        for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
                struct amdgpu_ring *ring = adev->rings[i];

                if (!ring || !ring->fence_drv.initialized)
                        continue;

                if (!ring->no_scheduler)
                        drm_sched_fini(&ring->sched);

                for (j = 0; j <= ring->fence_drv.num_fences_mask; ++j)
                        dma_fence_put(ring->fence_drv.fences[j]);
                kfree(ring->fence_drv.fences);
                ring->fence_drv.fences = NULL;
                ring->fence_drv.initialized = false;
        }
}
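
/**
 * amdgpu_fence_driver_hw_init - enable the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Restarts the schedulers and re-enables the fence interrupts for all
 * initialized rings (all asics).  Not all asics have all rings, so each
 * asic will only enable the fence driver on the rings it has.
 */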
void amdgpu_fence_driver_hw_init(struct amdgpu_device *adev)
{
        int i;

        for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
                struct amdgpu_ring *ring = adev->rings[i];

                if (!ring || !ring->fence_drv.initialized)
                        continue;

                if (!ring->no_scheduler) {
                        drm_sched_resubmit_jobs(&ring->sched);
                        drm_sched_start(&ring->sched, true);
                }

                /* enable the interrupt */
                if (ring->fence_drv.irq_src)
                        amdgpu_irq_get(adev, ring->fence_drv.irq_src,
                                       ring->fence_drv.irq_type);
        }
}
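
/**
 * amdgpu_fence_driver_force_completion - force signal the latest fence
 *
 * @ring: ring whose fences should be signalled
 *
 * Writes the last emitted sequence number to the fence writeback slot and
 * processes the ring so every outstanding fence signals.
 */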
void amdgpu_fence_driver_force_completion(struct amdgpu_ring *ring)
{
        amdgpu_fence_write(ring, ring->fence_drv.sync_seq);
        amdgpu_fence_process(ring);
}
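
/*
 * Common fence implementation
 */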
static const char *amdgpu_fence_get_driver_name(struct dma_fence *fence)
{
        return "amdgpu";
}

static const char *amdgpu_fence_get_timeline_name(struct dma_fence *f)
{
        struct amdgpu_ring *ring;

        if (test_bit(AMDGPU_FENCE_FLAG_EMBED_IN_JOB_BIT, &f->flags)) {
                struct amdgpu_job *job = container_of(f, struct amdgpu_job, hw_fence);

                ring = to_amdgpu_ring(job->base.sched);
        } else {
                ring = to_amdgpu_fence(f)->ring;
        }
        return (const char *)ring->name;
}
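
/**
 * amdgpu_fence_enable_signaling - enable signalling on fence
 *
 * @f: fence
 *
 * Called by the dma-fence core when a waiter is added.  Arms the fallback
 * timer so the fence still signals even if the fence interrupt gets lost.
 */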
static bool amdgpu_fence_enable_signaling(struct dma_fence *f)
{
        struct amdgpu_ring *ring;

        if (test_bit(AMDGPU_FENCE_FLAG_EMBED_IN_JOB_BIT, &f->flags)) {
                struct amdgpu_job *job = container_of(f, struct amdgpu_job, hw_fence);

                ring = to_amdgpu_ring(job->base.sched);
        } else {
                ring = to_amdgpu_fence(f)->ring;
        }

        if (!timer_pending(&ring->fence_drv.fallback_timer))
                amdgpu_fence_schedule_fallback(ring);

        DMA_FENCE_TRACE(f, "armed on ring %i!\n", ring->idx);

        return true;
}
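
/**
 * amdgpu_fence_free - free up the fence memory
 *
 * @rcu: RCU callback head
 *
 * Frees the fence memory after the RCU grace period.
 */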
static void amdgpu_fence_free(struct rcu_head *rcu)
{
        struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);

        if (test_bit(AMDGPU_FENCE_FLAG_EMBED_IN_JOB_BIT, &f->flags)) {
                /* the fence is embedded in a job, free the whole job */
                struct amdgpu_job *job;

                job = container_of(f, struct amdgpu_job, hw_fence);
                kfree(job);
        } else {
                /* standalone fence, return it to the slab */
                struct amdgpu_fence *fence;

                fence = to_amdgpu_fence(f);
                kmem_cache_free(amdgpu_fence_slab, fence);
        }
}
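
/**
 * amdgpu_fence_release - callback that the fence can be freed
 *
 * @f: fence
 *
 * Called when the reference count becomes zero.  Defers the actual free
 * to an RCU grace period via amdgpu_fence_free().
 */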
static void amdgpu_fence_release(struct dma_fence *f)
{
        call_rcu(&f->rcu, amdgpu_fence_free);
}

static const struct dma_fence_ops amdgpu_fence_ops = {
        .get_driver_name = amdgpu_fence_get_driver_name,
        .get_timeline_name = amdgpu_fence_get_timeline_name,
        .enable_signaling = amdgpu_fence_enable_signaling,
        .release = amdgpu_fence_release,
};
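
/*
 * Fence debugfs
 */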
#if defined(CONFIG_DEBUG_FS)
static int amdgpu_debugfs_fence_info_show(struct seq_file *m, void *unused)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
        int i;

        for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
                struct amdgpu_ring *ring = adev->rings[i];

                if (!ring || !ring->fence_drv.initialized)
                        continue;

                amdgpu_fence_process(ring);

                seq_printf(m, "--- ring %d (%s) ---\n", i, ring->name);
                seq_printf(m, "Last signaled fence 0x%08x\n",
                           atomic_read(&ring->fence_drv.last_seq));
                seq_printf(m, "Last emitted 0x%08x\n",
                           ring->fence_drv.sync_seq);

                if (ring->funcs->type == AMDGPU_RING_TYPE_GFX ||
                    ring->funcs->type == AMDGPU_RING_TYPE_SDMA) {
                        seq_printf(m, "Last signaled trailing fence 0x%08x\n",
                                   le32_to_cpu(*ring->trail_fence_cpu_addr));
                        seq_printf(m, "Last emitted 0x%08x\n",
                                   ring->trail_seq);
                }

                if (ring->funcs->type != AMDGPU_RING_TYPE_GFX)
                        continue;

                /* set in CP_VMID_PREEMPT and preemption occurred */
                seq_printf(m, "Last preempted 0x%08x\n",
                           le32_to_cpu(*(ring->fence_drv.cpu_addr + 2)));
                /* set in CP_VMID_RESET and reset occurred */
                seq_printf(m, "Last reset 0x%08x\n",
                           le32_to_cpu(*(ring->fence_drv.cpu_addr + 4)));
                /* both preemption and reset occurred */
                seq_printf(m, "Last both 0x%08x\n",
                           le32_to_cpu(*(ring->fence_drv.cpu_addr + 6)));
        }
        return 0;
}
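
/*
 * Debugfs hook: reading amdgpu_gpu_recover triggers a GPU reset.
 */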
static int gpu_recover_get(void *data, u64 *val)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)data;
        struct drm_device *dev = adev_to_drm(adev);
        int r;

        r = pm_runtime_get_sync(dev->dev);
        if (r < 0) {
                pm_runtime_put_autosuspend(dev->dev);
                return 0;
        }

        *val = amdgpu_device_gpu_recover(adev, NULL);

        pm_runtime_mark_last_busy(dev->dev);
        pm_runtime_put_autosuspend(dev->dev);

        return 0;
}

DEFINE_SHOW_ATTRIBUTE(amdgpu_debugfs_fence_info);
DEFINE_DEBUGFS_ATTRIBUTE(amdgpu_debugfs_gpu_recover_fops, gpu_recover_get, NULL,
                         "%lld\n");

#endif

void amdgpu_debugfs_fence_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
        struct drm_minor *minor = adev_to_drm(adev)->primary;
        struct dentry *root = minor->debugfs_root;

        debugfs_create_file("amdgpu_fence_info", 0444, root, adev,
                            &amdgpu_debugfs_fence_info_fops);

        if (!amdgpu_sriov_vf(adev))
                debugfs_create_file("amdgpu_gpu_recover", 0444, root, adev,
                                    &amdgpu_debugfs_gpu_recover_fops);
#endif
}