#include <linux/seq_file.h>
#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/firmware.h>
#include <linux/pm_runtime.h>

#include <drm/drm_drv.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"

/*
 * Fences
 * Fences mark a point in the command stream.  When the GPU reaches that
 * point it writes the fence sequence number back to a memory location and
 * optionally raises an interrupt, so the CPU can test or wait for the
 * preceding commands to complete.
 */

struct amdgpu_fence {
	struct dma_fence base;

	/* ring the fence was emitted on */
	struct amdgpu_ring *ring;
};

static struct kmem_cache *amdgpu_fence_slab;

int amdgpu_fence_slab_init(void)
{
	amdgpu_fence_slab = kmem_cache_create(
		"amdgpu_fence", sizeof(struct amdgpu_fence), 0,
		SLAB_HWCACHE_ALIGN, NULL);
	if (!amdgpu_fence_slab)
		return -ENOMEM;
	return 0;
}

void amdgpu_fence_slab_fini(void)
{
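	/* wait for any pending call_rcu() fence frees before destroying the slab */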
	rcu_barrier();
	kmem_cache_destroy(amdgpu_fence_slab);
}

/*
 * Cast helper
 */
static const struct dma_fence_ops amdgpu_fence_ops;
static const struct dma_fence_ops amdgpu_job_fence_ops;
static inline struct amdgpu_fence *to_amdgpu_fence(struct dma_fence *f)
{
	struct amdgpu_fence *__f = container_of(f, struct amdgpu_fence, base);

	if (__f->base.ops == &amdgpu_fence_ops ||
	    __f->base.ops == &amdgpu_job_fence_ops)
		return __f;

	return NULL;
}

/**
 * amdgpu_fence_write - write a fence value
 *
 * @ring: ring the fence is associated with
 * @seq: sequence number to write
 *
 * Writes a fence value to memory (all asics).
 */
static void amdgpu_fence_write(struct amdgpu_ring *ring, u32 seq)
{
	struct amdgpu_fence_driver *drv = &ring->fence_drv;

	if (drv->cpu_addr)
		*drv->cpu_addr = cpu_to_le32(seq);
}

/**
 * amdgpu_fence_read - read a fence value
 *
 * @ring: ring the fence is associated with
 *
 * Reads a fence value from memory (all asics).
 * Returns the value of the fence read from memory.
 */
static u32 amdgpu_fence_read(struct amdgpu_ring *ring)
{
	struct amdgpu_fence_driver *drv = &ring->fence_drv;
	u32 seq = 0;

	if (drv->cpu_addr)
		seq = le32_to_cpu(*drv->cpu_addr);
	else
		seq = atomic_read(&drv->last_seq);

	return seq;
}

/**
 * amdgpu_fence_emit - emit a fence on the requested ring
 *
 * @ring: ring the fence is associated with
 * @f: resulting fence object
 * @job: job the fence is embedded in, or NULL
 * @flags: flags to pass into the subordinate .emit_fence() call
 *
 * Emits a fence command on the requested ring (all asics).
 * Returns 0 on success, negative error code on failure.
 */
int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f, struct amdgpu_job *job,
		      unsigned flags)
{
	struct amdgpu_device *adev = ring->adev;
	struct dma_fence *fence;
	struct amdgpu_fence *am_fence;
	struct dma_fence __rcu **ptr;
	uint32_t seq;
	int r;

	if (job == NULL) {
		/* create a separate hw fence */
		am_fence = kmem_cache_alloc(amdgpu_fence_slab, GFP_ATOMIC);
		if (am_fence == NULL)
			return -ENOMEM;
		fence = &am_fence->base;
		am_fence->ring = ring;
	} else {
		/* use the fence embedded in the job */
		fence = &job->hw_fence;
	}

	seq = ++ring->fence_drv.sync_seq;
	if (job && job->job_run_counter) {
		/* only update the sequence number for resubmitted jobs */
		fence->seqno = seq;
	} else {
		if (job)
			dma_fence_init(fence, &amdgpu_job_fence_ops,
				       &ring->fence_drv.lock,
				       adev->fence_context + ring->idx, seq);
		else
			dma_fence_init(fence, &amdgpu_fence_ops,
				       &ring->fence_drv.lock,
				       adev->fence_context + ring->idx, seq);
	}

	amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
			       seq, flags | AMDGPU_FENCE_FLAG_INT);
	pm_runtime_get_noresume(adev_to_drm(adev)->dev);
	ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
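	/* the fence slots form a ring of num_hw_submission * 2 entries; if the
	 * slot for this sequence number still holds an older fence, wait for
	 * that fence to signal before overwriting the slot
	 */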
	if (unlikely(rcu_dereference_protected(*ptr, 1))) {
		struct dma_fence *old;

		rcu_read_lock();
		old = dma_fence_get_rcu_safe(ptr);
		rcu_read_unlock();

		if (old) {
			r = dma_fence_wait(old, false);
			dma_fence_put(old);
			if (r)
				return r;
		}
	}

	/* This function can't be called concurrently anyway, otherwise
	 * emitting the fence would mess up the hardware ring buffer.
	 */
	rcu_assign_pointer(*ptr, dma_fence_get(fence));

	*f = fence;

	return 0;
}

/**
 * amdgpu_fence_emit_polling - emit a fence on the requested ring
 *
 * @ring: ring the fence is associated with
 * @s: resulting sequence number
 * @timeout: the timeout for waiting, in usecs
 *
 * Emits a fence command on the requested ring (all asics).
 * Used for fences that are polled rather than signalled through interrupts.
 * Returns 0 on success, -EINVAL or -ETIMEDOUT on failure.
 */
int amdgpu_fence_emit_polling(struct amdgpu_ring *ring, uint32_t *s,
			      uint32_t timeout)
{
	uint32_t seq;
	signed long r;

	if (!s)
		return -EINVAL;

	seq = ++ring->fence_drv.sync_seq;
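	/* throttle: wait for the fence num_fences_mask entries back to pass so
	 * the number of outstanding fences never exceeds the available slots
	 */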
	r = amdgpu_fence_wait_polling(ring,
				      seq - ring->fence_drv.num_fences_mask,
				      timeout);
	if (r < 1)
		return -ETIMEDOUT;

	amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
			       seq, 0);

	*s = seq;

	return 0;
}

/**
 * amdgpu_fence_schedule_fallback - schedule fallback check
 *
 * @ring: pointer to struct amdgpu_ring
 *
 * Start a timer as fallback to our interrupts.
 */
static void amdgpu_fence_schedule_fallback(struct amdgpu_ring *ring)
{
	mod_timer(&ring->fence_drv.fallback_timer,
		  jiffies + AMDGPU_FENCE_JIFFIES_TIMEOUT);
}

/**
 * amdgpu_fence_process - check for fence activity
 *
 * @ring: pointer to struct amdgpu_ring
 *
 * Checks the current fence value written back by the GPU and signals
 * all fences up to that value.
 *
 * Returns true if at least one fence was processed.
 */
bool amdgpu_fence_process(struct amdgpu_ring *ring)
{
	struct amdgpu_fence_driver *drv = &ring->fence_drv;
	struct amdgpu_device *adev = ring->adev;
	uint32_t seq, last_seq;

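	/* atomically claim the range (last_seq, seq] for this caller; retry if
	 * another thread advanced last_seq in the meantime
	 */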
	do {
		last_seq = atomic_read(&ring->fence_drv.last_seq);
		seq = amdgpu_fence_read(ring);

	} while (atomic_cmpxchg(&drv->last_seq, last_seq, seq) != last_seq);

	if (del_timer(&ring->fence_drv.fallback_timer) &&
	    seq != ring->fence_drv.sync_seq)
		amdgpu_fence_schedule_fallback(ring);

	if (unlikely(seq == last_seq))
		return false;

	last_seq &= drv->num_fences_mask;
	seq &= drv->num_fences_mask;

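	/* walk the claimed slots, signaling each fence and dropping the runtime
	 * PM reference taken when the fence was emitted
	 */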
	do {
		struct dma_fence *fence, **ptr;

		++last_seq;
		last_seq &= drv->num_fences_mask;
		ptr = &drv->fences[last_seq];

		/* There is always exactly one thread signaling this fence slot */
		fence = rcu_dereference_protected(*ptr, 1);
		RCU_INIT_POINTER(*ptr, NULL);

		if (!fence)
			continue;

		dma_fence_signal(fence);
		dma_fence_put(fence);
		pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
	} while (last_seq != seq);

	return true;
}

/**
 * amdgpu_fence_fallback - fallback for hardware interrupts
 *
 * @t: timer context used to obtain the pointer to the ring structure
 *
 * Checks for fence activity in case the fence interrupt was missed.
 */
static void amdgpu_fence_fallback(struct timer_list *t)
{
	struct amdgpu_ring *ring = from_timer(ring, t,
					      fence_drv.fallback_timer);

	if (amdgpu_fence_process(ring))
		DRM_WARN("Fence fallback timer expired on ring %s\n", ring->name);
}

/**
 * amdgpu_fence_wait_empty - wait for all fences to signal
 *
 * @ring: ring the fences are associated with
 *
 * Wait for all fences on the requested ring to signal (all asics).
 * Returns 0 if the fences have passed, error for all other cases.
 */
int amdgpu_fence_wait_empty(struct amdgpu_ring *ring)
{
	uint64_t seq = READ_ONCE(ring->fence_drv.sync_seq);
	struct dma_fence *fence, **ptr;
	int r;

	if (!seq)
		return 0;

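	/* look up the most recently emitted fence and wait on it */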
	ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
	rcu_read_lock();
	fence = rcu_dereference(*ptr);
	if (!fence || !dma_fence_get_rcu(fence)) {
		rcu_read_unlock();
		return 0;
	}
	rcu_read_unlock();

	r = dma_fence_wait(fence, false);
	dma_fence_put(fence);
	return r;
}

/**
 * amdgpu_fence_wait_polling - busy wait for a given sequence number
 *
 * @ring: ring the fence is associated with
 * @wait_seq: sequence number to wait for
 * @timeout: the timeout for waiting, in usecs
 *
 * Busy-waits until the fence value reaches @wait_seq or the timeout expires.
 * Returns the remaining timeout if the fence passed, 0 on timeout.
 */
signed long amdgpu_fence_wait_polling(struct amdgpu_ring *ring,
				      uint32_t wait_seq,
				      signed long timeout)
{
	uint32_t seq;

	do {
		seq = amdgpu_fence_read(ring);
		udelay(5);
		timeout -= 5;
	} while ((int32_t)(wait_seq - seq) > 0 && timeout > 0);

	return timeout > 0 ? timeout : 0;
}

/**
 * amdgpu_fence_count_emitted - get the count of emitted fences
 *
 * @ring: ring the fence is associated with
 *
 * Get the number of fences emitted on the requested ring (all asics)
 * that have not signaled yet.
 * Returns the number of outstanding fences on the ring.
 */
unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring)
{
	uint64_t emitted;

	/* We are not protected by the ring lock when reading the last sequence,
	 * but it's ok to report a slightly wrong fence count here.
	 */
	amdgpu_fence_process(ring);
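	/* compute (sync_seq - last_seq) modulo 2^32 without signed overflow */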
	emitted = 0x100000000ull;
	emitted -= atomic_read(&ring->fence_drv.last_seq);
	emitted += READ_ONCE(ring->fence_drv.sync_seq);
	return lower_32_bits(emitted);
}

/**
 * amdgpu_fence_driver_start_ring - make the fence driver
 * ready for use on the requested ring.
 *
 * @ring: ring to start the fence driver on
 * @irq_src: interrupt source to use for this ring
 * @irq_type: interrupt type to use for this ring
 *
 * Make the fence driver ready for processing (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has.
 * Returns 0 for success, errors for failure.
 */
int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
				   struct amdgpu_irq_src *irq_src,
				   unsigned irq_type)
{
	struct amdgpu_device *adev = ring->adev;
	uint64_t index;

	if (ring->funcs->type != AMDGPU_RING_TYPE_UVD) {
		ring->fence_drv.cpu_addr = ring->fence_cpu_addr;
		ring->fence_drv.gpu_addr = ring->fence_gpu_addr;
	} else {
		/* put the fence directly behind the firmware */
		index = ALIGN(adev->uvd.fw->size, 8);
		ring->fence_drv.cpu_addr = adev->uvd.inst[ring->me].cpu_addr + index;
		ring->fence_drv.gpu_addr = adev->uvd.inst[ring->me].gpu_addr + index;
	}
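	/* seed the fence location with the last signaled value so a stale value
	 * is not mistaken for a newly signaled fence
	 */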
	amdgpu_fence_write(ring, atomic_read(&ring->fence_drv.last_seq));

	ring->fence_drv.irq_src = irq_src;
	ring->fence_drv.irq_type = irq_type;
	ring->fence_drv.initialized = true;

	DRM_DEV_DEBUG(adev->dev, "fence driver on ring %s use gpu addr 0x%016llx\n",
		      ring->name, ring->fence_drv.gpu_addr);
	return 0;
}

/**
 * amdgpu_fence_driver_init_ring - init the fence driver
 * for the requested ring.
 *
 * @ring: ring to init the fence driver on
 *
 * Init the fence driver for the requested ring (all asics).
 * Helper function for amdgpu_fence_driver_init().
 */
int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (!adev)
		return -EINVAL;

	if (!is_power_of_2(ring->num_hw_submission))
		return -EINVAL;

	ring->fence_drv.cpu_addr = NULL;
	ring->fence_drv.gpu_addr = 0;
	ring->fence_drv.sync_seq = 0;
	atomic_set(&ring->fence_drv.last_seq, 0);
	ring->fence_drv.initialized = false;

	timer_setup(&ring->fence_drv.fallback_timer, amdgpu_fence_fallback, 0);

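	/* twice as many fence slots as possible in-flight submissions; the size
	 * is a power of two so sequence numbers can simply be masked
	 */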
	ring->fence_drv.num_fences_mask = ring->num_hw_submission * 2 - 1;
	spin_lock_init(&ring->fence_drv.lock);
	ring->fence_drv.fences = kcalloc(ring->num_hw_submission * 2, sizeof(void *),
					 GFP_KERNEL);

	if (!ring->fence_drv.fences)
		return -ENOMEM;

	return 0;
}

/**
 * amdgpu_fence_driver_sw_init - init the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Init the fence driver for all possible rings (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has using
 * amdgpu_fence_driver_start_ring().
 * Returns 0 for success.
 */
int amdgpu_fence_driver_sw_init(struct amdgpu_device *adev)
{
	return 0;
}

/**
 * amdgpu_fence_driver_hw_fini - tear down the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Tear down the fence driver for all possible rings (all asics).
 */
void amdgpu_fence_driver_hw_fini(struct amdgpu_device *adev)
{
	int i, r;

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (!ring || !ring->fence_drv.initialized)
			continue;

		/* You can't wait for HW to signal if it's gone */
		if (!drm_dev_is_unplugged(adev_to_drm(adev)))
			r = amdgpu_fence_wait_empty(ring);
		else
			r = -ENODEV;

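		/* if waiting failed or the device is gone, force the fences to
		 * complete so nothing is left waiting on them forever
		 */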
		if (r)
			amdgpu_fence_driver_force_completion(ring);

		if (ring->fence_drv.irq_src)
			amdgpu_irq_put(adev, ring->fence_drv.irq_src,
				       ring->fence_drv.irq_type);

		del_timer_sync(&ring->fence_drv.fallback_timer);
	}
}

void amdgpu_fence_driver_sw_fini(struct amdgpu_device *adev)
{
	unsigned int i, j;

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (!ring || !ring->fence_drv.initialized)
			continue;

		if (!ring->no_scheduler)
			drm_sched_fini(&ring->sched);

		for (j = 0; j <= ring->fence_drv.num_fences_mask; ++j)
			dma_fence_put(ring->fence_drv.fences[j]);
		kfree(ring->fence_drv.fences);
		ring->fence_drv.fences = NULL;
		ring->fence_drv.initialized = false;
	}
}

/**
 * amdgpu_fence_driver_hw_init - enable the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Enable the fence driver for all possible rings (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has using
 * amdgpu_fence_driver_start_ring().
 */
void amdgpu_fence_driver_hw_init(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (!ring || !ring->fence_drv.initialized)
			continue;

		/* enable the interrupt */
		if (ring->fence_drv.irq_src)
			amdgpu_irq_get(adev, ring->fence_drv.irq_src,
				       ring->fence_drv.irq_type);
	}
}

/**
 * amdgpu_fence_driver_clear_job_fences - clear the job-embedded fences of a ring
 *
 * @ring: ring whose job-embedded fences should be cleared
 */
void amdgpu_fence_driver_clear_job_fences(struct amdgpu_ring *ring)
{
	int i;
	struct dma_fence *old, **ptr;

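	/* only drop slots that hold job-embedded fences; stand-alone fences are
	 * left in place for normal processing
	 */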
	for (i = 0; i <= ring->fence_drv.num_fences_mask; i++) {
		ptr = &ring->fence_drv.fences[i];
		old = rcu_dereference_protected(*ptr, 1);
		if (old && old->ops == &amdgpu_job_fence_ops)
			RCU_INIT_POINTER(*ptr, NULL);
	}
}

/**
 * amdgpu_fence_driver_force_completion - force signal the latest fence of a ring
 *
 * @ring: ring whose outstanding fences should be signaled
 */
void amdgpu_fence_driver_force_completion(struct amdgpu_ring *ring)
{
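	/* pretend the GPU wrote back the last emitted sequence number, then
	 * process the ring so every outstanding fence signals
	 */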
	amdgpu_fence_write(ring, ring->fence_drv.sync_seq);
	amdgpu_fence_process(ring);
}

/*
 * Common fence implementation
 */

static const char *amdgpu_fence_get_driver_name(struct dma_fence *fence)
{
	return "amdgpu";
}

static const char *amdgpu_fence_get_timeline_name(struct dma_fence *f)
{
	return (const char *)to_amdgpu_fence(f)->ring->name;
}

static const char *amdgpu_job_fence_get_timeline_name(struct dma_fence *f)
{
	struct amdgpu_job *job = container_of(f, struct amdgpu_job, hw_fence);

	return (const char *)to_amdgpu_ring(job->base.sched)->name;
}

/**
 * amdgpu_fence_enable_signaling - enable signalling on a fence
 *
 * @f: fence
 *
 * Arms the fallback timer (if it is not already pending) so the fence is
 * still processed even if the fence interrupt is missed.
 */
static bool amdgpu_fence_enable_signaling(struct dma_fence *f)
{
	if (!timer_pending(&to_amdgpu_fence(f)->ring->fence_drv.fallback_timer))
		amdgpu_fence_schedule_fallback(to_amdgpu_fence(f)->ring);

	return true;
}

/**
 * amdgpu_job_fence_enable_signaling - enable signalling on a job-embedded fence
 *
 * @f: fence
 *
 * Same as amdgpu_fence_enable_signaling() above, but the ring is looked up
 * through the scheduler of the job the fence is embedded in.
 */
static bool amdgpu_job_fence_enable_signaling(struct dma_fence *f)
{
	struct amdgpu_job *job = container_of(f, struct amdgpu_job, hw_fence);

	if (!timer_pending(&to_amdgpu_ring(job->base.sched)->fence_drv.fallback_timer))
		amdgpu_fence_schedule_fallback(to_amdgpu_ring(job->base.sched));

	return true;
}

/**
 * amdgpu_fence_free - free up the fence memory
 *
 * @rcu: RCU callback head
 *
 * Free up the fence memory after the RCU grace period.
 */
static void amdgpu_fence_free(struct rcu_head *rcu)
{
	struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);

	/* free the separately allocated fence back to the slab */
	kmem_cache_free(amdgpu_fence_slab, to_amdgpu_fence(f));
}

/**
 * amdgpu_job_fence_free - free up the job with an embedded fence
 *
 * @rcu: RCU callback head
 *
 * Free up the job with the embedded fence after the RCU grace period.
 */
static void amdgpu_job_fence_free(struct rcu_head *rcu)
{
	struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);

	/* free the job the fence is embedded in */
	kfree(container_of(f, struct amdgpu_job, hw_fence));
}

/**
 * amdgpu_fence_release - callback that the fence can be freed
 *
 * @f: fence
 *
 * This function is called when the reference count becomes zero.
 * It schedules freeing of the fence after an RCU grace period.
 */
static void amdgpu_fence_release(struct dma_fence *f)
{
	call_rcu(&f->rcu, amdgpu_fence_free);
}

/**
 * amdgpu_job_fence_release - callback that the job-embedded fence can be freed
 *
 * @f: fence
 *
 * Same as amdgpu_fence_release() above, but schedules freeing of the job
 * the fence is embedded in.
 */
static void amdgpu_job_fence_release(struct dma_fence *f)
{
	call_rcu(&f->rcu, amdgpu_job_fence_free);
}

static const struct dma_fence_ops amdgpu_fence_ops = {
	.get_driver_name = amdgpu_fence_get_driver_name,
	.get_timeline_name = amdgpu_fence_get_timeline_name,
	.enable_signaling = amdgpu_fence_enable_signaling,
	.release = amdgpu_fence_release,
};

static const struct dma_fence_ops amdgpu_job_fence_ops = {
	.get_driver_name = amdgpu_fence_get_driver_name,
	.get_timeline_name = amdgpu_job_fence_get_timeline_name,
	.enable_signaling = amdgpu_job_fence_enable_signaling,
	.release = amdgpu_job_fence_release,
};

/*
 * Fence debugfs
 */
#if defined(CONFIG_DEBUG_FS)
static int amdgpu_debugfs_fence_info_show(struct seq_file *m, void *unused)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
	int i;

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (!ring || !ring->fence_drv.initialized)
			continue;

		amdgpu_fence_process(ring);

		seq_printf(m, "--- ring %d (%s) ---\n", i, ring->name);
		seq_printf(m, "Last signaled fence 0x%08x\n",
			   atomic_read(&ring->fence_drv.last_seq));
		seq_printf(m, "Last emitted 0x%08x\n",
			   ring->fence_drv.sync_seq);

		if (ring->funcs->type == AMDGPU_RING_TYPE_GFX ||
		    ring->funcs->type == AMDGPU_RING_TYPE_SDMA) {
			seq_printf(m, "Last signaled trailing fence 0x%08x\n",
				   le32_to_cpu(*ring->trail_fence_cpu_addr));
			seq_printf(m, "Last emitted 0x%08x\n",
				   ring->trail_seq);
		}

		if (ring->funcs->type != AMDGPU_RING_TYPE_GFX)
			continue;

		/* fence written back when the ring was preempted */
		seq_printf(m, "Last preempted 0x%08x\n",
			   le32_to_cpu(*(ring->fence_drv.cpu_addr + 2)));
		/* fence written back when the ring was reset */
		seq_printf(m, "Last reset 0x%08x\n",
			   le32_to_cpu(*(ring->fence_drv.cpu_addr + 4)));
		/* fence written back when both preemption and reset occurred */
		seq_printf(m, "Last both 0x%08x\n",
			   le32_to_cpu(*(ring->fence_drv.cpu_addr + 6)));
	}
	return 0;
}

/*
 * amdgpu_debugfs_gpu_recover - manually trigger a GPU reset and recovery
 *
 * Reading this debugfs file triggers GPU reset and recovery.
 */
static int gpu_recover_get(void *data, u64 *val)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)data;
	struct drm_device *dev = adev_to_drm(adev);
	int r;

	r = pm_runtime_get_sync(dev->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(dev->dev);
		return 0;
	}

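	/* trigger recovery without blaming a specific job */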
	*val = amdgpu_device_gpu_recover(adev, NULL);

	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(amdgpu_debugfs_fence_info);
DEFINE_DEBUGFS_ATTRIBUTE(amdgpu_debugfs_gpu_recover_fops, gpu_recover_get, NULL,
			 "%lld\n");

#endif

void amdgpu_debugfs_fence_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	struct drm_minor *minor = adev_to_drm(adev)->primary;
	struct dentry *root = minor->debugfs_root;

	debugfs_create_file("amdgpu_fence_info", 0444, root, adev,
			    &amdgpu_debugfs_fence_info_fops);

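	/* manual GPU recovery is not exposed to SR-IOV virtual functions */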
	if (!amdgpu_sriov_vf(adev))
		debugfs_create_file("amdgpu_gpu_recover", 0444, root, adev,
				    &amdgpu_debugfs_gpu_recover_fops);
#endif
}