#include <linux/seq_file.h>
#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/firmware.h>
#include <linux/pm_runtime.h>

#include <drm/drm_debugfs.h>

#include "amdgpu.h"
#include "amdgpu_trace.h"
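
/*
 * Fences mark an event in the GPU's pipeline and are used for GPU/CPU
 * synchronization.  Once the fence value has been written back, the work
 * submitted before it is expected to be finished on the emitting ring.
 */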
struct amdgpu_fence {
	struct dma_fence base;

	/* ring on which this fence was emitted */
	struct amdgpu_ring *ring;
};

static struct kmem_cache *amdgpu_fence_slab;

int amdgpu_fence_slab_init(void)
{
	amdgpu_fence_slab = kmem_cache_create(
		"amdgpu_fence", sizeof(struct amdgpu_fence), 0,
		SLAB_HWCACHE_ALIGN, NULL);
	if (!amdgpu_fence_slab)
		return -ENOMEM;
	return 0;
}

void amdgpu_fence_slab_fini(void)
{
	rcu_barrier();
	kmem_cache_destroy(amdgpu_fence_slab);
}
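
/*
 * Cast helper: return the amdgpu_fence backing @f, or NULL if the fence
 * does not belong to this driver.
 */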
static const struct dma_fence_ops amdgpu_fence_ops;
static inline struct amdgpu_fence *to_amdgpu_fence(struct dma_fence *f)
{
	struct amdgpu_fence *__f = container_of(f, struct amdgpu_fence, base);

	if (__f->base.ops == &amdgpu_fence_ops)
		return __f;

	return NULL;
}
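
/**
 * amdgpu_fence_write - write a fence value
 *
 * @ring: ring the fence is associated with
 * @seq: sequence number to write
 *
 * Writes the fence value to the fence driver's CPU address, if one has
 * been set up for the ring.
 */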
static void amdgpu_fence_write(struct amdgpu_ring *ring, u32 seq)
{
	struct amdgpu_fence_driver *drv = &ring->fence_drv;

	if (drv->cpu_addr)
		*drv->cpu_addr = cpu_to_le32(seq);
}
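
/**
 * amdgpu_fence_read - read a fence value
 *
 * @ring: ring the fence is associated with
 *
 * Reads the current fence value from the fence driver's CPU address,
 * falling back to the last sequence number seen by the driver when no
 * CPU address is available.
 */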
static u32 amdgpu_fence_read(struct amdgpu_ring *ring)
{
	struct amdgpu_fence_driver *drv = &ring->fence_drv;
	u32 seq = 0;

	if (drv->cpu_addr)
		seq = le32_to_cpu(*drv->cpu_addr);
	else
		seq = atomic_read(&drv->last_seq);

	return seq;
}
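
/**
 * amdgpu_fence_emit - emit a fence on the requested ring
 *
 * @ring: ring the fence is associated with
 * @f: resulting fence object
 * @flags: fence flags, OR'ed with AMDGPU_FENCE_FLAG_INT
 *
 * Emits a fence command on the requested ring (all asics).
 * Returns 0 on success, negative error code on failure.
 */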
int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f,
		      unsigned flags)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_fence *fence;
	struct dma_fence __rcu **ptr;
	uint32_t seq;
	int r;

	fence = kmem_cache_alloc(amdgpu_fence_slab, GFP_KERNEL);
	if (fence == NULL)
		return -ENOMEM;

	seq = ++ring->fence_drv.sync_seq;
	fence->ring = ring;
	dma_fence_init(&fence->base, &amdgpu_fence_ops,
		       &ring->fence_drv.lock,
		       adev->fence_context + ring->idx,
		       seq);
	amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
			       seq, flags | AMDGPU_FENCE_FLAG_INT);
	pm_runtime_get_noresume(adev->ddev->dev);
	ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
	if (unlikely(rcu_dereference_protected(*ptr, 1))) {
		struct dma_fence *old;

		rcu_read_lock();
		old = dma_fence_get_rcu_safe(ptr);
		rcu_read_unlock();

		if (old) {
			r = dma_fence_wait(old, false);
			dma_fence_put(old);
			if (r)
				return r;
		}
	}

	/* This function can't be called concurrently anyway, otherwise
	 * emitting the fence would mess up the hardware ring buffer.
	 */
	rcu_assign_pointer(*ptr, dma_fence_get(&fence->base));

	*f = &fence->base;

	return 0;
}
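
/**
 * amdgpu_fence_emit_polling - emit a fence on the requested ring
 *
 * @ring: ring the fence is associated with
 * @s: resulting sequence number
 * @timeout: time to wait for older fences, in usecs
 *
 * Emits a fence command on the requested ring (all asics).
 * Used for polling based fences that do not use interrupts.
 * Returns 0 on success, -EINVAL if @s is NULL, or -ETIMEDOUT if waiting
 * for older fences timed out.
 */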
int amdgpu_fence_emit_polling(struct amdgpu_ring *ring, uint32_t *s,
			      uint32_t timeout)
{
	uint32_t seq;
	signed long r;

	if (!s)
		return -EINVAL;

	seq = ++ring->fence_drv.sync_seq;
	r = amdgpu_fence_wait_polling(ring,
				      seq - ring->fence_drv.num_fences_mask,
				      timeout);
	if (r < 1)
		return -ETIMEDOUT;

	amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
			       seq, 0);

	*s = seq;

	return 0;
}
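
/**
 * amdgpu_fence_schedule_fallback - schedule fallback check
 *
 * @ring: pointer to struct amdgpu_ring
 *
 * Start a timer as fallback to our interrupts.
 */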
static void amdgpu_fence_schedule_fallback(struct amdgpu_ring *ring)
{
	mod_timer(&ring->fence_drv.fallback_timer,
		  jiffies + AMDGPU_FENCE_JIFFIES_TIMEOUT);
}
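
/**
 * amdgpu_fence_process - check for fence activity
 *
 * @ring: pointer to struct amdgpu_ring
 *
 * Checks the current fence value and signals all fences that the hardware
 * has completed since the last call.
 * Returns true if any fence was processed.
 */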
bool amdgpu_fence_process(struct amdgpu_ring *ring)
{
	struct amdgpu_fence_driver *drv = &ring->fence_drv;
	struct amdgpu_device *adev = ring->adev;
	uint32_t seq, last_seq;
	int r;

	do {
		last_seq = atomic_read(&ring->fence_drv.last_seq);
		seq = amdgpu_fence_read(ring);

	} while (atomic_cmpxchg(&drv->last_seq, last_seq, seq) != last_seq);

	if (del_timer(&ring->fence_drv.fallback_timer) &&
	    seq != ring->fence_drv.sync_seq)
		amdgpu_fence_schedule_fallback(ring);

	if (unlikely(seq == last_seq))
		return false;

	last_seq &= drv->num_fences_mask;
	seq &= drv->num_fences_mask;

	do {
		struct dma_fence *fence, **ptr;

		++last_seq;
		last_seq &= drv->num_fences_mask;
		ptr = &drv->fences[last_seq];

		/* There is always exactly one thread signaling this fence slot */
		fence = rcu_dereference_protected(*ptr, 1);
		RCU_INIT_POINTER(*ptr, NULL);

		if (!fence)
			continue;

		r = dma_fence_signal(fence);
		if (!r)
			DMA_FENCE_TRACE(fence, "signaled from irq context\n");
		else
			BUG();

		dma_fence_put(fence);
		pm_runtime_mark_last_busy(adev->ddev->dev);
		pm_runtime_put_autosuspend(adev->ddev->dev);
	} while (last_seq != seq);

	return true;
}
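
/**
 * amdgpu_fence_fallback - fallback for hardware interrupts
 *
 * @t: timer context used to obtain the ring
 *
 * Checks for fence activity in case the fence interrupt was missed.
 */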
static void amdgpu_fence_fallback(struct timer_list *t)
{
	struct amdgpu_ring *ring = from_timer(ring, t,
					      fence_drv.fallback_timer);

	if (amdgpu_fence_process(ring))
		DRM_WARN("Fence fallback timer expired on ring %s\n", ring->name);
}
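
/**
 * amdgpu_fence_wait_empty - wait for all fences to signal
 *
 * @ring: ring index the fence is associated with
 *
 * Wait for all fences on the requested ring to signal (all asics).
 * Returns 0 if the fences have passed, error for all other cases.
 */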
int amdgpu_fence_wait_empty(struct amdgpu_ring *ring)
{
	uint64_t seq = READ_ONCE(ring->fence_drv.sync_seq);
	struct dma_fence *fence, **ptr;
	int r;

	if (!seq)
		return 0;

	ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
	rcu_read_lock();
	fence = rcu_dereference(*ptr);
	if (!fence || !dma_fence_get_rcu(fence)) {
		rcu_read_unlock();
		return 0;
	}
	rcu_read_unlock();

	r = dma_fence_wait(fence, false);
	dma_fence_put(fence);
	return r;
}
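
/**
 * amdgpu_fence_wait_polling - busy wait for a fence sequence number
 *
 * @ring: ring index the fence is associated with
 * @wait_seq: sequence number to wait for
 * @timeout: time to wait, in usecs
 *
 * Polls the fence value until @wait_seq has passed or the timeout expires.
 * Returns the remaining timeout on success, or 0 if the wait timed out.
 */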
signed long amdgpu_fence_wait_polling(struct amdgpu_ring *ring,
				      uint32_t wait_seq,
				      signed long timeout)
{
	uint32_t seq;

	do {
		seq = amdgpu_fence_read(ring);
		udelay(5);
		timeout -= 5;
	} while ((int32_t)(wait_seq - seq) > 0 && timeout > 0);

	return timeout > 0 ? timeout : 0;
}
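
/**
 * amdgpu_fence_count_emitted - get the count of emitted fences
 *
 * @ring: ring the fence is associated with
 *
 * Get the number of fences emitted on the requested ring (all asics).
 * Returns the number of emitted fences on the ring.
 */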
unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring)
{
	uint64_t emitted;

	/* We are not protected by ring lock when reading the last sequence
	 * but it's ok to report slightly wrong fence count here.
	 */
	amdgpu_fence_process(ring);
	emitted = 0x100000000ull;
	emitted -= atomic_read(&ring->fence_drv.last_seq);
	emitted += READ_ONCE(ring->fence_drv.sync_seq);
	return lower_32_bits(emitted);
}
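
/**
 * amdgpu_fence_driver_start_ring - make the fence driver
 * ready for use on the requested ring.
 *
 * @ring: ring to start the fence driver on
 * @irq_src: interrupt source to use for this ring
 * @irq_type: interrupt type to use for this ring
 *
 * Make the fence driver ready for processing (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has.
 * Returns 0 on success.
 */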
int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
				   struct amdgpu_irq_src *irq_src,
				   unsigned irq_type)
{
	struct amdgpu_device *adev = ring->adev;
	uint64_t index;

	if (ring->funcs->type != AMDGPU_RING_TYPE_UVD) {
		ring->fence_drv.cpu_addr = &adev->wb.wb[ring->fence_offs];
		ring->fence_drv.gpu_addr = adev->wb.gpu_addr + (ring->fence_offs * 4);
	} else {
		/* put fence directly behind firmware */
		index = ALIGN(adev->uvd.fw->size, 8);
		ring->fence_drv.cpu_addr = adev->uvd.inst[ring->me].cpu_addr + index;
		ring->fence_drv.gpu_addr = adev->uvd.inst[ring->me].gpu_addr + index;
	}
	amdgpu_fence_write(ring, atomic_read(&ring->fence_drv.last_seq));

	if (irq_src)
		amdgpu_irq_get(adev, irq_src, irq_type);

	ring->fence_drv.irq_src = irq_src;
	ring->fence_drv.irq_type = irq_type;
	ring->fence_drv.initialized = true;

	DRM_DEV_DEBUG(adev->dev, "fence driver on ring %s use gpu addr 0x%016llx\n",
		      ring->name, ring->fence_drv.gpu_addr);
	return 0;
}
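
/**
 * amdgpu_fence_driver_init_ring - init the fence driver
 * for the requested ring.
 *
 * @ring: ring to init the fence driver on
 * @num_hw_submission: number of entries on the hardware queue
 *
 * Init the fence driver for the requested ring (all asics).
 * Returns 0 on success, negative error code on failure.
 */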
int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
				  unsigned num_hw_submission)
{
	struct amdgpu_device *adev = ring->adev;
	long timeout;
	int r;

	if (!adev)
		return -EINVAL;

	/* num_hw_submission must be a power of two for the sequence mask */
	if (!is_power_of_2(num_hw_submission))
		return -EINVAL;

	ring->fence_drv.cpu_addr = NULL;
	ring->fence_drv.gpu_addr = 0;
	ring->fence_drv.sync_seq = 0;
	atomic_set(&ring->fence_drv.last_seq, 0);
	ring->fence_drv.initialized = false;

	timer_setup(&ring->fence_drv.fallback_timer, amdgpu_fence_fallback, 0);

	ring->fence_drv.num_fences_mask = num_hw_submission * 2 - 1;
	spin_lock_init(&ring->fence_drv.lock);
	ring->fence_drv.fences = kcalloc(num_hw_submission * 2, sizeof(void *),
					 GFP_KERNEL);
	if (!ring->fence_drv.fences)
		return -ENOMEM;

	/* No need to setup the GPU scheduler for rings that don't need it */
	if (!ring->no_scheduler) {
		switch (ring->funcs->type) {
		case AMDGPU_RING_TYPE_GFX:
			timeout = adev->gfx_timeout;
			break;
		case AMDGPU_RING_TYPE_COMPUTE:
			timeout = adev->compute_timeout;
			break;
		case AMDGPU_RING_TYPE_SDMA:
			timeout = adev->sdma_timeout;
			break;
		default:
			timeout = adev->video_timeout;
			break;
		}

		r = drm_sched_init(&ring->sched, &amdgpu_sched_ops,
				   num_hw_submission, amdgpu_job_hang_limit,
				   timeout, ring->name);
		if (r) {
			DRM_ERROR("Failed to create scheduler on ring %s.\n",
				  ring->name);
			return r;
		}
	}

	return 0;
}
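
/**
 * amdgpu_fence_driver_init - init the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Init the fence driver for all possible rings (all asics).
 * Per-ring state is set up in amdgpu_fence_driver_init_ring().
 */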
int amdgpu_fence_driver_init(struct amdgpu_device *adev)
{
	return 0;
}
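
/**
 * amdgpu_fence_driver_fini - tear down the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Tear down the fence driver for all possible rings (all asics).
 */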
void amdgpu_fence_driver_fini(struct amdgpu_device *adev)
{
	unsigned i, j;
	int r;

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (!ring || !ring->fence_drv.initialized)
			continue;
		r = amdgpu_fence_wait_empty(ring);
		if (r) {
			/* no need to trigger GPU reset as we are unloading */
			amdgpu_fence_driver_force_completion(ring);
		}
		if (ring->fence_drv.irq_src)
			amdgpu_irq_put(adev, ring->fence_drv.irq_src,
				       ring->fence_drv.irq_type);
		if (!ring->no_scheduler)
			drm_sched_fini(&ring->sched);
		del_timer_sync(&ring->fence_drv.fallback_timer);
		for (j = 0; j <= ring->fence_drv.num_fences_mask; ++j)
			dma_fence_put(ring->fence_drv.fences[j]);
		kfree(ring->fence_drv.fences);
		ring->fence_drv.fences = NULL;
		ring->fence_drv.initialized = false;
	}
}
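
/**
 * amdgpu_fence_driver_suspend - suspend the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Suspend the fence driver for all possible rings (all asics).
 */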
void amdgpu_fence_driver_suspend(struct amdgpu_device *adev)
{
	int i, r;

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (!ring || !ring->fence_drv.initialized)
			continue;

		/* wait for gpu to finish processing current batch */
		r = amdgpu_fence_wait_empty(ring);
		if (r) {
			/* delay GPU reset to resume */
			amdgpu_fence_driver_force_completion(ring);
		}

		/* disable the interrupt */
		if (ring->fence_drv.irq_src)
			amdgpu_irq_put(adev, ring->fence_drv.irq_src,
				       ring->fence_drv.irq_type);
	}
}
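
/**
 * amdgpu_fence_driver_resume - resume the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Re-enable the fence interrupts on all initialized rings (all asics).
 */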
void amdgpu_fence_driver_resume(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (!ring || !ring->fence_drv.initialized)
			continue;

		/* enable the interrupt */
		if (ring->fence_drv.irq_src)
			amdgpu_irq_get(adev, ring->fence_drv.irq_src,
				       ring->fence_drv.irq_type);
	}
}
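
/**
 * amdgpu_fence_driver_force_completion - force signal the latest fence
 *
 * @ring: ring to force completion on
 *
 * Write the last emitted sequence number to the fence location and process
 * the fences, forcing all outstanding fences on the ring to complete.
 */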
void amdgpu_fence_driver_force_completion(struct amdgpu_ring *ring)
{
	amdgpu_fence_write(ring, ring->fence_drv.sync_seq);
	amdgpu_fence_process(ring);
}
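
/*
 * Common fence implementation
 */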
static const char *amdgpu_fence_get_driver_name(struct dma_fence *fence)
{
	return "amdgpu";
}

static const char *amdgpu_fence_get_timeline_name(struct dma_fence *f)
{
	struct amdgpu_fence *fence = to_amdgpu_fence(f);
	return (const char *)fence->ring->name;
}
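
/**
 * amdgpu_fence_enable_signaling - enable signalling on fence
 *
 * @f: fence
 *
 * Arms the fallback timer (if it isn't already pending) so the fence is
 * signalled even if the fence interrupt is missed.
 */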
static bool amdgpu_fence_enable_signaling(struct dma_fence *f)
{
	struct amdgpu_fence *fence = to_amdgpu_fence(f);
	struct amdgpu_ring *ring = fence->ring;

	if (!timer_pending(&ring->fence_drv.fallback_timer))
		amdgpu_fence_schedule_fallback(ring);

	DMA_FENCE_TRACE(&fence->base, "armed on ring %i!\n", ring->idx);

	return true;
}
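
/**
 * amdgpu_fence_free - free up the fence memory
 *
 * @rcu: RCU callback head
 *
 * Free up the fence memory after the RCU grace period.
 */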
static void amdgpu_fence_free(struct rcu_head *rcu)
{
	struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);
	struct amdgpu_fence *fence = to_amdgpu_fence(f);
	kmem_cache_free(amdgpu_fence_slab, fence);
}
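
/**
 * amdgpu_fence_release - callback that the fence can be freed
 *
 * @f: fence
 *
 * This function is called when the reference count becomes zero.
 * It schedules freeing of the fence after an RCU grace period.
 */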
static void amdgpu_fence_release(struct dma_fence *f)
{
	call_rcu(&f->rcu, amdgpu_fence_free);
}

static const struct dma_fence_ops amdgpu_fence_ops = {
	.get_driver_name = amdgpu_fence_get_driver_name,
	.get_timeline_name = amdgpu_fence_get_timeline_name,
	.enable_signaling = amdgpu_fence_enable_signaling,
	.release = amdgpu_fence_release,
};
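
/*
 * Fence debugfs
 */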
#if defined(CONFIG_DEBUG_FS)
static int amdgpu_debugfs_fence_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;
	int i;

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (!ring || !ring->fence_drv.initialized)
			continue;

		amdgpu_fence_process(ring);

		seq_printf(m, "--- ring %d (%s) ---\n", i, ring->name);
		seq_printf(m, "Last signaled fence 0x%08x\n",
			   atomic_read(&ring->fence_drv.last_seq));
		seq_printf(m, "Last emitted 0x%08x\n",
			   ring->fence_drv.sync_seq);

		if (ring->funcs->type == AMDGPU_RING_TYPE_GFX ||
		    ring->funcs->type == AMDGPU_RING_TYPE_SDMA) {
			seq_printf(m, "Last signaled trailing fence 0x%08x\n",
				   le32_to_cpu(*ring->trail_fence_cpu_addr));
			seq_printf(m, "Last emitted 0x%08x\n",
				   ring->trail_seq);
		}

		if (ring->funcs->type != AMDGPU_RING_TYPE_GFX)
			continue;

		seq_printf(m, "Last preempted 0x%08x\n",
			   le32_to_cpu(*(ring->fence_drv.cpu_addr + 2)));

		seq_printf(m, "Last reset 0x%08x\n",
			   le32_to_cpu(*(ring->fence_drv.cpu_addr + 4)));

		seq_printf(m, "Last both 0x%08x\n",
			   le32_to_cpu(*(ring->fence_drv.cpu_addr + 6)));
	}
	return 0;
}
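
/**
 * amdgpu_debugfs_gpu_recover - manually trigger a gpu reset & recover
 *
 * Manually trigger a gpu reset & recover from a debugfs file.
 */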
static int amdgpu_debugfs_gpu_recover(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;
	int r;

	r = pm_runtime_get_sync(dev->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(dev->dev);
		return 0;
	}

	seq_printf(m, "gpu recover\n");
	amdgpu_device_gpu_recover(adev, NULL);

	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);

	return 0;
}

static const struct drm_info_list amdgpu_debugfs_fence_list[] = {
	{"amdgpu_fence_info", &amdgpu_debugfs_fence_info, 0, NULL},
	{"amdgpu_gpu_recover", &amdgpu_debugfs_gpu_recover, 0, NULL}
};

static const struct drm_info_list amdgpu_debugfs_fence_list_sriov[] = {
	{"amdgpu_fence_info", &amdgpu_debugfs_fence_info, 0, NULL},
};
#endif

int amdgpu_debugfs_fence_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	if (amdgpu_sriov_vf(adev))
		return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_fence_list_sriov,
						ARRAY_SIZE(amdgpu_debugfs_fence_list_sriov));
	return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_fence_list,
					ARRAY_SIZE(amdgpu_debugfs_fence_list));
#else
	return 0;
#endif
}