#include <linux/seq_file.h>
#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/firmware.h>
#include <linux/pm_runtime.h>

#include <drm/drm_debugfs.h>

#include "amdgpu.h"
#include "amdgpu_trace.h"

/*
 * Fences
 * Fences mark a point in time that allows us to notify the userspace
 * that the GPU has finished certain jobs.
 */
struct amdgpu_fence {
	struct dma_fence base;

	/* RB, DMA, etc. */
	struct amdgpu_ring		*ring;
};

static struct kmem_cache *amdgpu_fence_slab;

int amdgpu_fence_slab_init(void)
{
	amdgpu_fence_slab = kmem_cache_create(
		"amdgpu_fence", sizeof(struct amdgpu_fence), 0,
		SLAB_HWCACHE_ALIGN, NULL);
	if (!amdgpu_fence_slab)
		return -ENOMEM;
	return 0;
}

void amdgpu_fence_slab_fini(void)
{
	rcu_barrier();
	kmem_cache_destroy(amdgpu_fence_slab);
}

/*
 * Cast helper
 */
static const struct dma_fence_ops amdgpu_fence_ops;
static inline struct amdgpu_fence *to_amdgpu_fence(struct dma_fence *f)
{
	struct amdgpu_fence *__f = container_of(f, struct amdgpu_fence, base);

	if (__f->base.ops == &amdgpu_fence_ops)
		return __f;

	return NULL;
}

/**
 * amdgpu_fence_write - write a fence value
 *
 * @ring: ring the fence is associated with
 * @seq: sequence number to write
 *
 * Writes a fence value to memory (all asics).
 */
static void amdgpu_fence_write(struct amdgpu_ring *ring, u32 seq)
{
	struct amdgpu_fence_driver *drv = &ring->fence_drv;

	if (drv->cpu_addr)
		*drv->cpu_addr = cpu_to_le32(seq);
}

/**
 * amdgpu_fence_read - read a fence value
 *
 * @ring: ring the fence is associated with
 *
 * Reads a fence value from memory (all asics).
 * Returns the value of the fence read from memory.
 */
static u32 amdgpu_fence_read(struct amdgpu_ring *ring)
{
	struct amdgpu_fence_driver *drv = &ring->fence_drv;
	u32 seq = 0;

	if (drv->cpu_addr)
		seq = le32_to_cpu(*drv->cpu_addr);
	else
		seq = atomic_read(&drv->last_seq);

	return seq;
}

/**
 * amdgpu_fence_emit - emit a fence on the requested ring
 *
 * @ring: ring the fence is associated with
 * @f: resulting fence object
 * @flags: flags to pass into the subordinate .emit_fence() call
 *
 * Emits a fence command on the requested ring (all asics).
 * Returns 0 on success, -ENOMEM on failure.
 */
int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f,
		      unsigned flags)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_fence *fence;
	struct dma_fence __rcu **ptr;
	uint32_t seq;
	int r;

	fence = kmem_cache_alloc(amdgpu_fence_slab, GFP_KERNEL);
	if (fence == NULL)
		return -ENOMEM;

	seq = ++ring->fence_drv.sync_seq;
	fence->ring = ring;
	dma_fence_init(&fence->base, &amdgpu_fence_ops,
		       &ring->fence_drv.lock,
		       adev->fence_context + ring->idx,
		       seq);
	amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
			       seq, flags | AMDGPU_FENCE_FLAG_INT);
	pm_runtime_get_noresume(adev->ddev->dev);
	ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
	if (unlikely(rcu_dereference_protected(*ptr, 1))) {
		struct dma_fence *old;

		rcu_read_lock();
		old = dma_fence_get_rcu_safe(ptr);
		rcu_read_unlock();

		if (old) {
			r = dma_fence_wait(old, false);
			dma_fence_put(old);
			if (r)
				return r;
		}
	}

	/* This function can't be called concurrently anyway, otherwise
	 * emitting the fence would mess up the hardware ring buffer.
	 */
	rcu_assign_pointer(*ptr, dma_fence_get(&fence->base));

	*f = &fence->base;

	return 0;
}
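
/*
 * Illustrative usage sketch for amdgpu_fence_emit() (not taken from this
 * driver's call sites; real submissions go through the job/IB path, which
 * hands the returned fence to the scheduler instead of waiting directly):
 *
 *	struct dma_fence *f;
 *	int r = amdgpu_fence_emit(ring, &f, 0);
 *	if (!r) {
 *		r = dma_fence_wait(f, false);	(block until the GPU signals)
 *		dma_fence_put(f);		(drop the reference we received)
 *	}
 */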

/**
 * amdgpu_fence_emit_polling - emit a fence on the requested ring
 *
 * @ring: ring the fence is associated with
 * @s: resulting sequence number
 *
 * Emits a fence command on the requested ring (all asics).
 * Used for polling fences.
 * Returns 0 on success, -EINVAL on failure.
 */
int amdgpu_fence_emit_polling(struct amdgpu_ring *ring, uint32_t *s)
{
	uint32_t seq;

	if (!s)
		return -EINVAL;

	seq = ++ring->fence_drv.sync_seq;
	amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
			       seq, 0);

	*s = seq;

	return 0;
}

/**
 * amdgpu_fence_schedule_fallback - schedule fallback check
 *
 * @ring: pointer to struct amdgpu_ring
 *
 * Start a timer as fallback to our interrupts.
 */
static void amdgpu_fence_schedule_fallback(struct amdgpu_ring *ring)
{
	mod_timer(&ring->fence_drv.fallback_timer,
		  jiffies + AMDGPU_FENCE_JIFFIES_TIMEOUT);
}

/**
 * amdgpu_fence_process - check for fence activity
 *
 * @ring: pointer to struct amdgpu_ring
 *
 * Checks the current fence value and calculates the last
 * signalled fence value. Signals all fences up to the
 * last signalled value if the seq number has increased.
 *
 * Returns true if at least one fence was processed.
 */
bool amdgpu_fence_process(struct amdgpu_ring *ring)
{
	struct amdgpu_fence_driver *drv = &ring->fence_drv;
	struct amdgpu_device *adev = ring->adev;
	uint32_t seq, last_seq;
	int r;

	do {
		last_seq = atomic_read(&ring->fence_drv.last_seq);
		seq = amdgpu_fence_read(ring);

	} while (atomic_cmpxchg(&drv->last_seq, last_seq, seq) != last_seq);

	if (del_timer(&ring->fence_drv.fallback_timer) &&
	    seq != ring->fence_drv.sync_seq)
		amdgpu_fence_schedule_fallback(ring);

	if (unlikely(seq == last_seq))
		return false;

	last_seq &= drv->num_fences_mask;
	seq &= drv->num_fences_mask;

	do {
		struct dma_fence *fence, **ptr;

		++last_seq;
		last_seq &= drv->num_fences_mask;
		ptr = &drv->fences[last_seq];

		/* There is always exactly one thread signaling this fence slot */
		fence = rcu_dereference_protected(*ptr, 1);
		RCU_INIT_POINTER(*ptr, NULL);

		if (!fence)
			continue;

		r = dma_fence_signal(fence);
		if (!r)
			DMA_FENCE_TRACE(fence, "signaled from irq context\n");
		else
			BUG();

		dma_fence_put(fence);
		pm_runtime_mark_last_busy(adev->ddev->dev);
		pm_runtime_put_autosuspend(adev->ddev->dev);
	} while (last_seq != seq);

	return true;
}
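
/*
 * Worked example for the signalling loop above (values are illustrative):
 * with num_fences_mask = 0x0f (16 slots), an unmasked last_seq of 14 and a
 * hardware seq of 18 mask down to 14 and 2, so the loop visits slots
 * 15, 0, 1 and 2 and signals the fences for sequence numbers 15..18 in
 * emission order before updating is complete.
 */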

/**
 * amdgpu_fence_fallback - fallback for hardware interrupts
 *
 * @t: timer context used to obtain the pointer to the ring structure
 *
 * Checks for fence activity in case the fence interrupt was missed.
 */
static void amdgpu_fence_fallback(struct timer_list *t)
{
	struct amdgpu_ring *ring = from_timer(ring, t,
					      fence_drv.fallback_timer);

	if (amdgpu_fence_process(ring))
		DRM_WARN("Fence fallback timer expired on ring %s\n", ring->name);
}

/**
 * amdgpu_fence_wait_empty - wait for all fences to signal
 *
 * @ring: ring the fences are associated with
 *
 * Wait for all fences on the requested ring to signal (all asics).
 * Returns 0 if the fences have passed, error for all other cases.
 */
int amdgpu_fence_wait_empty(struct amdgpu_ring *ring)
{
	uint64_t seq = READ_ONCE(ring->fence_drv.sync_seq);
	struct dma_fence *fence, **ptr;
	int r;

	if (!seq)
		return 0;

	ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
	rcu_read_lock();
	fence = rcu_dereference(*ptr);
	if (!fence || !dma_fence_get_rcu(fence)) {
		rcu_read_unlock();
		return 0;
	}
	rcu_read_unlock();

	r = dma_fence_wait(fence, false);
	dma_fence_put(fence);
	return r;
}

/**
 * amdgpu_fence_wait_polling - busy wait for a given sequence number
 *
 * @ring: ring the fence is associated with
 * @wait_seq: sequence number to wait for
 * @timeout: the timeout to wait, decremented in 5 usec steps
 *
 * Busy-waits until the fence value on the requested ring reaches
 * @wait_seq or the timeout expires (all asics).
 * Returns the remaining timeout if the sequence number was reached,
 * 0 on timeout.
 */
signed long amdgpu_fence_wait_polling(struct amdgpu_ring *ring,
				      uint32_t wait_seq,
				      signed long timeout)
{
	uint32_t seq;

	do {
		seq = amdgpu_fence_read(ring);
		udelay(5);
		timeout -= 5;
	} while ((int32_t)(wait_seq - seq) > 0 && timeout > 0);

	return timeout > 0 ? timeout : 0;
}

/**
 * amdgpu_fence_count_emitted - get the count of emitted fences
 *
 * @ring: ring the fences are associated with
 *
 * Get the number of fences emitted on the requested ring (all asics).
 * Returns the number of emitted but not yet signalled fences on the ring.
 * Used by the power management code to track ring activity.
 */
unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring)
{
	uint64_t emitted;

	/* We are not protected by ring lock when reading the last sequence
	 * but it's ok to report slightly wrong fence count here.
	 */
	amdgpu_fence_process(ring);
	emitted = 0x100000000ull;
	emitted -= atomic_read(&ring->fence_drv.last_seq);
	emitted += READ_ONCE(ring->fence_drv.sync_seq);
	return lower_32_bits(emitted);
}
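
/*
 * The arithmetic above works modulo 2^32, so the count stays correct across
 * a sequence number wrap.  For example (illustrative values): with
 * last_seq = 0xfffffff0 already signalled and sync_seq wrapped around to
 * 0x00000005, the result is 0x100000000 - 0xfffffff0 + 0x5 = 0x15, i.e.
 * 21 fences still in flight.
 */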

/**
 * amdgpu_fence_driver_start_ring - make the fence driver
 * ready for use on the requested ring.
 *
 * @ring: ring to start the fence driver on
 * @irq_src: interrupt source to use for this ring
 * @irq_type: interrupt type to use for this ring
 *
 * Make the fence driver ready for processing (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has.
 * Returns 0 for success, errors for failure.
 */
int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
				   struct amdgpu_irq_src *irq_src,
				   unsigned irq_type)
{
	struct amdgpu_device *adev = ring->adev;
	uint64_t index;

	if (ring->funcs->type != AMDGPU_RING_TYPE_UVD) {
		ring->fence_drv.cpu_addr = &adev->wb.wb[ring->fence_offs];
		ring->fence_drv.gpu_addr = adev->wb.gpu_addr + (ring->fence_offs * 4);
	} else {
		/* put fence directly behind firmware */
		index = ALIGN(adev->uvd.fw->size, 8);
		ring->fence_drv.cpu_addr = adev->uvd.inst[ring->me].cpu_addr + index;
		ring->fence_drv.gpu_addr = adev->uvd.inst[ring->me].gpu_addr + index;
	}
	amdgpu_fence_write(ring, atomic_read(&ring->fence_drv.last_seq));
	amdgpu_irq_get(adev, irq_src, irq_type);

	ring->fence_drv.irq_src = irq_src;
	ring->fence_drv.irq_type = irq_type;
	ring->fence_drv.initialized = true;

	DRM_DEV_DEBUG(adev->dev, "fence driver on ring %s use gpu addr 0x%016llx, cpu addr 0x%p\n",
		      ring->name, ring->fence_drv.gpu_addr, ring->fence_drv.cpu_addr);
	return 0;
}

/**
 * amdgpu_fence_driver_init_ring - init the fence driver
 * for the requested ring.
 *
 * @ring: ring to init the fence driver on
 * @num_hw_submission: number of entries on the hardware queue
 *
 * Init the fence driver for the requested ring (all asics).
 * Helper function for amdgpu_fence_driver_init().
 */
int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
				  unsigned num_hw_submission)
{
	struct amdgpu_device *adev = ring->adev;
	long timeout;
	int r;

	if (!adev)
		return -EINVAL;

	/* Check that num_hw_submission is a power of two */
	if ((num_hw_submission & (num_hw_submission - 1)) != 0)
		return -EINVAL;

	ring->fence_drv.cpu_addr = NULL;
	ring->fence_drv.gpu_addr = 0;
	ring->fence_drv.sync_seq = 0;
	atomic_set(&ring->fence_drv.last_seq, 0);
	ring->fence_drv.initialized = false;

	timer_setup(&ring->fence_drv.fallback_timer, amdgpu_fence_fallback, 0);

	ring->fence_drv.num_fences_mask = num_hw_submission * 2 - 1;
	spin_lock_init(&ring->fence_drv.lock);
	ring->fence_drv.fences = kcalloc(num_hw_submission * 2, sizeof(void *),
					 GFP_KERNEL);
	if (!ring->fence_drv.fences)
		return -ENOMEM;

	/* No need to setup the GPU scheduler for the KIQ ring */
	if (ring->funcs->type != AMDGPU_RING_TYPE_KIQ) {
		switch (ring->funcs->type) {
		case AMDGPU_RING_TYPE_GFX:
			timeout = adev->gfx_timeout;
			break;
		case AMDGPU_RING_TYPE_COMPUTE:
			timeout = adev->compute_timeout;
			break;
		case AMDGPU_RING_TYPE_SDMA:
			timeout = adev->sdma_timeout;
			break;
		default:
			timeout = adev->video_timeout;
			break;
		}

		r = drm_sched_init(&ring->sched, &amdgpu_sched_ops,
				   num_hw_submission, amdgpu_job_hang_limit,
				   timeout, ring->name);
		if (r) {
			DRM_ERROR("Failed to create scheduler on ring %s.\n",
				  ring->name);
			return r;
		}
	}

	return 0;
}
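
/*
 * Sizing example (illustrative): with num_hw_submission = 256 the fences
 * array gets 512 slots and num_fences_mask = 0x1ff, so a sequence number
 * maps to a slot via (seq & 0x1ff).  amdgpu_fence_emit() therefore only has
 * to wait when a slot is reused while its previous fence is still
 * unsignalled, which is why num_hw_submission must be a power of two.
 */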

/**
 * amdgpu_fence_driver_init - init the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Init the fence driver for all possible rings (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has using
 * amdgpu_fence_driver_start_ring().
 * Returns 0 for success.
 */
int amdgpu_fence_driver_init(struct amdgpu_device *adev)
{
	if (amdgpu_debugfs_fence_init(adev))
		dev_err(adev->dev, "fence debugfs file creation failed\n");

	return 0;
}

/**
 * amdgpu_fence_driver_fini - tear down the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Tear down the fence driver for all possible rings (all asics).
 */
void amdgpu_fence_driver_fini(struct amdgpu_device *adev)
{
	unsigned i, j;
	int r;

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (!ring || !ring->fence_drv.initialized)
			continue;
		r = amdgpu_fence_wait_empty(ring);
		if (r) {
			/* no need to trigger GPU reset as we are unloading */
			amdgpu_fence_driver_force_completion(ring);
		}
		amdgpu_irq_put(adev, ring->fence_drv.irq_src,
			       ring->fence_drv.irq_type);
		drm_sched_fini(&ring->sched);
		del_timer_sync(&ring->fence_drv.fallback_timer);
		for (j = 0; j <= ring->fence_drv.num_fences_mask; ++j)
			dma_fence_put(ring->fence_drv.fences[j]);
		kfree(ring->fence_drv.fences);
		ring->fence_drv.fences = NULL;
		ring->fence_drv.initialized = false;
	}
}

/**
 * amdgpu_fence_driver_suspend - suspend the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Suspend the fence driver for all possible rings (all asics).
 */
void amdgpu_fence_driver_suspend(struct amdgpu_device *adev)
{
	int i, r;

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (!ring || !ring->fence_drv.initialized)
			continue;

		/* wait for gpu to finish processing current batch */
		r = amdgpu_fence_wait_empty(ring);
		if (r) {
			/* delay GPU reset to resume */
			amdgpu_fence_driver_force_completion(ring);
		}

		/* disable the interrupt */
		amdgpu_irq_put(adev, ring->fence_drv.irq_src,
			       ring->fence_drv.irq_type);
	}
}

/**
 * amdgpu_fence_driver_resume - re-enable the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Re-enable the fence driver for all possible rings (all asics).
 * Not all asics have all rings, so each asic will only
 * re-enable the fence driver on the rings it has using
 * amdgpu_fence_driver_start_ring().
 */
void amdgpu_fence_driver_resume(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (!ring || !ring->fence_drv.initialized)
			continue;

		/* enable the interrupt */
		amdgpu_irq_get(adev, ring->fence_drv.irq_src,
			       ring->fence_drv.irq_type);
	}
}

/**
 * amdgpu_fence_driver_force_completion - force signal the latest fence of a ring
 *
 * @ring: ring whose latest fence should be signalled
 *
 * Writes the last emitted sequence number as the signalled value and
 * processes the fences, forcing completion of everything emitted so far.
 */
void amdgpu_fence_driver_force_completion(struct amdgpu_ring *ring)
{
	amdgpu_fence_write(ring, ring->fence_drv.sync_seq);
	amdgpu_fence_process(ring);
}

/*
 * Common fence implementation
 */

static const char *amdgpu_fence_get_driver_name(struct dma_fence *fence)
{
	return "amdgpu";
}

static const char *amdgpu_fence_get_timeline_name(struct dma_fence *f)
{
	struct amdgpu_fence *fence = to_amdgpu_fence(f);

	return (const char *)fence->ring->name;
}

/**
 * amdgpu_fence_enable_signaling - enable signalling on fence
 *
 * @f: fence
 *
 * This function is called with the fence lock held when someone starts
 * waiting on the fence.  It arms the fallback timer so the fence still
 * gets signalled even if the fence interrupt is missed.
 */
static bool amdgpu_fence_enable_signaling(struct dma_fence *f)
{
	struct amdgpu_fence *fence = to_amdgpu_fence(f);
	struct amdgpu_ring *ring = fence->ring;

	if (!timer_pending(&ring->fence_drv.fallback_timer))
		amdgpu_fence_schedule_fallback(ring);

	DMA_FENCE_TRACE(&fence->base, "armed on ring %i!\n", ring->idx);

	return true;
}

/**
 * amdgpu_fence_free - free up the fence memory
 *
 * @rcu: RCU callback head
 *
 * Free up the fence memory after the RCU grace period.
 */
static void amdgpu_fence_free(struct rcu_head *rcu)
{
	struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);
	struct amdgpu_fence *fence = to_amdgpu_fence(f);

	kmem_cache_free(amdgpu_fence_slab, fence);
}

/**
 * amdgpu_fence_release - callback that the fence can be freed
 *
 * @f: fence
 *
 * This function is called when the reference count becomes zero.
 * It just RCU-schedules freeing up the fence.
 */
static void amdgpu_fence_release(struct dma_fence *f)
{
	call_rcu(&f->rcu, amdgpu_fence_free);
}

static const struct dma_fence_ops amdgpu_fence_ops = {
	.get_driver_name = amdgpu_fence_get_driver_name,
	.get_timeline_name = amdgpu_fence_get_timeline_name,
	.enable_signaling = amdgpu_fence_enable_signaling,
	.release = amdgpu_fence_release,
};

/*
 * Fence debugfs
 */
#if defined(CONFIG_DEBUG_FS)
static int amdgpu_debugfs_fence_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;
	int i;

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (!ring || !ring->fence_drv.initialized)
			continue;

		amdgpu_fence_process(ring);

		seq_printf(m, "--- ring %d (%s) ---\n", i, ring->name);
		seq_printf(m, "Last signaled fence          0x%08x\n",
			   atomic_read(&ring->fence_drv.last_seq));
		seq_printf(m, "Last emitted                 0x%08x\n",
			   ring->fence_drv.sync_seq);

		if (ring->funcs->type == AMDGPU_RING_TYPE_GFX ||
		    ring->funcs->type == AMDGPU_RING_TYPE_SDMA) {
			seq_printf(m, "Last signaled trailing fence 0x%08x\n",
				   le32_to_cpu(*ring->trail_fence_cpu_addr));
			seq_printf(m, "Last emitted                 0x%08x\n",
				   ring->trail_seq);
		}

		if (ring->funcs->type != AMDGPU_RING_TYPE_GFX)
			continue;

		/* set in CP_VMID_PREEMPT and preemption occurred */
		seq_printf(m, "Last preempted               0x%08x\n",
			   le32_to_cpu(*(ring->fence_drv.cpu_addr + 2)));
		/* set in CP_VMID_RESET and reset occurred */
		seq_printf(m, "Last reset                   0x%08x\n",
			   le32_to_cpu(*(ring->fence_drv.cpu_addr + 4)));
		/* Both preemption and reset occurred */
		seq_printf(m, "Last both                    0x%08x\n",
			   le32_to_cpu(*(ring->fence_drv.cpu_addr + 6)));
	}
	return 0;
}

/*
 * amdgpu_debugfs_gpu_recover - manually trigger a GPU reset & recover
 */
static int amdgpu_debugfs_gpu_recover(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;
	int r;

	r = pm_runtime_get_sync(dev->dev);
	if (r < 0) {
		/* pm_runtime_get_sync() raises the usage count even on failure */
		pm_runtime_put_autosuspend(dev->dev);
		return 0;
	}

	seq_printf(m, "gpu recover\n");
	amdgpu_device_gpu_recover(adev, NULL);

	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);

	return 0;
}

static const struct drm_info_list amdgpu_debugfs_fence_list[] = {
	{"amdgpu_fence_info", &amdgpu_debugfs_fence_info, 0, NULL},
	{"amdgpu_gpu_recover", &amdgpu_debugfs_gpu_recover, 0, NULL}
};

static const struct drm_info_list amdgpu_debugfs_fence_list_sriov[] = {
	{"amdgpu_fence_info", &amdgpu_debugfs_fence_info, 0, NULL},
};
#endif

int amdgpu_debugfs_fence_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	if (amdgpu_sriov_vf(adev))
		return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_fence_list_sriov,
						ARRAY_SIZE(amdgpu_debugfs_fence_list_sriov));
	return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_fence_list,
					ARRAY_SIZE(amdgpu_debugfs_fence_list));
#else
	return 0;
#endif
}