#include <linux/seq_file.h>
#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
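
/*
 * Fences mark an event in the GPU's pipeline and can be used
 * for GPU/CPU synchronization.  When the fence is written, it
 * is expected that all buffers associated with that fence are
 * no longer in use by the associated ring on the GPU and that
 * the relevant GPU caches have been flushed.
 */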
struct amdgpu_fence {
	struct dma_fence base;

	/* RB, DMA, etc. */
	struct amdgpu_ring	*ring;
};

static struct kmem_cache *amdgpu_fence_slab;

int amdgpu_fence_slab_init(void)
{
	amdgpu_fence_slab = kmem_cache_create(
		"amdgpu_fence", sizeof(struct amdgpu_fence), 0,
		SLAB_HWCACHE_ALIGN, NULL);
	if (!amdgpu_fence_slab)
		return -ENOMEM;
	return 0;
}

void amdgpu_fence_slab_fini(void)
{
	rcu_barrier();
	kmem_cache_destroy(amdgpu_fence_slab);
}

/*
 * Cast helper
 */
static const struct dma_fence_ops amdgpu_fence_ops;
static inline struct amdgpu_fence *to_amdgpu_fence(struct dma_fence *f)
{
	struct amdgpu_fence *__f = container_of(f, struct amdgpu_fence, base);

	if (__f->base.ops == &amdgpu_fence_ops)
		return __f;

	return NULL;
}
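
/**
 * amdgpu_fence_write - write a fence value
 *
 * @ring: ring the fence is associated with
 * @seq: sequence number to write
 *
 * Writes a fence value to memory (all asics).
 */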
static void amdgpu_fence_write(struct amdgpu_ring *ring, u32 seq)
{
	struct amdgpu_fence_driver *drv = &ring->fence_drv;

	if (drv->cpu_addr)
		*drv->cpu_addr = cpu_to_le32(seq);
}
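
/**
 * amdgpu_fence_read - read a fence value
 *
 * @ring: ring the fence is associated with
 *
 * Reads a fence value from memory (all asics).
 * Returns the value of the fence read from memory.
 */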
static u32 amdgpu_fence_read(struct amdgpu_ring *ring)
{
	struct amdgpu_fence_driver *drv = &ring->fence_drv;
	u32 seq = 0;

	if (drv->cpu_addr)
		seq = le32_to_cpu(*drv->cpu_addr);
	else
		seq = atomic_read(&drv->last_seq);

	return seq;
}
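
/**
 * amdgpu_fence_emit - emit a fence on the requested ring
 *
 * @ring: ring the fence is associated with
 * @f: resulting fence object
 * @flags: AMDGPU_FENCE_FLAG_* flags to emit the fence with
 *
 * Emits a fence command on the requested ring (all asics).
 * Returns 0 on success, -ENOMEM on failure.
 */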
int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f,
		      unsigned flags)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_fence *fence;
	struct dma_fence *old, **ptr;
	uint32_t seq;

	fence = kmem_cache_alloc(amdgpu_fence_slab, GFP_KERNEL);
	if (fence == NULL)
		return -ENOMEM;

	seq = ++ring->fence_drv.sync_seq;
	fence->ring = ring;
	dma_fence_init(&fence->base, &amdgpu_fence_ops,
		       &ring->fence_drv.lock,
		       adev->fence_context + ring->idx,
		       seq);
	amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
			       seq, flags | AMDGPU_FENCE_FLAG_INT);

	ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
	/* This function can't be called concurrently anyway, otherwise
	 * emitting the fence would mess up the hardware ring buffer.
	 */
	old = rcu_dereference_protected(*ptr, 1);
	if (old && !dma_fence_is_signaled(old)) {
		DRM_INFO("rcu slot is busy\n");
		dma_fence_wait(old, false);
	}

	rcu_assign_pointer(*ptr, dma_fence_get(&fence->base));

	*f = &fence->base;

	return 0;
}
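
/**
 * amdgpu_fence_emit_polling - emit a fence on the requested ring
 *
 * @ring: ring the fence is associated with
 * @s: resulting sequence number
 *
 * Emits a fence command on the requested ring (all asics).
 * Used for fences that are waited on by polling rather than interrupts.
 * Returns 0 on success, -EINVAL if no sequence pointer is given.
 */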
int amdgpu_fence_emit_polling(struct amdgpu_ring *ring, uint32_t *s)
{
	uint32_t seq;

	if (!s)
		return -EINVAL;

	seq = ++ring->fence_drv.sync_seq;
	amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
			       seq, 0);

	*s = seq;

	return 0;
}
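
/**
 * amdgpu_fence_schedule_fallback - schedule fallback check
 *
 * @ring: pointer to struct amdgpu_ring
 *
 * Start a timer as fallback to our interrupts.
 */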
static void amdgpu_fence_schedule_fallback(struct amdgpu_ring *ring)
{
	mod_timer(&ring->fence_drv.fallback_timer,
		  jiffies + AMDGPU_FENCE_JIFFIES_TIMEOUT);
}
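
/**
 * amdgpu_fence_process - check for fence activity
 *
 * @ring: pointer to struct amdgpu_ring
 *
 * Checks the current fence value and signals all fences
 * up to the last read sequence number.
 */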
void amdgpu_fence_process(struct amdgpu_ring *ring)
{
	struct amdgpu_fence_driver *drv = &ring->fence_drv;
	uint32_t seq, last_seq;
	int r;

	do {
		last_seq = atomic_read(&ring->fence_drv.last_seq);
		seq = amdgpu_fence_read(ring);

	} while (atomic_cmpxchg(&drv->last_seq, last_seq, seq) != last_seq);

	if (seq != ring->fence_drv.sync_seq)
		amdgpu_fence_schedule_fallback(ring);

	if (unlikely(seq == last_seq))
		return;

	last_seq &= drv->num_fences_mask;
	seq &= drv->num_fences_mask;

	do {
		struct dma_fence *fence, **ptr;

		++last_seq;
		last_seq &= drv->num_fences_mask;
		ptr = &drv->fences[last_seq];

		/* There is always exactly one thread signaling this fence slot */
		fence = rcu_dereference_protected(*ptr, 1);
		RCU_INIT_POINTER(*ptr, NULL);

		if (!fence)
			continue;

		r = dma_fence_signal(fence);
		if (!r)
			DMA_FENCE_TRACE(fence, "signaled from irq context\n");
		else
			BUG();

		dma_fence_put(fence);
	} while (last_seq != seq);
}
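
/**
 * amdgpu_fence_fallback - fallback for hardware interrupts
 *
 * @t: timer context used to obtain the ring
 *
 * Checks for fence activity in case the fence interrupt was missed.
 */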
static void amdgpu_fence_fallback(struct timer_list *t)
{
	struct amdgpu_ring *ring = from_timer(ring, t,
					      fence_drv.fallback_timer);

	amdgpu_fence_process(ring);
}
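
/**
 * amdgpu_fence_wait_empty - wait for all fences to signal
 *
 * @ring: ring the fences are associated with
 *
 * Wait for the last emitted fence on the ring to signal (all asics).
 * Returns 0 if the fences have passed, error for all other cases.
 */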
int amdgpu_fence_wait_empty(struct amdgpu_ring *ring)
{
	uint64_t seq = READ_ONCE(ring->fence_drv.sync_seq);
	struct dma_fence *fence, **ptr;
	int r;

	if (!seq)
		return 0;

	ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
	rcu_read_lock();
	fence = rcu_dereference(*ptr);
	if (!fence || !dma_fence_get_rcu(fence)) {
		rcu_read_unlock();
		return 0;
	}
	rcu_read_unlock();

	r = dma_fence_wait(fence, false);
	dma_fence_put(fence);
	return r;
}
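
/**
 * amdgpu_fence_wait_polling - busy wait for a given sequence number
 *
 * @ring: ring the fence is associated with
 * @wait_seq: sequence number to wait for
 * @timeout: the timeout for waiting in usecs
 *
 * Busy-waits until the fence value reaches @wait_seq or the timeout expires.
 * Returns the remaining timeout in usecs, or 0 on timeout.
 */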
signed long amdgpu_fence_wait_polling(struct amdgpu_ring *ring,
				      uint32_t wait_seq,
				      signed long timeout)
{
	uint32_t seq;

	do {
		seq = amdgpu_fence_read(ring);
		udelay(5);
		timeout -= 5;
	} while ((int32_t)(wait_seq - seq) > 0 && timeout > 0);

	return timeout > 0 ? timeout : 0;
}
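
/**
 * amdgpu_fence_count_emitted - get the count of emitted fences
 *
 * @ring: ring the fence is associated with
 *
 * Get the number of fences emitted on the requested ring (all asics).
 * Returns the number of emitted fences that have not yet signaled.
 */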
unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring)
{
	uint64_t emitted;

	/* We are not protected by ring lock when reading the last sequence
	 * but it's ok to report slightly wrong fence count here.
	 */
	amdgpu_fence_process(ring);
	emitted = 0x100000000ull;
	emitted -= atomic_read(&ring->fence_drv.last_seq);
	emitted += READ_ONCE(ring->fence_drv.sync_seq);
	return lower_32_bits(emitted);
}
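
/**
 * amdgpu_fence_driver_start_ring - make the fence driver
 * ready for use on the requested ring.
 *
 * @ring: ring to start the fence driver on
 * @irq_src: interrupt source to use for this ring
 * @irq_type: interrupt type to use for this ring
 *
 * Make the fence driver ready for processing (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has.
 * Returns 0 for success, errors for failure.
 */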
int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
				   struct amdgpu_irq_src *irq_src,
				   unsigned irq_type)
{
	struct amdgpu_device *adev = ring->adev;
	uint64_t index;

	if (ring->funcs->type != AMDGPU_RING_TYPE_UVD) {
		ring->fence_drv.cpu_addr = &adev->wb.wb[ring->fence_offs];
		ring->fence_drv.gpu_addr = adev->wb.gpu_addr + (ring->fence_offs * 4);
	} else {
		/* put fence directly behind firmware */
		index = ALIGN(adev->uvd.fw->size, 8);
		ring->fence_drv.cpu_addr = adev->uvd.inst[ring->me].cpu_addr + index;
		ring->fence_drv.gpu_addr = adev->uvd.inst[ring->me].gpu_addr + index;
	}
	amdgpu_fence_write(ring, atomic_read(&ring->fence_drv.last_seq));
	amdgpu_irq_get(adev, irq_src, irq_type);

	ring->fence_drv.irq_src = irq_src;
	ring->fence_drv.irq_type = irq_type;
	ring->fence_drv.initialized = true;

	dev_dbg(adev->dev, "fence driver on ring %d use gpu addr 0x%016llx, "
		"cpu addr 0x%p\n", ring->idx,
		ring->fence_drv.gpu_addr, ring->fence_drv.cpu_addr);
	return 0;
}
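
/**
 * amdgpu_fence_driver_init_ring - init the fence driver
 * for the requested ring.
 *
 * @ring: ring to init the fence driver on
 * @num_hw_submission: number of entries on the hardware queue
 *
 * Init the fence driver for the requested ring (all asics).
 * Helper function for amdgpu_fence_driver_init().
 */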
int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
				  unsigned num_hw_submission)
{
	long timeout;
	int r;

	/* Check that num_hw_submission is a power of two */
	if ((num_hw_submission & (num_hw_submission - 1)) != 0)
		return -EINVAL;

	ring->fence_drv.cpu_addr = NULL;
	ring->fence_drv.gpu_addr = 0;
	ring->fence_drv.sync_seq = 0;
	atomic_set(&ring->fence_drv.last_seq, 0);
	ring->fence_drv.initialized = false;

	timer_setup(&ring->fence_drv.fallback_timer, amdgpu_fence_fallback, 0);

	ring->fence_drv.num_fences_mask = num_hw_submission * 2 - 1;
	spin_lock_init(&ring->fence_drv.lock);
	ring->fence_drv.fences = kcalloc(num_hw_submission * 2, sizeof(void *),
					 GFP_KERNEL);
	if (!ring->fence_drv.fences)
		return -ENOMEM;

	/* No need to setup the GPU scheduler for KIQ ring */
	if (ring->funcs->type != AMDGPU_RING_TYPE_KIQ) {
		/* for non-sriov case, no timeout enforce on compute ring */
		if ((ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE)
				&& !amdgpu_sriov_vf(ring->adev))
			timeout = MAX_SCHEDULE_TIMEOUT;
		else
			timeout = msecs_to_jiffies(amdgpu_lockup_timeout);

		r = drm_sched_init(&ring->sched, &amdgpu_sched_ops,
				   num_hw_submission, amdgpu_job_hang_limit,
				   timeout, ring->name);
		if (r) {
			DRM_ERROR("Failed to create scheduler on ring %s.\n",
				  ring->name);
			return r;
		}
	}

	return 0;
}
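
/**
 * amdgpu_fence_driver_init - init the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Init the fence driver for all possible rings (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has using
 * amdgpu_fence_driver_start_ring().
 * Returns 0 for success.
 */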
int amdgpu_fence_driver_init(struct amdgpu_device *adev)
{
	if (amdgpu_debugfs_fence_init(adev))
		dev_err(adev->dev, "fence debugfs file creation failed\n");

	return 0;
}
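
/**
 * amdgpu_fence_driver_fini - tear down the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Tear down the fence driver for all possible rings (all asics).
 */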
void amdgpu_fence_driver_fini(struct amdgpu_device *adev)
{
	unsigned i, j;
	int r;

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (!ring || !ring->fence_drv.initialized)
			continue;
		r = amdgpu_fence_wait_empty(ring);
		if (r) {
			/* no need to trigger GPU reset as we are unloading */
			amdgpu_fence_driver_force_completion(ring);
		}
		amdgpu_irq_put(adev, ring->fence_drv.irq_src,
			       ring->fence_drv.irq_type);
		drm_sched_fini(&ring->sched);
		del_timer_sync(&ring->fence_drv.fallback_timer);
		for (j = 0; j <= ring->fence_drv.num_fences_mask; ++j)
			dma_fence_put(ring->fence_drv.fences[j]);
		kfree(ring->fence_drv.fences);
		ring->fence_drv.fences = NULL;
		ring->fence_drv.initialized = false;
	}
}
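
/**
 * amdgpu_fence_driver_suspend - suspend the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Suspend the fence driver for all possible rings (all asics).
 */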
void amdgpu_fence_driver_suspend(struct amdgpu_device *adev)
{
	int i, r;

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (!ring || !ring->fence_drv.initialized)
			continue;

		/* wait for gpu to finish processing current batch */
		r = amdgpu_fence_wait_empty(ring);
		if (r) {
			/* delay GPU reset to resume */
			amdgpu_fence_driver_force_completion(ring);
		}

		/* disable the interrupt */
		amdgpu_irq_put(adev, ring->fence_drv.irq_src,
			       ring->fence_drv.irq_type);
	}
}
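
/**
 * amdgpu_fence_driver_resume - resume the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Resume the fence driver for all possible rings (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has using
 * amdgpu_fence_driver_start_ring().
 * Returns 0 for success.
 */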
void amdgpu_fence_driver_resume(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (!ring || !ring->fence_drv.initialized)
			continue;

		/* enable the interrupt */
		amdgpu_irq_get(adev, ring->fence_drv.irq_src,
			       ring->fence_drv.irq_type);
	}
}
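
/**
 * amdgpu_fence_driver_force_completion - force signal of all emitted fences
 *
 * @ring: ring to signal the fences on
 *
 * Writes the last emitted sequence number to the fence location and
 * processes the fences so that everything emitted so far is signaled.
 */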
void amdgpu_fence_driver_force_completion(struct amdgpu_ring *ring)
{
	amdgpu_fence_write(ring, ring->fence_drv.sync_seq);
	amdgpu_fence_process(ring);
}
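
/*
 * Common fence implementation
 */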
static const char *amdgpu_fence_get_driver_name(struct dma_fence *fence)
{
	return "amdgpu";
}

static const char *amdgpu_fence_get_timeline_name(struct dma_fence *f)
{
	struct amdgpu_fence *fence = to_amdgpu_fence(f);
	return (const char *)fence->ring->name;
}
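
/**
 * amdgpu_fence_enable_signaling - enable signalling on fence
 *
 * @f: fence
 *
 * Arms the fallback timer, if it is not already pending, so that the
 * fence is processed even if the fence interrupt is missed.
 */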
static bool amdgpu_fence_enable_signaling(struct dma_fence *f)
{
	struct amdgpu_fence *fence = to_amdgpu_fence(f);
	struct amdgpu_ring *ring = fence->ring;

	if (!timer_pending(&ring->fence_drv.fallback_timer))
		amdgpu_fence_schedule_fallback(ring);

	DMA_FENCE_TRACE(&fence->base, "armed on ring %i!\n", ring->idx);

	return true;
}
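
/**
 * amdgpu_fence_free - free up the fence memory
 *
 * @rcu: RCU callback head
 *
 * Free up the fence memory after the RCU grace period.
 */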
static void amdgpu_fence_free(struct rcu_head *rcu)
{
	struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);
	struct amdgpu_fence *fence = to_amdgpu_fence(f);
	kmem_cache_free(amdgpu_fence_slab, fence);
}
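
/**
 * amdgpu_fence_release - callback that fence can be freed
 *
 * @f: fence
 *
 * This function is called when the reference count becomes zero.
 * It just RCU schedules freeing up the fence.
 */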
static void amdgpu_fence_release(struct dma_fence *f)
{
	call_rcu(&f->rcu, amdgpu_fence_free);
}

static const struct dma_fence_ops amdgpu_fence_ops = {
	.get_driver_name = amdgpu_fence_get_driver_name,
	.get_timeline_name = amdgpu_fence_get_timeline_name,
	.enable_signaling = amdgpu_fence_enable_signaling,
	.release = amdgpu_fence_release,
};
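
/*
 * Fence debugfs
 */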
#if defined(CONFIG_DEBUG_FS)
static int amdgpu_debugfs_fence_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;
	int i;

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (!ring || !ring->fence_drv.initialized)
			continue;

		amdgpu_fence_process(ring);

		seq_printf(m, "--- ring %d (%s) ---\n", i, ring->name);
		seq_printf(m, "Last signaled fence 0x%08x\n",
			   atomic_read(&ring->fence_drv.last_seq));
		seq_printf(m, "Last emitted        0x%08x\n",
			   ring->fence_drv.sync_seq);

		if (ring->funcs->type != AMDGPU_RING_TYPE_GFX)
			continue;

		/* set in CP_VMID_PREEMPT and preemption occurred */
		seq_printf(m, "Last preempted      0x%08x\n",
			   le32_to_cpu(*(ring->fence_drv.cpu_addr + 2)));
		/* set in CP_VMID_RESET and reset occurred */
		seq_printf(m, "Last reset          0x%08x\n",
			   le32_to_cpu(*(ring->fence_drv.cpu_addr + 4)));
		/* Both preemption and reset occurred */
		seq_printf(m, "Last both           0x%08x\n",
			   le32_to_cpu(*(ring->fence_drv.cpu_addr + 6)));
	}
	return 0;
}
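
/**
 * amdgpu_debugfs_gpu_recover - manually trigger a gpu reset & recover
 *
 * Reading this debugfs file manually triggers a GPU reset and recovery.
 */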
static int amdgpu_debugfs_gpu_recover(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;

	seq_printf(m, "gpu recover\n");
	amdgpu_device_gpu_recover(adev, NULL, true);

	return 0;
}

static const struct drm_info_list amdgpu_debugfs_fence_list[] = {
	{"amdgpu_fence_info", &amdgpu_debugfs_fence_info, 0, NULL},
	{"amdgpu_gpu_recover", &amdgpu_debugfs_gpu_recover, 0, NULL}
};

static const struct drm_info_list amdgpu_debugfs_fence_list_sriov[] = {
	{"amdgpu_fence_info", &amdgpu_debugfs_fence_info, 0, NULL},
};
#endif

int amdgpu_debugfs_fence_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	if (amdgpu_sriov_vf(adev))
		return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_fence_list_sriov, 1);
	return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_fence_list, 2);
#else
	return 0;
#endif
}