#include "radeon.h"
#include "radeon_asic.h"
#include "evergreen.h"
#include "evergreend.h"

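/**
 * evergreen_dma_fence_ring_emit - emit a fence on the DMA ring
 *
 * @rdev: radeon_device pointer
 * @fence: radeon fence object
 *
 * Add a DMA fence packet to the ring to write the fence sequence number
 * and to generate an interrupt when the fence is signalled (evergreen).
 */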
void evergreen_dma_fence_ring_emit(struct radeon_device *rdev,
				   struct radeon_fence *fence)
{
	struct radeon_ring *ring = &rdev->ring[fence->ring];
	u64 addr = rdev->fence_drv[fence->ring].gpu_addr;

	/* write the fence sequence number */
	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0));
	radeon_ring_write(ring, addr & 0xfffffffc);
	radeon_ring_write(ring, (upper_32_bits(addr) & 0xff));
	radeon_ring_write(ring, fence->seq);
	/* generate an interrupt */
	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_TRAP, 0, 0));
	/* flush HDP */
	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0));
	radeon_ring_write(ring, (0xf << 16) | (HDP_MEM_COHERENCY_FLUSH_CNTL >> 2));
	radeon_ring_write(ring, 1);
}
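
/**
 * evergreen_dma_ring_ib_execute - schedule an IB on the DMA ring
 *
 * @rdev: radeon_device pointer
 * @ib: IB object to schedule
 *
 * Schedule an indirect buffer in the DMA ring (evergreen), optionally
 * writing the next rptr to the writeback page first.
 */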
void evergreen_dma_ring_ib_execute(struct radeon_device *rdev,
				   struct radeon_ib *ib)
{
	struct radeon_ring *ring = &rdev->ring[ib->ring];

	if (rdev->wb.enabled) {
		u32 next_rptr = ring->wptr + 4;
		while ((next_rptr & 7) != 5)
			next_rptr++;
		next_rptr += 3;
		radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 1));
		radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
		radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xff);
		radeon_ring_write(ring, next_rptr);
	}

	/* The indirect buffer packet must end on an 8 DW boundary in the DMA ring.
	 * Pad as necessary with NOPs.
	 */
	while ((ring->wptr & 7) != 5)
		radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0));
	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_INDIRECT_BUFFER, 0, 0));
	radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
	radeon_ring_write(ring, (ib->length_dw << 12) | (upper_32_bits(ib->gpu_addr) & 0xFF));
}
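
/**
 * evergreen_copy_dma - copy pages using the DMA engine
 *
 * @rdev: radeon_device pointer
 * @src_offset: src GPU address
 * @dst_offset: dst GPU address
 * @num_gpu_pages: number of GPU pages to xfer
 * @resv: reservation object to sync to
 *
 * Copy GPU pages using the DMA engine (evergreen). Used by the radeon
 * ttm implementation to move pages if registered as the asic copy callback.
 */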
struct radeon_fence *evergreen_copy_dma(struct radeon_device *rdev,
					uint64_t src_offset,
					uint64_t dst_offset,
					unsigned num_gpu_pages,
					struct dma_resv *resv)
{
	struct radeon_fence *fence;
	struct radeon_sync sync;
	int ring_index = rdev->asic->copy.dma_ring_index;
	struct radeon_ring *ring = &rdev->ring[ring_index];
	u32 size_in_dw, cur_size_in_dw;
	int i, num_loops;
	int r = 0;

	radeon_sync_create(&sync);

	/* each DMA copy packet can transfer at most 0xFFFFF dwords */
	size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4;
	num_loops = DIV_ROUND_UP(size_in_dw, 0xfffff);
	/* one 5-dword copy packet per loop, plus headroom for sync and the fence */
	r = radeon_ring_lock(rdev, ring, num_loops * 5 + 11);
	if (r) {
		DRM_ERROR("radeon: moving bo (%d).\n", r);
		radeon_sync_free(rdev, &sync, NULL);
		return ERR_PTR(r);
	}

	/* wait for fences attached to the reservation object on other rings */
	radeon_sync_resv(rdev, &sync, resv, false);
	radeon_sync_rings(rdev, &sync, ring->idx);

	for (i = 0; i < num_loops; i++) {
		cur_size_in_dw = size_in_dw;
		if (cur_size_in_dw > 0xFFFFF)
			cur_size_in_dw = 0xFFFFF;
		size_in_dw -= cur_size_in_dw;
		radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 0, cur_size_in_dw));
		radeon_ring_write(ring, dst_offset & 0xfffffffc);
		radeon_ring_write(ring, src_offset & 0xfffffffc);
		radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
		radeon_ring_write(ring, upper_32_bits(src_offset) & 0xff);
		src_offset += cur_size_in_dw * 4;
		dst_offset += cur_size_in_dw * 4;
	}

	r = radeon_fence_emit(rdev, &fence, ring->idx);
	if (r) {
		radeon_ring_unlock_undo(rdev, ring);
		radeon_sync_free(rdev, &sync, NULL);
		return ERR_PTR(r);
	}

	radeon_ring_unlock_commit(rdev, ring, false);
	radeon_sync_free(rdev, &sync, fence);

	return fence;
}
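
/*
 * Usage sketch (assumption, not part of this file): evergreen_copy_dma()
 * is meant to be registered as the ASIC copy callback, e.g. in the
 * evergreen entry of radeon_asic.c, so that TTM buffer moves go through
 * the async DMA engine:
 *
 *	.copy = {
 *		.dma = &evergreen_copy_dma,
 *		.dma_ring_index = R600_RING_TYPE_DMA_INDEX,
 *	},
 *
 * The field names follow the copy table referenced above via
 * rdev->asic->copy.dma_ring_index; treat the exact layout and index
 * constant as assumptions rather than a definitive listing.
 */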
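
/**
 * evergreen_dma_is_lockup - Check if the DMA engine is locked up
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Check if the async DMA engine is locked up.
 * Returns true if the engine appears to be locked up, false if not.
 */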
bool evergreen_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
{
	u32 reset_mask = evergreen_gpu_check_soft_reset(rdev);

	if (!(reset_mask & RADEON_RESET_DMA)) {
		radeon_ring_lockup_update(rdev, ring);
		return false;
	}
	return radeon_ring_test_lockup(rdev, ring);
}