#include <drm/drmP.h>
#include "radeon.h"
#include "radeon_trace.h"

/**
 * radeon_sync_create - zero init sync object
 *
 * @sync: sync object to initialize
 *
 * Just clear the sync object for now.
 */
void radeon_sync_create(struct radeon_sync *sync)
{
	unsigned i;

	for (i = 0; i < RADEON_NUM_SYNCS; ++i)
		sync->semaphores[i] = NULL;

	for (i = 0; i < RADEON_NUM_RINGS; ++i)
		sync->sync_to[i] = NULL;

	sync->last_vm_update = NULL;
}

/**
 * radeon_sync_fence - use the semaphore to sync to a fence
 *
 * @sync: sync object to add fence to
 * @fence: fence to sync to
 *
 * Remember the fence so that radeon_sync_rings() can sync to it later,
 * keeping only the latest fence per ring.
 */
void radeon_sync_fence(struct radeon_sync *sync,
		       struct radeon_fence *fence)
{
	struct radeon_fence *other;

	if (!fence)
		return;

	other = sync->sync_to[fence->ring];
	sync->sync_to[fence->ring] = radeon_fence_later(fence, other);

	if (fence->is_vm_update) {
		other = sync->last_vm_update;
		sync->last_vm_update = radeon_fence_later(fence, other);
	}
}
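
/*
 * Note that fences are deduplicated per ring: adding two fences from the
 * same ring keeps only the later of the two. A minimal sketch (the fence
 * names and sequence numbers below are hypothetical):
 *
 *	radeon_sync_fence(&sync, fence_a);	// seq 10 on ring 0
 *	radeon_sync_fence(&sync, fence_b);	// seq 12 on ring 0
 *	// sync.sync_to[0] == fence_b, the later of the two
 */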

/**
 * radeon_sync_resv - sync to a reservation object
 *
 * @rdev: radeon device pointer
 * @sync: sync object to add the fences from @resv to
 * @resv: reservation object with embedded fences
 * @shared: true if we should only sync to the exclusive fence
 *
 * Sync to all fences of the reservation object: fences from this device
 * are remembered for a semaphore sync, foreign fences are waited for on
 * the CPU.
 */
int radeon_sync_resv(struct radeon_device *rdev,
		     struct radeon_sync *sync,
		     struct reservation_object *resv,
		     bool shared)
{
	struct reservation_object_list *flist;
	struct dma_fence *f;
	struct radeon_fence *fence;
	unsigned i;
	int r = 0;

	/* always sync to the exclusive fence */
	f = reservation_object_get_excl(resv);
	fence = f ? to_radeon_fence(f) : NULL;
	if (fence && fence->rdev == rdev)
		radeon_sync_fence(sync, fence);
	else if (f)
		r = dma_fence_wait(f, true);

	flist = reservation_object_get_list(resv);
	if (shared || !flist || r)
		return r;

	for (i = 0; i < flist->shared_count; ++i) {
		f = rcu_dereference_protected(flist->shared[i],
					      reservation_object_held(resv));
		fence = to_radeon_fence(f);
		if (fence && fence->rdev == rdev)
			radeon_sync_fence(sync, fence);
		else
			r = dma_fence_wait(f, true);

		if (r)
			break;
	}
	return r;
}

/**
 * radeon_sync_rings - sync @ring to all registered fences
 *
 * @rdev: radeon device pointer
 * @sync: sync object to use
 * @ring: index of the ring that needs to wait
 *
 * Emit semaphore waits on @ring for all fences previously added to the
 * sync object, falling back to CPU waits where semaphores can't be used.
 * The caller must already have reserved space on @ring for the wait packets.
 */
int radeon_sync_rings(struct radeon_device *rdev,
		      struct radeon_sync *sync,
		      int ring)
{
	unsigned count = 0;
	int i, r;

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		struct radeon_fence *fence = sync->sync_to[i];
		struct radeon_semaphore *semaphore;

		/* check if we really need to sync */
		if (!radeon_fence_need_sync(fence, ring))
			continue;

		/* prevent GPU deadlocks */
		if (!rdev->ring[i].ready) {
			dev_err(rdev->dev, "Syncing to a disabled ring!\n");
			return -EINVAL;
		}

		if (count >= RADEON_NUM_SYNCS) {
			/* not enough room, wait manually */
			r = radeon_fence_wait(fence, false);
			if (r)
				return r;
			continue;
		}
		r = radeon_semaphore_create(rdev, &semaphore);
		if (r)
			return r;

		sync->semaphores[count++] = semaphore;

		/* allocate enough space for the sync command */
		r = radeon_ring_alloc(rdev, &rdev->ring[i], 16);
		if (r)
			return r;

		/* emit the signal semaphore */
		if (!radeon_semaphore_emit_signal(rdev, i, semaphore)) {
			/* signaling wasn't successful, wait manually */
			radeon_ring_undo(&rdev->ring[i]);
			r = radeon_fence_wait(fence, false);
			if (r)
				return r;
			continue;
		}

		/* we assume the caller has already allocated space on the waiting ring */
		if (!radeon_semaphore_emit_wait(rdev, ring, semaphore)) {
			/* waiting wasn't successful, wait manually */
			radeon_ring_undo(&rdev->ring[i]);
			r = radeon_fence_wait(fence, false);
			if (r)
				return r;
			continue;
		}

		radeon_ring_commit(rdev, &rdev->ring[i], false);
		radeon_fence_note_sync(fence, ring);
	}

	return 0;
}
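
/*
 * A minimal caller sketch for radeon_sync_rings() (hypothetical, error
 * handling elided; "ring" and "sync" are assumed to have been set up by
 * the caller, and the number of dwords reserved is only illustrative):
 *
 *	r = radeon_ring_lock(rdev, ring, 64);	// reserve space on the
 *						// waiting ring, including
 *						// room for the WAIT packets
 *	if (r)
 *		return r;
 *	r = radeon_sync_rings(rdev, &sync, ring->idx);
 *	...
 */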

/**
 * radeon_sync_free - free the sync object
 *
 * @rdev: radeon device pointer
 * @sync: sync object to use
 * @fence: fence to use for the free
 *
 * Free the sync object by freeing all the semaphores in it.
 */
void radeon_sync_free(struct radeon_device *rdev,
		      struct radeon_sync *sync,
		      struct radeon_fence *fence)
{
	unsigned i;

	for (i = 0; i < RADEON_NUM_SYNCS; ++i)
		radeon_semaphore_free(rdev, &sync->semaphores[i], fence);
}
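
/*
 * Putting it together, a hedged sketch of the typical lifecycle (the
 * buffer, ring and fence names are hypothetical, not taken from a real
 * caller):
 *
 *	struct radeon_sync sync;
 *
 *	radeon_sync_create(&sync);
 *	r = radeon_sync_resv(rdev, &sync, bo->tbo.resv, false);
 *	if (r)
 *		goto error;
 *	r = radeon_sync_rings(rdev, &sync, ring->idx);
 *	if (r)
 *		goto error;
 *	// ... emit the command buffer on @ring and fence it ...
 *	radeon_sync_free(rdev, &sync, job_fence);
 */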