#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"

struct amdgpu_sync_entry {
	struct hlist_node node;
	struct fence *fence;
};

static struct kmem_cache *amdgpu_sync_slab;

/**
 * amdgpu_sync_create - zero init sync object
 *
 * @sync: sync object to initialize
 *
 * Initialize the fence hash table and clear the last VM update fence.
 */
void amdgpu_sync_create(struct amdgpu_sync *sync)
{
	hash_init(sync->fences);
	sync->last_vm_update = NULL;
}

/**
 * amdgpu_sync_same_dev - test if a fence belongs to us
 *
 * @adev: amdgpu device to use for the test
 * @f: fence to test
 *
 * Test if the fence was issued by a ring of @adev.
 */
static bool amdgpu_sync_same_dev(struct amdgpu_device *adev, struct fence *f)
{
	struct amd_sched_fence *s_fence = to_amd_sched_fence(f);

	if (s_fence) {
		struct amdgpu_ring *ring;

		ring = container_of(s_fence->sched, struct amdgpu_ring, sched);
		return ring->adev == adev;
	}

	return false;
}

/**
 * amdgpu_sync_get_owner - extract the owner of a fence
 *
 * @f: fence to get the owner from
 *
 * Extract who originally created the fence. Fences that did not come from
 * the scheduler report AMDGPU_FENCE_OWNER_UNDEFINED.
 */
static void *amdgpu_sync_get_owner(struct fence *f)
{
	struct amd_sched_fence *s_fence = to_amd_sched_fence(f);

	if (s_fence)
		return s_fence->owner;

	return AMDGPU_FENCE_OWNER_UNDEFINED;
}

/**
 * amdgpu_sync_keep_later - keep the later fence
 *
 * @keep: existing fence to test
 * @fence: new fence
 *
 * Either keep the existing fence or replace it with the new one, depending
 * on which one is later. Takes a reference to the new fence and drops the
 * reference to the replaced one.
 */
static void amdgpu_sync_keep_later(struct fence **keep, struct fence *fence)
{
	if (*keep && fence_is_later(*keep, fence))
		return;

	fence_put(*keep);
	*keep = fence_get(fence);
}

/**
 * amdgpu_sync_add_later - add the fence to an existing hash entry
 *
 * @sync: sync object to add the fence to
 * @f: fence to add
 *
 * Tries to add the fence to an existing hash entry for the same fence
 * context, keeping only the later fence. Returns true when an entry was
 * found, false otherwise.
 */
static bool amdgpu_sync_add_later(struct amdgpu_sync *sync, struct fence *f)
{
	struct amdgpu_sync_entry *e;

	hash_for_each_possible(sync->fences, e, node, f->context) {
		if (unlikely(e->fence->context != f->context))
			continue;

		amdgpu_sync_keep_later(&e->fence, f);
		return true;
	}
	return false;
}

/**
 * amdgpu_sync_fence - remember to sync to this fence
 *
 * @adev: amdgpu device
 * @sync: sync object to add the fence to
 * @f: fence to sync to
 *
 * Add the fence to the sync object. VM update fences from our own device
 * are additionally tracked in last_vm_update.
 */
int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
		      struct fence *f)
{
	struct amdgpu_sync_entry *e;

	if (!f)
		return 0;

	if (amdgpu_sync_same_dev(adev, f) &&
	    amdgpu_sync_get_owner(f) == AMDGPU_FENCE_OWNER_VM)
		amdgpu_sync_keep_later(&sync->last_vm_update, f);

	if (amdgpu_sync_add_later(sync, f))
		return 0;

	e = kmem_cache_alloc(amdgpu_sync_slab, GFP_KERNEL);
	if (!e)
		return -ENOMEM;

	hash_add(sync->fences, &e->node, f->context);
	e->fence = fence_get(f);
	return 0;
}

/**
 * amdgpu_sync_resv - sync to a reservation object
 *
 * @adev: amdgpu device
 * @sync: sync object to add the fences to
 * @resv: reservation object with embedded fences
 * @owner: owner doing the synchronization
 *
 * Sync to all fences contained in the reservation object.
 */
int amdgpu_sync_resv(struct amdgpu_device *adev,
		     struct amdgpu_sync *sync,
		     struct reservation_object *resv,
		     void *owner)
{
	struct reservation_object_list *flist;
	struct fence *f;
	void *fence_owner;
	unsigned i;
	int r = 0;

	if (resv == NULL)
		return -EINVAL;

	/* always sync to the exclusive fence */
	f = reservation_object_get_excl(resv);
	r = amdgpu_sync_fence(adev, sync, f);

	flist = reservation_object_get_list(resv);
	if (!flist || r)
		return r;

	for (i = 0; i < flist->shared_count; ++i) {
		f = rcu_dereference_protected(flist->shared[i],
					      reservation_object_held(resv));
		if (amdgpu_sync_same_dev(adev, f)) {
			/* VM updates are only interesting
			 * for other VM updates and moves.
			 */
			fence_owner = amdgpu_sync_get_owner(f);
			if ((owner != AMDGPU_FENCE_OWNER_UNDEFINED) &&
			    (fence_owner != AMDGPU_FENCE_OWNER_UNDEFINED) &&
			    ((owner == AMDGPU_FENCE_OWNER_VM) !=
			     (fence_owner == AMDGPU_FENCE_OWNER_VM)))
				continue;

			/* Ignore fences from the same owner as
			 * long as it isn't undefined.
			 */
			if (owner != AMDGPU_FENCE_OWNER_UNDEFINED &&
			    fence_owner == owner)
				continue;
		}

		r = amdgpu_sync_fence(adev, sync, f);
		if (r)
			break;
	}
	return r;
}
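
/*
 * Illustrative usage sketch (not taken from this file; "adev" and "resv"
 * stand in for whatever the caller has at hand): gather the fences a
 * buffer's reservation object carries and wait for them on the CPU. The
 * reservation object must be locked, since the shared fences above are
 * dereferenced under reservation_object_held().
 *
 *	struct amdgpu_sync sync;
 *	struct fence *f;
 *	long r;
 *
 *	amdgpu_sync_create(&sync);
 *	r = amdgpu_sync_resv(adev, &sync, resv, AMDGPU_FENCE_OWNER_UNDEFINED);
 *
 *	while (!r && (f = amdgpu_sync_get_fence(&sync))) {
 *		r = fence_wait(f, false);
 *		fence_put(f);
 *	}
 *	amdgpu_sync_free(&sync);
 */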

/**
 * amdgpu_sync_peek_fence - get the next fence not signaled yet
 *
 * @sync: the sync object
 * @ring: optional ring to use for the test
 *
 * Returns the next fence not signaled yet without removing it from the sync
 * object. Fences belonging to @ring only need to be scheduled, so their
 * scheduled fence is returned instead.
 */
struct fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
				     struct amdgpu_ring *ring)
{
	struct amdgpu_sync_entry *e;
	struct hlist_node *tmp;
	int i;

	hash_for_each_safe(sync->fences, i, tmp, e, node) {
		struct fence *f = e->fence;
		struct amd_sched_fence *s_fence = to_amd_sched_fence(f);

		if (ring && s_fence) {
			/* For fences from the same ring it is sufficient
			 * when they are scheduled.
			 */
			if (s_fence->sched == &ring->sched) {
				if (fence_is_signaled(&s_fence->scheduled))
					continue;

				return &s_fence->scheduled;
			}
		}

		if (fence_is_signaled(f)) {
			hash_del(&e->node);
			fence_put(f);
			kmem_cache_free(amdgpu_sync_slab, e);
			continue;
		}

		return f;
	}

	return NULL;
}
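
/*
 * Illustrative sketch (not taken from this file): a caller that only wants
 * to know whether there is still something to wait for can peek without
 * consuming the entry. The returned fence is not referenced, so it may only
 * be used while the sync object still holds it.
 *
 *	struct fence *f;
 *	long r = 0;
 *
 *	f = amdgpu_sync_peek_fence(sync, ring);
 *	if (f)
 *		r = fence_wait(f, false);
 */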

/**
 * amdgpu_sync_get_fence - get the next fence from the sync object
 *
 * @sync: sync object to consume
 *
 * Returns the next fence not signaled yet and removes it from the sync
 * object. The caller gets the reference and must drop it with fence_put().
 */
struct fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync)
{
	struct amdgpu_sync_entry *e;
	struct hlist_node *tmp;
	struct fence *f;
	int i;

	hash_for_each_safe(sync->fences, i, tmp, e, node) {

		f = e->fence;

		hash_del(&e->node);
		kmem_cache_free(amdgpu_sync_slab, e);

		if (!fence_is_signaled(f))
			return f;

		fence_put(f);
	}
	return NULL;
}

/**
 * amdgpu_sync_free - free the sync object
 *
 * @sync: sync object to use
 *
 * Drop all fence references and free all hash table entries, including the
 * last VM update fence.
 */
void amdgpu_sync_free(struct amdgpu_sync *sync)
{
	struct amdgpu_sync_entry *e;
	struct hlist_node *tmp;
	unsigned i;

	hash_for_each_safe(sync->fences, i, tmp, e, node) {
		hash_del(&e->node);
		fence_put(e->fence);
		kmem_cache_free(amdgpu_sync_slab, e);
	}

	fence_put(sync->last_vm_update);
}

/**
 * amdgpu_sync_init - init the sync object subsystem
 *
 * Allocate the slab cache used for the hash table entries.
 */
int amdgpu_sync_init(void)
{
	amdgpu_sync_slab = kmem_cache_create(
		"amdgpu_sync", sizeof(struct amdgpu_sync_entry), 0,
		SLAB_HWCACHE_ALIGN, NULL);
	if (!amdgpu_sync_slab)
		return -ENOMEM;

	return 0;
}

/**
 * amdgpu_sync_fini - fini the sync object subsystem
 *
 * Destroy the slab cache again.
 */
void amdgpu_sync_fini(void)
{
	kmem_cache_destroy(amdgpu_sync_slab);
}
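
/*
 * Pairing sketch (assumption, not from this file): amdgpu_sync_init() is
 * expected to run once when the driver loads and amdgpu_sync_fini() once
 * when it unloads, e.g. from the driver's module init/exit path:
 *
 *	static int __init example_init(void)
 *	{
 *		return amdgpu_sync_init();
 *	}
 *	module_init(example_init);
 *
 *	static void __exit example_exit(void)
 *	{
 *		amdgpu_sync_fini();
 *	}
 *	module_exit(example_exit);
 */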