#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_amdkfd.h"

struct amdgpu_sync_entry {
	struct hlist_node node;
	struct dma_fence *fence;
	bool explicit;
};

static struct kmem_cache *amdgpu_sync_slab;
/**
 * amdgpu_sync_create - zero init sync object
 *
 * @sync: sync object to initialize
 *
 * Just clear the sync object for now.
 */
void amdgpu_sync_create(struct amdgpu_sync *sync)
{
	hash_init(sync->fences);
	sync->last_vm_update = NULL;
}
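
/*
 * Illustrative sketch only (not part of the driver): the typical lifetime of
 * a sync object as exposed by this file. "fence" stands in for any dma_fence
 * the caller wants to depend on and is assumed to exist in the caller.
 *
 *	struct amdgpu_sync sync;
 *	int r;
 *
 *	amdgpu_sync_create(&sync);
 *	r = amdgpu_sync_fence(&sync, fence, false);
 *	if (!r)
 *		r = amdgpu_sync_wait(&sync, true);
 *	amdgpu_sync_free(&sync);
 */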

/**
 * amdgpu_sync_same_dev - test if a fence belongs to this device
 *
 * @adev: amdgpu device to use for the test
 * @f: fence to test
 *
 * Test if the fence was issued by us.
 */
static bool amdgpu_sync_same_dev(struct amdgpu_device *adev,
				 struct dma_fence *f)
{
	struct drm_sched_fence *s_fence = to_drm_sched_fence(f);

	if (s_fence) {
		struct amdgpu_ring *ring;

		ring = container_of(s_fence->sched, struct amdgpu_ring, sched);
		return ring->adev == adev;
	}

	return false;
}

/**
 * amdgpu_sync_get_owner - extract the owner of a fence
 *
 * @f: fence to get the owner from
 *
 * Extract who originally created the fence.
 */
static void *amdgpu_sync_get_owner(struct dma_fence *f)
{
	struct drm_sched_fence *s_fence;
	struct amdgpu_amdkfd_fence *kfd_fence;

	if (!f)
		return AMDGPU_FENCE_OWNER_UNDEFINED;

	s_fence = to_drm_sched_fence(f);
	if (s_fence)
		return s_fence->owner;

	kfd_fence = to_amdgpu_amdkfd_fence(f);
	if (kfd_fence)
		return AMDGPU_FENCE_OWNER_KFD;

	return AMDGPU_FENCE_OWNER_UNDEFINED;
}

/**
 * amdgpu_sync_keep_later - keep the later fence
 *
 * @keep: existing fence to test
 * @fence: new fence
 *
 * Either keep the existing fence or the new one, depending on which one
 * is later.
 */
static void amdgpu_sync_keep_later(struct dma_fence **keep,
				   struct dma_fence *fence)
{
	if (*keep && dma_fence_is_later(*keep, fence))
		return;

	dma_fence_put(*keep);
	*keep = dma_fence_get(fence);
}

/**
 * amdgpu_sync_add_later - add the fence to an existing hash entry
 *
 * @sync: sync object to add the fence to
 * @f: fence to add
 * @explicit: whether this is an explicit dependency
 *
 * Tries to add the fence to an existing hash entry. Returns true when an
 * entry was found, false otherwise.
 */
static bool amdgpu_sync_add_later(struct amdgpu_sync *sync, struct dma_fence *f,
				  bool explicit)
{
	struct amdgpu_sync_entry *e;

	hash_for_each_possible(sync->fences, e, node, f->context) {
		if (unlikely(e->fence->context != f->context))
			continue;

		amdgpu_sync_keep_later(&e->fence, f);

		/* Preserve the explicit flag so the pipeline sync is not lost */
		e->explicit |= explicit;

		return true;
	}
	return false;
}

/**
 * amdgpu_sync_fence - remember to sync to this fence
 *
 * @sync: sync object to add fence to
 * @f: fence to sync to
 * @explicit: if this is an explicit dependency
 *
 * Add the fence to the sync object.
 */
int amdgpu_sync_fence(struct amdgpu_sync *sync, struct dma_fence *f,
		      bool explicit)
{
	struct amdgpu_sync_entry *e;

	if (!f)
		return 0;

	if (amdgpu_sync_add_later(sync, f, explicit))
		return 0;

	e = kmem_cache_alloc(amdgpu_sync_slab, GFP_KERNEL);
	if (!e)
		return -ENOMEM;

	e->explicit = explicit;

	hash_add(sync->fences, &e->node, f->context);
	e->fence = dma_fence_get(f);
	return 0;
}
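
/*
 * Illustrative sketch only: fences that share a dma_fence context collapse
 * into a single entry which keeps the later fence. "older" and "newer" are
 * assumed to come from the same context, with "newer" issued last.
 *
 *	amdgpu_sync_fence(&sync, older, false);
 *	amdgpu_sync_fence(&sync, newer, true);
 *	// one entry remains: it references "newer" and has explicit == true
 */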

/**
 * amdgpu_sync_vm_fence - remember to sync to this VM fence
 *
 * @sync: sync object to add fence to
 * @fence: the VM fence to add
 *
 * Add the fence to the sync object and remember it as VM update.
 */
int amdgpu_sync_vm_fence(struct amdgpu_sync *sync, struct dma_fence *fence)
{
	if (!fence)
		return 0;

	amdgpu_sync_keep_later(&sync->last_vm_update, fence);
	return amdgpu_sync_fence(sync, fence, false);
}

/**
 * amdgpu_sync_resv - sync to a reservation object
 *
 * @adev: amdgpu device
 * @sync: sync object to add fences from reservation object to
 * @resv: reservation object with embedded fences to sync to
 * @mode: how owner affects which fences we sync to
 * @owner: owner of the planned job submission
 *
 * Sync to the fences of the reservation object.
 */
int amdgpu_sync_resv(struct amdgpu_device *adev, struct amdgpu_sync *sync,
		     struct dma_resv *resv, enum amdgpu_sync_mode mode,
		     void *owner)
{
	struct dma_resv_list *flist;
	struct dma_fence *f;
	unsigned i;
	int r = 0;

	if (resv == NULL)
		return -EINVAL;

	/* always sync to the exclusive fence */
	f = dma_resv_get_excl(resv);
	r = amdgpu_sync_fence(sync, f, false);

	flist = dma_resv_get_list(resv);
	if (!flist || r)
		return r;

	for (i = 0; i < flist->shared_count; ++i) {
		void *fence_owner;

		f = rcu_dereference_protected(flist->shared[i],
					      dma_resv_held(resv));

		fence_owner = amdgpu_sync_get_owner(f);

		/* Always sync to moves, no matter what */
		if (fence_owner == AMDGPU_FENCE_OWNER_UNDEFINED) {
			r = amdgpu_sync_fence(sync, f, false);
			if (r)
				break;
		}

		/* We only want to trigger KFD eviction fences on
		 * evict or move jobs. Skip KFD fences otherwise.
		 */
		if (fence_owner == AMDGPU_FENCE_OWNER_KFD &&
		    owner != AMDGPU_FENCE_OWNER_UNDEFINED)
			continue;

		/* Never sync to VM updates either. */
		if (fence_owner == AMDGPU_FENCE_OWNER_VM &&
		    owner != AMDGPU_FENCE_OWNER_UNDEFINED)
			continue;

		/* Ignore fences depending on the sync mode */
		switch (mode) {
		case AMDGPU_SYNC_ALWAYS:
			break;

		case AMDGPU_SYNC_NE_OWNER:
			if (amdgpu_sync_same_dev(adev, f) &&
			    fence_owner == owner)
				continue;
			break;

		case AMDGPU_SYNC_EQ_OWNER:
			if (amdgpu_sync_same_dev(adev, f) &&
			    fence_owner != owner)
				continue;
			break;

		case AMDGPU_SYNC_EXPLICIT:
			continue;
		}

		r = amdgpu_sync_fence(sync, f, false);
		if (r)
			break;
	}
	return r;
}
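
/*
 * Illustrative sketch only: syncing to the fences of a buffer object before
 * submitting a job. "resv" is a dma_resv object the caller already holds
 * locked (dma_resv_held() must be true) and "owner" identifies the planned
 * submission; both are assumed to exist in the caller, and the error label
 * is hypothetical.
 *
 *	r = amdgpu_sync_resv(adev, &sync, resv, AMDGPU_SYNC_NE_OWNER, owner);
 *	if (r)
 *		goto error;
 */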

/**
 * amdgpu_sync_peek_fence - get the next fence not signaled yet
 *
 * @sync: the sync object
 * @ring: optional ring to use for test
 *
 * Returns the next fence not signaled yet without removing it from the sync
 * object.
 */
struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
					 struct amdgpu_ring *ring)
{
	struct amdgpu_sync_entry *e;
	struct hlist_node *tmp;
	int i;

	hash_for_each_safe(sync->fences, i, tmp, e, node) {
		struct dma_fence *f = e->fence;
		struct drm_sched_fence *s_fence = to_drm_sched_fence(f);

		if (dma_fence_is_signaled(f)) {
			hash_del(&e->node);
			dma_fence_put(f);
			kmem_cache_free(amdgpu_sync_slab, e);
			continue;
		}
		if (ring && s_fence) {
			/* For fences from the same ring it is sufficient
			 * when they are scheduled.
			 */
			if (s_fence->sched == &ring->sched) {
				if (dma_fence_is_signaled(&s_fence->scheduled))
					continue;

				return &s_fence->scheduled;
			}
		}

		return f;
	}

	return NULL;
}
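
/*
 * Illustrative sketch only: asking what a specific ring still has to wait
 * for. "ring" is assumed to be the ring the caller wants to run on; for jobs
 * on that same ring the scheduled fence is returned instead of the finished
 * fence, so back-to-back jobs need not wait for full completion.
 *
 *	struct dma_fence *dep = amdgpu_sync_peek_fence(&sync, ring);
 *	if (dep)
 *		// hand "dep" to the scheduler as a dependency
 */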

/**
 * amdgpu_sync_get_fence - get the next fence from the sync object
 *
 * @sync: sync object to consume
 * @explicit: true if the next fence is explicit
 *
 * Gets and removes the next fence from the sync object not signaled yet.
 */
struct dma_fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync, bool *explicit)
{
	struct amdgpu_sync_entry *e;
	struct hlist_node *tmp;
	struct dma_fence *f;
	int i;

	hash_for_each_safe(sync->fences, i, tmp, e, node) {
		f = e->fence;
		if (explicit)
			*explicit = e->explicit;

		hash_del(&e->node);
		kmem_cache_free(amdgpu_sync_slab, e);

		if (!dma_fence_is_signaled(f))
			return f;

		dma_fence_put(f);
	}
	return NULL;
}
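
/*
 * Illustrative sketch only: draining a sync object. Each fence returned is
 * unsignaled and its reference is handed over to the caller, so the caller
 * must drop it with dma_fence_put() when done.
 *
 *	struct dma_fence *f;
 *	bool explicit;
 *
 *	while ((f = amdgpu_sync_get_fence(&sync, &explicit))) {
 *		// queue "f" as a dependency somewhere
 *		dma_fence_put(f);
 *	}
 */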

/**
 * amdgpu_sync_clone - clone a sync object
 *
 * @source: sync object to clone
 * @clone: pointer to destination sync object
 *
 * Adds references to all unsignaled fences in @source to @clone. Also
 * removes signaled fences from @source while at it.
 */
int amdgpu_sync_clone(struct amdgpu_sync *source, struct amdgpu_sync *clone)
{
	struct amdgpu_sync_entry *e;
	struct hlist_node *tmp;
	struct dma_fence *f;
	int i, r;

	hash_for_each_safe(source->fences, i, tmp, e, node) {
		f = e->fence;
		if (!dma_fence_is_signaled(f)) {
			r = amdgpu_sync_fence(clone, f, e->explicit);
			if (r)
				return r;
		} else {
			hash_del(&e->node);
			dma_fence_put(f);
			kmem_cache_free(amdgpu_sync_slab, e);
		}
	}

	dma_fence_put(clone->last_vm_update);
	clone->last_vm_update = dma_fence_get(source->last_vm_update);

	return 0;
}
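
/**
 * amdgpu_sync_wait - wait for all fences in the sync object
 *
 * @sync: the sync object to wait for
 * @intr: whether the wait is interruptible
 *
 * Wait for all fences in the sync object to signal, removing them as they
 * complete.
 */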
int amdgpu_sync_wait(struct amdgpu_sync *sync, bool intr)
{
	struct amdgpu_sync_entry *e;
	struct hlist_node *tmp;
	int i, r;

	hash_for_each_safe(sync->fences, i, tmp, e, node) {
		r = dma_fence_wait(e->fence, intr);
		if (r)
			return r;

		hash_del(&e->node);
		dma_fence_put(e->fence);
		kmem_cache_free(amdgpu_sync_slab, e);
	}

	return 0;
}

/**
 * amdgpu_sync_free - free the sync object
 *
 * @sync: the sync object to free
 *
 * Drop all fence references and free the entries of the sync object.
 */
void amdgpu_sync_free(struct amdgpu_sync *sync)
{
	struct amdgpu_sync_entry *e;
	struct hlist_node *tmp;
	unsigned i;

	hash_for_each_safe(sync->fences, i, tmp, e, node) {
		hash_del(&e->node);
		dma_fence_put(e->fence);
		kmem_cache_free(amdgpu_sync_slab, e);
	}

	dma_fence_put(sync->last_vm_update);
}

/**
 * amdgpu_sync_init - init the sync object subsystem
 *
 * Allocate the slab allocator for the sync entries.
 */
int amdgpu_sync_init(void)
{
	amdgpu_sync_slab = kmem_cache_create(
		"amdgpu_sync", sizeof(struct amdgpu_sync_entry), 0,
		SLAB_HWCACHE_ALIGN, NULL);
	if (!amdgpu_sync_slab)
		return -ENOMEM;

	return 0;
}

/**
 * amdgpu_sync_fini - fini the sync object subsystem
 *
 * Free the slab allocator.
 */
void amdgpu_sync_fini(void)
{
	kmem_cache_destroy(amdgpu_sync_slab);
}