#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_amdkfd.h"

struct amdgpu_sync_entry {
	struct hlist_node	node;
	struct dma_fence	*fence;
};

static struct kmem_cache *amdgpu_sync_slab;

/**
 * amdgpu_sync_create - zero init sync object
 *
 * @sync: sync object to initialize
 *
 * Just clear the sync object for now.
 */
void amdgpu_sync_create(struct amdgpu_sync *sync)
{
	hash_init(sync->fences);
	sync->last_vm_update = NULL;
}
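
/*
 * Typical lifecycle (illustrative sketch assembled from the functions in
 * this file; variable names are hypothetical):
 *
 *	struct amdgpu_sync sync;
 *	int r;
 *
 *	amdgpu_sync_create(&sync);
 *	r = amdgpu_sync_resv(adev, &sync, resv, AMDGPU_SYNC_NE_OWNER, owner);
 *	if (!r)
 *		r = amdgpu_sync_wait(&sync, true);
 *	amdgpu_sync_free(&sync);
 */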

/**
 * amdgpu_sync_same_dev - test if a fence belongs to us
 *
 * @adev: amdgpu device to use for the test
 * @f: fence to test
 *
 * Test if the fence was issued by us.
 */
static bool amdgpu_sync_same_dev(struct amdgpu_device *adev,
				 struct dma_fence *f)
{
	struct drm_sched_fence *s_fence = to_drm_sched_fence(f);

	if (s_fence) {
		struct amdgpu_ring *ring;

		ring = container_of(s_fence->sched, struct amdgpu_ring, sched);
		return ring->adev == adev;
	}

	return false;
}

/**
 * amdgpu_sync_get_owner - extract the owner of a fence
 *
 * @f: fence to get the owner from
 *
 * Extract who originally created the fence.
 */
static void *amdgpu_sync_get_owner(struct dma_fence *f)
{
	struct drm_sched_fence *s_fence;
	struct amdgpu_amdkfd_fence *kfd_fence;

	if (!f)
		return AMDGPU_FENCE_OWNER_UNDEFINED;

	s_fence = to_drm_sched_fence(f);
	if (s_fence)
		return s_fence->owner;

	kfd_fence = to_amdgpu_amdkfd_fence(f);
	if (kfd_fence)
		return AMDGPU_FENCE_OWNER_KFD;

	return AMDGPU_FENCE_OWNER_UNDEFINED;
}

/**
 * amdgpu_sync_keep_later - keep the later fence
 *
 * @keep: existing fence to test
 * @fence: new fence
 *
 * Either keep the existing fence or the new one, depending on which one
 * is later.
 */
static void amdgpu_sync_keep_later(struct dma_fence **keep,
				   struct dma_fence *fence)
{
	if (*keep && dma_fence_is_later(*keep, fence))
		return;

	dma_fence_put(*keep);
	*keep = dma_fence_get(fence);
}

/**
 * amdgpu_sync_add_later - add the fence to an existing hash entry
 *
 * @sync: sync object to add the fence to
 * @f: fence to add
 *
 * Tries to add the fence to an existing hash entry. Returns true when an
 * entry was found, false otherwise.
 */
static bool amdgpu_sync_add_later(struct amdgpu_sync *sync, struct dma_fence *f)
{
	struct amdgpu_sync_entry *e;

	hash_for_each_possible(sync->fences, e, node, f->context) {
		if (unlikely(e->fence->context != f->context))
			continue;

		amdgpu_sync_keep_later(&e->fence, f);
		return true;
	}
	return false;
}

/**
 * amdgpu_sync_fence - remember to sync to this fence
 *
 * @sync: sync object to add fence to
 * @f: fence to sync to
 *
 * Add the fence to the sync object.
 */
int amdgpu_sync_fence(struct amdgpu_sync *sync, struct dma_fence *f)
{
	struct amdgpu_sync_entry *e;

	if (!f)
		return 0;

	if (amdgpu_sync_add_later(sync, f))
		return 0;

	e = kmem_cache_alloc(amdgpu_sync_slab, GFP_KERNEL);
	if (!e)
		return -ENOMEM;

	hash_add(sync->fences, &e->node, f->context);
	e->fence = dma_fence_get(f);
	return 0;
}
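
/*
 * Illustrative note (not from the original sources): entries are hashed by
 * fence context, so adding two fences from the same context keeps only the
 * later one. With hypothetical fences fence_a and fence_b from context 42:
 *
 *	amdgpu_sync_fence(&sync, fence_a);	// seqno 1, new hash entry
 *	amdgpu_sync_fence(&sync, fence_b);	// seqno 2, replaces fence_a
 *
 * The sync object then holds a single entry referencing fence_b.
 */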

/**
 * amdgpu_sync_vm_fence - remember to sync to this VM fence
 *
 * @sync: sync object to add fence to
 * @fence: the VM fence to add
 *
 * Add the fence to the sync object and remember it as VM update.
 */
int amdgpu_sync_vm_fence(struct amdgpu_sync *sync, struct dma_fence *fence)
{
	if (!fence)
		return 0;

	amdgpu_sync_keep_later(&sync->last_vm_update, fence);
	return amdgpu_sync_fence(sync, fence);
}

/**
 * amdgpu_sync_resv - sync to a reservation object
 *
 * @adev: amdgpu device
 * @sync: sync object to add fences from reservation object to
 * @resv: reservation object with embedded fences
 * @mode: how owner affects which fences we sync to
 * @owner: owner of the planned job submission
 *
 * Sync to the fences in the reservation object.
 */
int amdgpu_sync_resv(struct amdgpu_device *adev, struct amdgpu_sync *sync,
		     struct dma_resv *resv, enum amdgpu_sync_mode mode,
		     void *owner)
{
	struct dma_resv_list *flist;
	struct dma_fence *f;
	unsigned i;
	int r = 0;

	if (resv == NULL)
		return -EINVAL;

	/* always sync to the exclusive fence */
	f = dma_resv_get_excl(resv);
	r = amdgpu_sync_fence(sync, f);

	flist = dma_resv_get_list(resv);
	if (!flist || r)
		return r;

	for (i = 0; i < flist->shared_count; ++i) {
		void *fence_owner;

		f = rcu_dereference_protected(flist->shared[i],
					      dma_resv_held(resv));

		fence_owner = amdgpu_sync_get_owner(f);

		/* Always sync to moves, no matter what */
		if (fence_owner == AMDGPU_FENCE_OWNER_UNDEFINED) {
			r = amdgpu_sync_fence(sync, f);
			if (r)
				break;
		}

		/* We only want to trigger KFD eviction fences on
		 * evict or move jobs. Skip KFD fences otherwise.
		 */
		if (fence_owner == AMDGPU_FENCE_OWNER_KFD &&
		    owner != AMDGPU_FENCE_OWNER_UNDEFINED)
			continue;

		/* Never sync to VM updates either. */
		if (fence_owner == AMDGPU_FENCE_OWNER_VM &&
		    owner != AMDGPU_FENCE_OWNER_UNDEFINED)
			continue;

		/* Ignore fences depending on the sync mode */
		switch (mode) {
		case AMDGPU_SYNC_ALWAYS:
			break;

		case AMDGPU_SYNC_NE_OWNER:
			if (amdgpu_sync_same_dev(adev, f) &&
			    fence_owner == owner)
				continue;
			break;

		case AMDGPU_SYNC_EQ_OWNER:
			if (amdgpu_sync_same_dev(adev, f) &&
			    fence_owner != owner)
				continue;
			break;

		case AMDGPU_SYNC_EXPLICIT:
			continue;
		}

		WARN(debug_evictions && fence_owner == AMDGPU_FENCE_OWNER_KFD,
		     "Adding eviction fence to sync obj");
		r = amdgpu_sync_fence(sync, f);
		if (r)
			break;
	}
	return r;
}
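
/*
 * Illustrative call (hypothetical, not from the original sources): a job
 * that only needs to wait for foreign work would skip its own fences with
 * AMDGPU_SYNC_NE_OWNER:
 *
 *	r = amdgpu_sync_resv(adev, &sync, resv, AMDGPU_SYNC_NE_OWNER, owner);
 *
 * AMDGPU_SYNC_ALWAYS instead pulls in every shared fence, while
 * AMDGPU_SYNC_EXPLICIT syncs only to the exclusive fence and buffer moves.
 */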

/**
 * amdgpu_sync_peek_fence - get the next fence not signaled yet
 *
 * @sync: the sync object
 * @ring: optional ring to use for the test
 *
 * Returns the next fence not signaled yet without removing it from the sync
 * object.
 */
struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
					 struct amdgpu_ring *ring)
{
	struct amdgpu_sync_entry *e;
	struct hlist_node *tmp;
	int i;

	hash_for_each_safe(sync->fences, i, tmp, e, node) {
		struct dma_fence *f = e->fence;
		struct drm_sched_fence *s_fence = to_drm_sched_fence(f);

		if (dma_fence_is_signaled(f)) {
			hash_del(&e->node);
			dma_fence_put(f);
			kmem_cache_free(amdgpu_sync_slab, e);
			continue;
		}
		if (ring && s_fence) {
			/* For fences from the same ring it is sufficient
			 * when they are scheduled.
			 */
			if (s_fence->sched == &ring->sched) {
				if (dma_fence_is_signaled(&s_fence->scheduled))
					continue;

				return &s_fence->scheduled;
			}
		}

		return f;
	}

	return NULL;
}

/**
 * amdgpu_sync_get_fence - get the next fence from the sync object
 *
 * @sync: sync object to consume
 *
 * Gets and removes the next fence from the sync object not signaled yet.
 */
struct dma_fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync)
{
	struct amdgpu_sync_entry *e;
	struct hlist_node *tmp;
	struct dma_fence *f;
	int i;

	hash_for_each_safe(sync->fences, i, tmp, e, node) {
		f = e->fence;

		hash_del(&e->node);
		kmem_cache_free(amdgpu_sync_slab, e);

		if (!dma_fence_is_signaled(f))
			return f;

		dma_fence_put(f);
	}
	return NULL;
}

/**
 * amdgpu_sync_clone - clone a sync object
 *
 * @source: sync object to clone
 * @clone: pointer to destination sync object
 *
 * Adds references to all unsignaled fences in @source to @clone. Also
 * removes signaled fences from @source while at it.
 */
int amdgpu_sync_clone(struct amdgpu_sync *source, struct amdgpu_sync *clone)
{
	struct amdgpu_sync_entry *e;
	struct hlist_node *tmp;
	struct dma_fence *f;
	int i, r;

	hash_for_each_safe(source->fences, i, tmp, e, node) {
		f = e->fence;
		if (!dma_fence_is_signaled(f)) {
			r = amdgpu_sync_fence(clone, f);
			if (r)
				return r;
		} else {
			hash_del(&e->node);
			dma_fence_put(f);
			kmem_cache_free(amdgpu_sync_slab, e);
		}
	}

	dma_fence_put(clone->last_vm_update);
	clone->last_vm_update = dma_fence_get(source->last_vm_update);

	return 0;
}

/**
 * amdgpu_sync_wait - wait for all fences in the sync object
 *
 * @sync: the sync object
 * @intr: if true, the wait is interruptible
 *
 * Wait for all fences in the sync object and release them afterwards.
 */
int amdgpu_sync_wait(struct amdgpu_sync *sync, bool intr)
{
	struct amdgpu_sync_entry *e;
	struct hlist_node *tmp;
	int i, r;

	hash_for_each_safe(sync->fences, i, tmp, e, node) {
		r = dma_fence_wait(e->fence, intr);
		if (r)
			return r;

		hash_del(&e->node);
		dma_fence_put(e->fence);
		kmem_cache_free(amdgpu_sync_slab, e);
	}

	return 0;
}

/**
 * amdgpu_sync_free - free the sync object
 *
 * @sync: sync object to use
 *
 * Free the sync object.
 */
void amdgpu_sync_free(struct amdgpu_sync *sync)
{
	struct amdgpu_sync_entry *e;
	struct hlist_node *tmp;
	unsigned i;

	hash_for_each_safe(sync->fences, i, tmp, e, node) {
		hash_del(&e->node);
		dma_fence_put(e->fence);
		kmem_cache_free(amdgpu_sync_slab, e);
	}

	dma_fence_put(sync->last_vm_update);
}

/**
 * amdgpu_sync_init - init the sync object subsystem
 *
 * Allocate the slab allocator.
 */
int amdgpu_sync_init(void)
{
	amdgpu_sync_slab = kmem_cache_create(
		"amdgpu_sync", sizeof(struct amdgpu_sync_entry), 0,
		SLAB_HWCACHE_ALIGN, NULL);
	if (!amdgpu_sync_slab)
		return -ENOMEM;

	return 0;
}

/**
 * amdgpu_sync_fini - finalize the sync object subsystem
 *
 * Free the slab allocator.
 */
void amdgpu_sync_fini(void)
{
	kmem_cache_destroy(amdgpu_sync_slab);
}