#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"

struct amdgpu_sync_entry {
	struct hlist_node	node;
	struct fence		*fence;
};

static struct kmem_cache *amdgpu_sync_slab;

/**
 * amdgpu_sync_create - zero init sync object
 *
 * @sync: sync object to initialize
 *
 * Just clear the sync object for now.
 */
void amdgpu_sync_create(struct amdgpu_sync *sync)
{
	hash_init(sync->fences);
	sync->last_vm_update = NULL;
}
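
/*
 * A minimal usage sketch (illustrative only, not code from this file):
 * a sync object is typically embedded in a job structure or created on
 * the stack, filled with fences, and then waited on or drained:
 *
 *	struct amdgpu_sync sync;
 *	int r;
 *
 *	amdgpu_sync_create(&sync);
 *	r = amdgpu_sync_fence(adev, &sync, fence);
 *	if (!r)
 *		r = amdgpu_sync_wait(&sync);
 *	amdgpu_sync_free(&sync);
 */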

/**
 * amdgpu_sync_same_dev - test if a fence belongs to us
 *
 * @adev: amdgpu device to use for the test
 * @f: fence to test
 *
 * Test if the fence was issued by us.
 */
static bool amdgpu_sync_same_dev(struct amdgpu_device *adev, struct fence *f)
{
	struct amd_sched_fence *s_fence = to_amd_sched_fence(f);

	if (s_fence) {
		struct amdgpu_ring *ring;

		ring = container_of(s_fence->sched, struct amdgpu_ring, sched);
		return ring->adev == adev;
	}

	return false;
}

/**
 * amdgpu_sync_get_owner - extract the owner of a fence
 *
 * @f: fence to get the owner from
 *
 * Extract who originally created the fence.
 */
static void *amdgpu_sync_get_owner(struct fence *f)
{
	struct amd_sched_fence *s_fence = to_amd_sched_fence(f);

	if (s_fence)
		return s_fence->owner;

	return AMDGPU_FENCE_OWNER_UNDEFINED;
}

/**
 * amdgpu_sync_keep_later - keep the later fence
 *
 * @keep: existing fence to test
 * @fence: new fence
 *
 * Either keep the existing fence or the new one, depending on which one
 * is later.
 */
static void amdgpu_sync_keep_later(struct fence **keep, struct fence *fence)
{
	if (*keep && fence_is_later(*keep, fence))
		return;

	fence_put(*keep);
	*keep = fence_get(fence);
}

/**
 * amdgpu_sync_fence - remember to sync to this fence
 *
 * @adev: amdgpu device the sync object belongs to
 * @sync: sync object to add the fence to
 * @f: fence to sync to, may be NULL
 */
int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
		      struct fence *f)
{
	struct amdgpu_sync_entry *e;

	if (!f)
		return 0;

	if (amdgpu_sync_same_dev(adev, f) &&
	    amdgpu_sync_get_owner(f) == AMDGPU_FENCE_OWNER_VM)
		amdgpu_sync_keep_later(&sync->last_vm_update, f);

	/* Fences on the same context are guaranteed to be ordered, so it
	 * is enough to remember only the latest fence per context.
	 */
	hash_for_each_possible(sync->fences, e, node, f->context) {
		if (unlikely(e->fence->context != f->context))
			continue;

		amdgpu_sync_keep_later(&e->fence, f);
		return 0;
	}

	e = kmem_cache_alloc(amdgpu_sync_slab, GFP_KERNEL);
	if (!e)
		return -ENOMEM;

	hash_add(sync->fences, &e->node, f->context);
	e->fence = fence_get(f);
	return 0;
}

/**
 * amdgpu_sync_resv - sync to a reservation object
 *
 * @adev: amdgpu device
 * @sync: sync object to add fences from the reservation object to
 * @resv: reservation object with embedded fences
 * @owner: fence owner to filter out, or AMDGPU_FENCE_OWNER_UNDEFINED
 *
 * Sync to the exclusive fence and all relevant shared fences.
 */
int amdgpu_sync_resv(struct amdgpu_device *adev,
		     struct amdgpu_sync *sync,
		     struct reservation_object *resv,
		     void *owner)
{
	struct reservation_object_list *flist;
	struct fence *f;
	void *fence_owner;
	unsigned i;
	int r = 0;

	if (resv == NULL)
		return -EINVAL;

	/* always sync to the exclusive fence */
	f = reservation_object_get_excl(resv);
	r = amdgpu_sync_fence(adev, sync, f);

	flist = reservation_object_get_list(resv);
	if (!flist || r)
		return r;

	for (i = 0; i < flist->shared_count; ++i) {
		f = rcu_dereference_protected(flist->shared[i],
					      reservation_object_held(resv));
		if (amdgpu_sync_same_dev(adev, f)) {
			/* VM updates are only interesting
			 * for other VM updates and moves.
			 */
			fence_owner = amdgpu_sync_get_owner(f);
			if ((owner != AMDGPU_FENCE_OWNER_UNDEFINED) &&
			    (fence_owner != AMDGPU_FENCE_OWNER_UNDEFINED) &&
			    ((owner == AMDGPU_FENCE_OWNER_VM) !=
			     (fence_owner == AMDGPU_FENCE_OWNER_VM)))
				continue;

			/* Ignore fences from the same owner as
			 * long as it isn't undefined.
			 */
			if (owner != AMDGPU_FENCE_OWNER_UNDEFINED &&
			    fence_owner == owner)
				continue;
		}

		r = amdgpu_sync_fence(adev, sync, f);
		if (r)
			break;
	}
	return r;
}
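
/*
 * Illustrative call (assumption: a TTM-backed BO exposes its reservation
 * object as bo->tbo.resv, as in this kernel generation):
 *
 *	r = amdgpu_sync_resv(adev, &sync, bo->tbo.resv,
 *			     AMDGPU_FENCE_OWNER_UNDEFINED);
 *
 * With owner == AMDGPU_FENCE_OWNER_UNDEFINED nothing is filtered out and
 * we sync to all fences in the reservation object.
 */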
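
/**
 * amdgpu_sync_get_fence - get the next fence from the sync object
 *
 * @sync: sync object to use
 *
 * Remove and return the first fence that has not signaled yet; signaled
 * fences are dropped along the way. Returns NULL once all fences are
 * signaled and the sync object is empty.
 */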
struct fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync)
{
	struct amdgpu_sync_entry *e;
	struct hlist_node *tmp;
	struct fence *f;
	int i;

	hash_for_each_safe(sync->fences, i, tmp, e, node) {
		f = e->fence;

		hash_del(&e->node);
		kmem_cache_free(amdgpu_sync_slab, e);

		if (!fence_is_signaled(f))
			return f;

		fence_put(f);
	}
	return NULL;
}
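
/**
 * amdgpu_sync_wait - wait for all fences in the sync object
 *
 * @sync: sync object to wait on
 *
 * Wait uninterruptibly for every fence to signal, releasing the entries
 * as they complete. Returns 0 on success or the first fence_wait() error.
 */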
int amdgpu_sync_wait(struct amdgpu_sync *sync)
{
	struct amdgpu_sync_entry *e;
	struct hlist_node *tmp;
	int i, r;

	hash_for_each_safe(sync->fences, i, tmp, e, node) {
		r = fence_wait(e->fence, false);
		if (r)
			return r;

		hash_del(&e->node);
		fence_put(e->fence);
		kmem_cache_free(amdgpu_sync_slab, e);
	}

	return 0;
}

/**
 * amdgpu_sync_free - free the sync object
 *
 * @sync: sync object to use
 *
 * Free the sync object.
 */
void amdgpu_sync_free(struct amdgpu_sync *sync)
{
	struct amdgpu_sync_entry *e;
	struct hlist_node *tmp;
	unsigned i;

	hash_for_each_safe(sync->fences, i, tmp, e, node) {
		hash_del(&e->node);
		fence_put(e->fence);
		kmem_cache_free(amdgpu_sync_slab, e);
	}

	fence_put(sync->last_vm_update);
}

/**
 * amdgpu_sync_init - init sync object subsystem
 *
 * Allocate the slab allocator.
 */
int amdgpu_sync_init(void)
{
	amdgpu_sync_slab = kmem_cache_create(
		"amdgpu_sync", sizeof(struct amdgpu_sync_entry), 0,
		SLAB_HWCACHE_ALIGN, NULL);
	if (!amdgpu_sync_slab)
		return -ENOMEM;

	return 0;
}

/**
 * amdgpu_sync_fini - fini sync object subsystem
 *
 * Free the slab allocator.
 */
void amdgpu_sync_fini(void)
{
	kmem_cache_destroy(amdgpu_sync_slab);
}