#include "amdgpu_ids.h"

#include <linux/idr.h>
#include <linux/dma-fence-array.h>
#include <drm/drmP.h>

#include "amdgpu.h"
#include "amdgpu_trace.h"
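
/*
 * PASID manager
 *
 * PASIDs are global address space identifiers that are shared between
 * the GPU, an IOMMU and the driver.
 */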
static DEFINE_IDA(amdgpu_pasid_ida);
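
/**
 * amdgpu_pasid_alloc - Allocate a PASID
 * @bits: Maximum width of the PASID in bits, must be at least 1
 *
 * Allocates a PASID of the requested width while keeping smaller PASIDs
 * available if possible: the range [1 << (bits - 1), 1 << bits) is tried
 * first, falling back to narrower ranges on -ENOSPC.
 *
 * Returns a positive integer on success, -EINVAL if @bits is 0, or
 * -ENOSPC if no PASID was available.
 */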
int amdgpu_pasid_alloc(unsigned int bits)
{
	int pasid = -EINVAL;

	for (bits = min(bits, 31U); bits > 0; bits--) {
		pasid = ida_simple_get(&amdgpu_pasid_ida,
				       1U << (bits - 1), 1U << bits,
				       GFP_KERNEL);
		if (pasid != -ENOSPC)
			break;
	}

	return pasid;
}
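
/**
 * amdgpu_pasid_free - Free a PASID
 * @pasid: PASID to free
 */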
void amdgpu_pasid_free(unsigned int pasid)
{
	ida_simple_remove(&amdgpu_pasid_ida, pasid);
}
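
/*
 * VMID manager
 *
 * VMIDs are a per-VMHUB identifier for page table handling.
 */

/**
 * amdgpu_vmid_had_gpu_reset - check if reset occurred since last use
 * @adev: amdgpu_device pointer
 * @id: VMID structure
 *
 * Check if a GPU reset occurred since the VMID was last used.
 */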
bool amdgpu_vmid_had_gpu_reset(struct amdgpu_device *adev,
			       struct amdgpu_vmid *id)
{
	return id->current_gpu_reset_count !=
		atomic_read(&adev->gpu_reset_counter);
}
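
/* Grab the reserved VMID of @vm for @ring's VMHUB; id_mgr->lock must be held. */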
static int amdgpu_vmid_grab_reserved_locked(struct amdgpu_vm *vm,
					    struct amdgpu_ring *ring,
					    struct amdgpu_sync *sync,
					    struct dma_fence *fence,
					    struct amdgpu_job *job)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned vmhub = ring->funcs->vmhub;
	uint64_t fence_context = adev->fence_context + ring->idx;
	struct amdgpu_vmid *id = vm->reserved_vmid[vmhub];
	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
	struct dma_fence *updates = sync->last_vm_update;
	int r = 0;
	struct dma_fence *flushed, *tmp;
	bool needs_flush = vm->use_cpu_for_update;

	flushed = id->flushed_updates;
	if ((amdgpu_vmid_had_gpu_reset(adev, id)) ||
	    (atomic64_read(&id->owner) != vm->entity.fence_context) ||
	    (job->vm_pd_addr != id->pd_gpu_addr) ||
	    (updates && (!flushed || updates->context != flushed->context ||
			 dma_fence_is_later(updates, flushed))) ||
	    (!id->last_flush || (id->last_flush->context != fence_context &&
				 !dma_fence_is_signaled(id->last_flush)))) {
		needs_flush = true;
		/* Invalidate the cached PD address to prevent one context
		 * from starving another on this reserved ID.
		 */
		id->pd_gpu_addr = 0;
		tmp = amdgpu_sync_peek_fence(&id->active, ring);
		if (tmp) {
			r = amdgpu_sync_fence(adev, sync, tmp, false);
			return r;
		}
	}

	/* Good, we can use this VMID. Remember this submission as
	 * user of the VMID.
	 */
	r = amdgpu_sync_fence(ring->adev, &id->active, fence, false);
	if (r)
		goto out;

	if (updates && (!flushed || updates->context != flushed->context ||
			dma_fence_is_later(updates, flushed))) {
		dma_fence_put(id->flushed_updates);
		id->flushed_updates = dma_fence_get(updates);
	}
	id->pd_gpu_addr = job->vm_pd_addr;
	atomic64_set(&id->owner, vm->entity.fence_context);
	job->vm_needs_flush = needs_flush;
	if (needs_flush) {
		dma_fence_put(id->last_flush);
		id->last_flush = NULL;
	}
	job->vmid = id - id_mgr->ids;
	trace_amdgpu_vm_grab_id(vm, ring, job);
out:
	return r;
}
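
/**
 * amdgpu_vmid_grab - allocate the next free VMID
 * @vm: vm to allocate id for
 * @ring: ring we want to submit job to
 * @sync: sync object where we add dependencies
 * @fence: fence protecting ID from reuse
 * @job: job that wants to use the VMID
 *
 * Allocate an id for the vm, adding fences to the sync obj as necessary.
 */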
int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
		     struct amdgpu_sync *sync, struct dma_fence *fence,
		     struct amdgpu_job *job)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned vmhub = ring->funcs->vmhub;
	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
	uint64_t fence_context = adev->fence_context + ring->idx;
	struct dma_fence *updates = sync->last_vm_update;
	struct amdgpu_vmid *id, *idle;
	struct dma_fence **fences;
	unsigned i;
	int r = 0;

	mutex_lock(&id_mgr->lock);
	if (vm->reserved_vmid[vmhub]) {
		r = amdgpu_vmid_grab_reserved_locked(vm, ring, sync, fence, job);
		mutex_unlock(&id_mgr->lock);
		return r;
	}
	fences = kmalloc_array(id_mgr->num_ids, sizeof(void *), GFP_KERNEL);
	if (!fences) {
		mutex_unlock(&id_mgr->lock);
		return -ENOMEM;
	}

	/* Check if we have an idle VMID */
	i = 0;
	list_for_each_entry(idle, &id_mgr->ids_lru, list) {
		fences[i] = amdgpu_sync_peek_fence(&idle->active, ring);
		if (!fences[i])
			break;
		++i;
	}

	/* If we can't find an idle VMID to use, wait till one becomes available */
	if (&idle->list == &id_mgr->ids_lru) {
		u64 fence_context = adev->vm_manager.fence_context + ring->idx;
		unsigned seqno = ++adev->vm_manager.seqno[ring->idx];
		struct dma_fence_array *array;
		unsigned j;

		for (j = 0; j < i; ++j)
			dma_fence_get(fences[j]);

		array = dma_fence_array_create(i, fences, fence_context,
					       seqno, true);
		if (!array) {
			for (j = 0; j < i; ++j)
				dma_fence_put(fences[j]);
			kfree(fences);
			r = -ENOMEM;
			goto error;
		}

		/* Wait for all in-flight users of the busy VMIDs to finish
		 * before this submission is scheduled.
		 */
		r = amdgpu_sync_fence(ring->adev, sync, &array->base, false);
		dma_fence_put(&array->base);
		if (r)
			goto error;

		mutex_unlock(&id_mgr->lock);
		return 0;
	}
	kfree(fences);

	job->vm_needs_flush = vm->use_cpu_for_update;
	/* Check if we can use a VMID already assigned to this VM */
	list_for_each_entry_reverse(id, &id_mgr->ids_lru, list) {
		struct dma_fence *flushed;
		bool needs_flush = vm->use_cpu_for_update;

		/* Check all the prerequisites to using this VMID */
		if (amdgpu_vmid_had_gpu_reset(adev, id))
			continue;

		if (atomic64_read(&id->owner) != vm->entity.fence_context)
			continue;

		if (job->vm_pd_addr != id->pd_gpu_addr)
			continue;

		if (!id->last_flush ||
		    (id->last_flush->context != fence_context &&
		     !dma_fence_is_signaled(id->last_flush)))
			needs_flush = true;

		flushed = id->flushed_updates;
		if (updates && (!flushed || dma_fence_is_later(updates, flushed)))
			needs_flush = true;

		/* Concurrent flushes are only possible starting with Vega10 */
		if (adev->asic_type < CHIP_VEGA10 && needs_flush)
			continue;

		/* Good, we can use this VMID. Remember this submission as
		 * user of the VMID.
		 */
		r = amdgpu_sync_fence(ring->adev, &id->active, fence, false);
		if (r)
			goto error;

		if (updates && (!flushed || dma_fence_is_later(updates, flushed))) {
			dma_fence_put(id->flushed_updates);
			id->flushed_updates = dma_fence_get(updates);
		}

		if (needs_flush)
			goto needs_flush;
		else
			goto no_flush_needed;
	}

	/* Still no ID to use? Then use the idle one found earlier */
	id = idle;

	/* Remember this submission as user of the VMID */
	r = amdgpu_sync_fence(ring->adev, &id->active, fence, false);
	if (r)
		goto error;

	id->pd_gpu_addr = job->vm_pd_addr;
	dma_fence_put(id->flushed_updates);
	id->flushed_updates = dma_fence_get(updates);
	atomic64_set(&id->owner, vm->entity.fence_context);

needs_flush:
	job->vm_needs_flush = true;
	dma_fence_put(id->last_flush);
	id->last_flush = NULL;

no_flush_needed:
	list_move_tail(&id->list, &id_mgr->ids_lru);

	job->vmid = id - id_mgr->ids;
	trace_amdgpu_vm_grab_id(vm, ring, job);

error:
	mutex_unlock(&id_mgr->lock);
	return r;
}
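
/**
 * amdgpu_vmid_alloc_reserved - reserve a VMID for exclusive use by a VM
 * @adev: amdgpu_device pointer
 * @vm: vm to reserve the VMID for
 * @vmhub: VMHUB to reserve the VMID on
 *
 * Removes the first idle VMID from the LRU and dedicates it to @vm.
 * Returns 0 on success, -EINVAL if the reserved VMID limit is exceeded.
 */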
int amdgpu_vmid_alloc_reserved(struct amdgpu_device *adev,
			       struct amdgpu_vm *vm,
			       unsigned vmhub)
{
	struct amdgpu_vmid_mgr *id_mgr;
	struct amdgpu_vmid *idle;
	int r = 0;

	id_mgr = &adev->vm_manager.id_mgr[vmhub];
	mutex_lock(&id_mgr->lock);
	if (vm->reserved_vmid[vmhub])
		goto unlock;
	if (atomic_inc_return(&id_mgr->reserved_vmid_num) >
	    AMDGPU_VM_MAX_RESERVED_VMID) {
		DRM_ERROR("Over limit of reserved VMIDs\n");
		atomic_dec(&id_mgr->reserved_vmid_num);
		r = -EINVAL;
		goto unlock;
	}

	/* Select the first entry on the LRU as the reserved VMID */
	idle = list_first_entry(&id_mgr->ids_lru, struct amdgpu_vmid, list);
	list_del_init(&idle->list);
	vm->reserved_vmid[vmhub] = idle;
	mutex_unlock(&id_mgr->lock);

	return 0;
unlock:
	mutex_unlock(&id_mgr->lock);
	return r;
}
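
/**
 * amdgpu_vmid_free_reserved - give a reserved VMID back to the ID manager
 * @adev: amdgpu_device pointer
 * @vm: vm owning the reserved VMID
 * @vmhub: VMHUB the VMID was reserved on
 */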
void amdgpu_vmid_free_reserved(struct amdgpu_device *adev,
			       struct amdgpu_vm *vm,
			       unsigned vmhub)
{
	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];

	mutex_lock(&id_mgr->lock);
	if (vm->reserved_vmid[vmhub]) {
		list_add(&vm->reserved_vmid[vmhub]->list,
			 &id_mgr->ids_lru);
		vm->reserved_vmid[vmhub] = NULL;
		atomic_dec(&id_mgr->reserved_vmid_num);
	}
	mutex_unlock(&id_mgr->lock);
}
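
/**
 * amdgpu_vmid_reset - reset a single VMID to its default state
 * @adev: amdgpu_device pointer
 * @vmhub: VMHUB the VMID belongs to
 * @vmid: VMID number to reset
 *
 * Clears the owner and the saved GDS/GWS/OA state of the VMID.
 */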
void amdgpu_vmid_reset(struct amdgpu_device *adev, unsigned vmhub,
		       unsigned vmid)
{
	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
	struct amdgpu_vmid *id = &id_mgr->ids[vmid];

	atomic64_set(&id->owner, 0);
	id->gds_base = 0;
	id->gds_size = 0;
	id->gws_base = 0;
	id->gws_size = 0;
	id->oa_base = 0;
	id->oa_size = 0;
}
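
/**
 * amdgpu_vmid_reset_all - reset all VMIDs on all VMHUBs
 * @adev: amdgpu_device pointer
 *
 * Resets every VMID except the system VMID 0 to force a flush on next use.
 */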
void amdgpu_vmid_reset_all(struct amdgpu_device *adev)
{
	unsigned i, j;

	for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
		struct amdgpu_vmid_mgr *id_mgr =
			&adev->vm_manager.id_mgr[i];

		for (j = 1; j < id_mgr->num_ids; ++j)
			amdgpu_vmid_reset(adev, i, j);
	}
}
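
/**
 * amdgpu_vmid_mgr_init - init the VMID manager
 * @adev: amdgpu_device pointer
 *
 * Initializes the per-VMHUB VMID managers and puts every ID except the
 * system VMID 0 on the LRU list.
 */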
void amdgpu_vmid_mgr_init(struct amdgpu_device *adev)
{
	unsigned i, j;

	for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
		struct amdgpu_vmid_mgr *id_mgr =
			&adev->vm_manager.id_mgr[i];

		mutex_init(&id_mgr->lock);
		INIT_LIST_HEAD(&id_mgr->ids_lru);
		atomic_set(&id_mgr->reserved_vmid_num, 0);

		/* skip over VMID 0, since it is the system VM */
		for (j = 1; j < id_mgr->num_ids; ++j) {
			amdgpu_vmid_reset(adev, i, j);
			amdgpu_sync_create(&id_mgr->ids[j].active);
			list_add_tail(&id_mgr->ids[j].list, &id_mgr->ids_lru);
		}
	}

	adev->vm_manager.fence_context =
		dma_fence_context_alloc(AMDGPU_MAX_RINGS);
	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
		adev->vm_manager.seqno[i] = 0;
}
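
/**
 * amdgpu_vmid_mgr_fini - cleanup VMID manager
 * @adev: amdgpu_device pointer
 *
 * Cleans up the VMID managers and drops the remaining fence references.
 */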
void amdgpu_vmid_mgr_fini(struct amdgpu_device *adev)
{
	unsigned i, j;

	for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
		struct amdgpu_vmid_mgr *id_mgr =
			&adev->vm_manager.id_mgr[i];

		mutex_destroy(&id_mgr->lock);
		for (j = 0; j < AMDGPU_NUM_VMID; ++j) {
			struct amdgpu_vmid *id = &id_mgr->ids[j];

			amdgpu_sync_free(&id->active);
			dma_fence_put(id->flushed_updates);
			dma_fence_put(id->last_flush);
		}
	}
}