#include <linux/dma-buf.h>
#include <linux/list.h>
#include <linux/pagemap.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>

#include "amdgpu_object.h"
#include "amdgpu_gem.h"
#include "amdgpu_vm.h"
#include "amdgpu_amdkfd.h"
#include "amdgpu_dma_buf.h"
#include <uapi/linux/kfd_ioctl.h>
#include "amdgpu_xgmi.h"

/* Userptr restore delay in milliseconds */
#define AMDGPU_USERPTR_RESTORE_DELAY_MS 1

/* System and TTM memory accounting, shared by all KFD processes */
static struct {
	uint64_t max_system_mem_limit;
	uint64_t max_ttm_mem_limit;
	int64_t system_mem_used;
	int64_t ttm_mem_used;
	spinlock_t mem_limit_lock;
} kfd_mem_limit;

static const char * const domain_bit_to_string[] = {
	"CPU",
	"GTT",
	"VRAM",
	"GDS",
	"GWS",
	"OA"
};

#define domain_string(domain) domain_bit_to_string[ffs(domain)-1]

static void amdgpu_amdkfd_restore_userptr_worker(struct work_struct *work);

static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
{
	return (struct amdgpu_device *)kgd;
}

/* Check whether @mem already has an attachment (bo_va) in VM @avm */
static bool kfd_mem_is_attached(struct amdgpu_vm *avm,
		struct kgd_mem *mem)
{
	struct kfd_mem_attachment *entry;

	list_for_each_entry(entry, &mem->attachments, list)
		if (entry->bo_va->base.vm == avm)
			return true;

	return false;
}

/*
 * Set memory usage limits: system (kernel + userptr) memory is capped at
 * 15/16 of available low memory, TTM memory at 3/8 of it.
 */
void amdgpu_amdkfd_gpuvm_init_mem_limits(void)
{
	struct sysinfo si;
	uint64_t mem;

	si_meminfo(&si);
	mem = si.freeram - si.freehigh;
	mem *= si.mem_unit;

	spin_lock_init(&kfd_mem_limit.mem_limit_lock);
	kfd_mem_limit.max_system_mem_limit = mem - (mem >> 4);
	kfd_mem_limit.max_ttm_mem_limit = (mem >> 1) - (mem >> 3);
	pr_debug("Kernel memory limit %lluM, TTM limit %lluM\n",
		(kfd_mem_limit.max_system_mem_limit >> 20),
		(kfd_mem_limit.max_ttm_mem_limit >> 20));
}

void amdgpu_amdkfd_reserve_system_mem(uint64_t size)
{
	kfd_mem_limit.system_mem_used += size;
}

/*
 * Rough estimate of the page table memory needed to map mem_size bytes
 * (mem_size / 16K).
 */
#define ESTIMATE_PT_SIZE(mem_size) ((mem_size) >> 14)

/*
 * Size of the per-BO bookkeeping: the amdgpu_bo and ttm_tt structures
 * plus the page and DMA address arrays that TTM allocates for the BO.
 */
static size_t amdgpu_amdkfd_acc_size(uint64_t size)
{
	size >>= PAGE_SHIFT;
	size *= sizeof(dma_addr_t) + sizeof(void *);

	return __roundup_pow_of_two(sizeof(struct amdgpu_bo)) +
		__roundup_pow_of_two(sizeof(struct ttm_tt)) +
		PAGE_ALIGN(size);
}

/*
 * Check and reserve space against the system, TTM and VRAM memory limits
 * for a new allocation of @size bytes in @domain. Returns -ENOMEM if any
 * limit would be exceeded.
 */
static int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
		uint64_t size, u32 domain, bool sg)
{
	uint64_t reserved_for_pt =
		ESTIMATE_PT_SIZE(amdgpu_amdkfd_total_mem_size);
	size_t acc_size, system_mem_needed, ttm_mem_needed, vram_needed;
	int ret = 0;

	acc_size = amdgpu_amdkfd_acc_size(size);

	vram_needed = 0;
	if (domain == AMDGPU_GEM_DOMAIN_GTT) {
		/* TTM GTT memory */
		system_mem_needed = acc_size + size;
		ttm_mem_needed = acc_size + size;
	} else if (domain == AMDGPU_GEM_DOMAIN_CPU && !sg) {
		/* Userptr */
		system_mem_needed = acc_size + size;
		ttm_mem_needed = acc_size;
	} else {
		/* VRAM and SG */
		system_mem_needed = acc_size;
		ttm_mem_needed = acc_size;
		if (domain == AMDGPU_GEM_DOMAIN_VRAM)
			vram_needed = size;
	}

	spin_lock(&kfd_mem_limit.mem_limit_lock);

	if (kfd_mem_limit.system_mem_used + system_mem_needed >
	    kfd_mem_limit.max_system_mem_limit)
		pr_debug("Set no_system_mem_limit=1 if using shared memory\n");

	if ((kfd_mem_limit.system_mem_used + system_mem_needed >
	     kfd_mem_limit.max_system_mem_limit && !no_system_mem_limit) ||
	    (kfd_mem_limit.ttm_mem_used + ttm_mem_needed >
	     kfd_mem_limit.max_ttm_mem_limit) ||
	    (adev->kfd.vram_used + vram_needed >
	     adev->gmc.real_vram_size - reserved_for_pt)) {
		ret = -ENOMEM;
	} else {
		kfd_mem_limit.system_mem_used += system_mem_needed;
		kfd_mem_limit.ttm_mem_used += ttm_mem_needed;
		adev->kfd.vram_used += vram_needed;
	}

	spin_unlock(&kfd_mem_limit.mem_limit_lock);
	return ret;
}

static void unreserve_mem_limit(struct amdgpu_device *adev,
		uint64_t size, u32 domain, bool sg)
{
	size_t acc_size;

	acc_size = amdgpu_amdkfd_acc_size(size);

	spin_lock(&kfd_mem_limit.mem_limit_lock);
	if (domain == AMDGPU_GEM_DOMAIN_GTT) {
		kfd_mem_limit.system_mem_used -= (acc_size + size);
		kfd_mem_limit.ttm_mem_used -= (acc_size + size);
	} else if (domain == AMDGPU_GEM_DOMAIN_CPU && !sg) {
		kfd_mem_limit.system_mem_used -= (acc_size + size);
		kfd_mem_limit.ttm_mem_used -= acc_size;
	} else {
		kfd_mem_limit.system_mem_used -= acc_size;
		kfd_mem_limit.ttm_mem_used -= acc_size;
		if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
			adev->kfd.vram_used -= size;
			WARN_ONCE(adev->kfd.vram_used < 0,
				  "kfd VRAM memory accounting unbalanced");
		}
	}
	WARN_ONCE(kfd_mem_limit.system_mem_used < 0,
		  "kfd system memory accounting unbalanced");
	WARN_ONCE(kfd_mem_limit.ttm_mem_used < 0,
		  "kfd TTM memory accounting unbalanced");

	spin_unlock(&kfd_mem_limit.mem_limit_lock);
}

void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	u32 domain = bo->preferred_domains;
	bool sg = (bo->preferred_domains == AMDGPU_GEM_DOMAIN_CPU);

	if (bo->flags & AMDGPU_AMDKFD_CREATE_USERPTR_BO) {
		domain = AMDGPU_GEM_DOMAIN_CPU;
		sg = false;
	}

	unreserve_mem_limit(adev, amdgpu_bo_size(bo), domain, sg);
}

/*
 * amdgpu_amdkfd_remove_eviction_fence - Remove a KFD eviction fence from
 * a BO's reservation object.
 *
 * Rebuilds the shared fence list without the fences that belong to the
 * eviction fence's context, so operations on this BO no longer trigger
 * an eviction of the KFD process. The caller must hold the reservation.
 */
234static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
235 struct amdgpu_amdkfd_fence *ef)
236{
237 struct dma_resv *resv = bo->tbo.base.resv;
238 struct dma_resv_list *old, *new;
239 unsigned int i, j, k;
240
241 if (!ef)
242 return -EINVAL;
243
244 old = dma_resv_shared_list(resv);
245 if (!old)
246 return 0;
247
248 new = kmalloc(struct_size(new, shared, old->shared_max), GFP_KERNEL);
249 if (!new)
250 return -ENOMEM;

	/*
	 * Partition the shared fences: fences that do not belong to the
	 * eviction fence context are kept at the front of the new list,
	 * fences from the eviction fence context are collected at the end
	 * and dropped below.
	 */
255 for (i = 0, j = old->shared_count, k = 0; i < old->shared_count; ++i) {
256 struct dma_fence *f;
257
258 f = rcu_dereference_protected(old->shared[i],
259 dma_resv_held(resv));
260
261 if (f->context == ef->base.context)
262 RCU_INIT_POINTER(new->shared[--j], f);
263 else
264 RCU_INIT_POINTER(new->shared[k++], f);
265 }
266 new->shared_max = old->shared_max;
267 new->shared_count = k;
268
269
270 write_seqcount_begin(&resv->seq);
271 RCU_INIT_POINTER(resv->fence, new);
272 write_seqcount_end(&resv->seq);
273
274
275 for (i = j; i < old->shared_count; ++i) {
276 struct dma_fence *f;
277
278 f = rcu_dereference_protected(new->shared[i],
279 dma_resv_held(resv));
280 dma_fence_put(f);
281 }
282 kfree_rcu(old, rcu);
283
284 return 0;
285}
286
287int amdgpu_amdkfd_remove_fence_on_pt_pd_bos(struct amdgpu_bo *bo)
288{
289 struct amdgpu_bo *root = bo;
290 struct amdgpu_vm_bo_base *vm_bo;
291 struct amdgpu_vm *vm;
292 struct amdkfd_process_info *info;
293 struct amdgpu_amdkfd_fence *ef;
294 int ret;
295
296
297 while (root->parent)
298 root = root->parent;
299
300 vm_bo = root->vm_bo;
301 if (!vm_bo)
302 return 0;
303
304 vm = vm_bo->vm;
305 if (!vm)
306 return 0;
307
308 info = vm->process_info;
309 if (!info || !info->eviction_fence)
310 return 0;
311
312 ef = container_of(dma_fence_get(&info->eviction_fence->base),
313 struct amdgpu_amdkfd_fence, base);
314
315 BUG_ON(!dma_resv_trylock(bo->tbo.base.resv));
316 ret = amdgpu_amdkfd_remove_eviction_fence(bo, ef);
317 dma_resv_unlock(bo->tbo.base.resv);
318
319 dma_fence_put(&ef->base);
320 return ret;
321}
322
323static int amdgpu_amdkfd_bo_validate(struct amdgpu_bo *bo, uint32_t domain,
324 bool wait)
325{
326 struct ttm_operation_ctx ctx = { false, false };
327 int ret;
328
329 if (WARN(amdgpu_ttm_tt_get_usermm(bo->tbo.ttm),
330 "Called with userptr BO"))
331 return -EINVAL;
332
333 amdgpu_bo_placement_from_domain(bo, domain);
334
335 ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
336 if (ret)
337 goto validate_fail;
338 if (wait)
339 amdgpu_bo_sync_wait(bo, AMDGPU_FENCE_OWNER_KFD, false);
340
341validate_fail:
342 return ret;
343}
344
345static int amdgpu_amdkfd_validate_vm_bo(void *_unused, struct amdgpu_bo *bo)
346{
347 return amdgpu_amdkfd_bo_validate(bo, bo->allowed_domains, false);
348}

/*
 * vm_validate_pt_pd_bos - Validate the page table and page directory BOs
 * of a VM, refresh the cached page directory address and kmap the PD if
 * the VM uses CPU-based page table updates.
 */
357static int vm_validate_pt_pd_bos(struct amdgpu_vm *vm)
358{
359 struct amdgpu_bo *pd = vm->root.bo;
360 struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
361 int ret;
362
363 ret = amdgpu_vm_validate_pt_bos(adev, vm, amdgpu_amdkfd_validate_vm_bo, NULL);
364 if (ret) {
365 pr_err("failed to validate PT BOs\n");
366 return ret;
367 }
368
369 ret = amdgpu_amdkfd_validate_vm_bo(NULL, pd);
370 if (ret) {
371 pr_err("failed to validate PD\n");
372 return ret;
373 }
374
375 vm->pd_phys_addr = amdgpu_gmc_pd_addr(vm->root.bo);
376
377 if (vm->use_cpu_for_update) {
378 ret = amdgpu_bo_kmap(pd, NULL);
379 if (ret) {
380 pr_err("failed to kmap PD, ret=%d\n", ret);
381 return ret;
382 }
383 }
384
385 return 0;
386}
387
388static int vm_update_pds(struct amdgpu_vm *vm, struct amdgpu_sync *sync)
389{
390 struct amdgpu_bo *pd = vm->root.bo;
391 struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
392 int ret;
393
394 ret = amdgpu_vm_update_pdes(adev, vm, false);
395 if (ret)
396 return ret;
397
398 return amdgpu_sync_fence(sync, vm->last_update);
399}
400
401static uint64_t get_pte_flags(struct amdgpu_device *adev, struct kgd_mem *mem)
402{
403 struct amdgpu_device *bo_adev = amdgpu_ttm_adev(mem->bo->tbo.bdev);
404 bool coherent = mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_COHERENT;
405 bool uncached = mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_UNCACHED;
406 uint32_t mapping_flags;
407 uint64_t pte_flags;
408 bool snoop = false;
409
410 mapping_flags = AMDGPU_VM_PAGE_READABLE;
411 if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE)
412 mapping_flags |= AMDGPU_VM_PAGE_WRITEABLE;
413 if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE)
414 mapping_flags |= AMDGPU_VM_PAGE_EXECUTABLE;
415
416 switch (adev->asic_type) {
417 case CHIP_ARCTURUS:
418 if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
419 if (bo_adev == adev)
420 mapping_flags |= coherent ?
421 AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW;
422 else
423 mapping_flags |= coherent ?
424 AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
425 } else {
426 mapping_flags |= coherent ?
427 AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
428 }
429 break;
430 case CHIP_ALDEBARAN:
431 if (coherent && uncached) {
432 if (adev->gmc.xgmi.connected_to_cpu ||
433 !(mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM))
434 snoop = true;
435 mapping_flags |= AMDGPU_VM_MTYPE_UC;
436 } else if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
437 if (bo_adev == adev) {
438 mapping_flags |= coherent ?
439 AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW;
440 if (adev->gmc.xgmi.connected_to_cpu)
441 snoop = true;
442 } else {
443 mapping_flags |= coherent ?
444 AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
445 if (amdgpu_xgmi_same_hive(adev, bo_adev))
446 snoop = true;
447 }
448 } else {
449 snoop = true;
450 mapping_flags |= coherent ?
451 AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
452 }
453 break;
454 default:
455 mapping_flags |= coherent ?
456 AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
457 }
458
459 pte_flags = amdgpu_gem_va_map_flags(adev, mapping_flags);
460 pte_flags |= snoop ? AMDGPU_PTE_SNOOPED : 0;
461
462 return pte_flags;
463}
464
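/*
 * kfd_mem_dmamap_userptr - Build and DMA-map an SG table for a userptr
 * attachment from the user pages of the original BO, then validate the
 * SG BO into GTT so the mapping is bound for the attachment's GPU.
 */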
465static int
466kfd_mem_dmamap_userptr(struct kgd_mem *mem,
467 struct kfd_mem_attachment *attachment)
468{
469 enum dma_data_direction direction =
470 mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ?
471 DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
472 struct ttm_operation_ctx ctx = {.interruptible = true};
473 struct amdgpu_bo *bo = attachment->bo_va->base.bo;
474 struct amdgpu_device *adev = attachment->adev;
475 struct ttm_tt *src_ttm = mem->bo->tbo.ttm;
476 struct ttm_tt *ttm = bo->tbo.ttm;
477 int ret;
478
	ttm->sg = kmalloc(sizeof(*ttm->sg), GFP_KERNEL);
	if (unlikely(!ttm->sg))
		return -ENOMEM;

	/* Don't leak the freshly allocated sg table on a size mismatch */
	if (WARN_ON(ttm->num_pages != src_ttm->num_pages)) {
		ret = -EINVAL;
		goto free_sg;
	}
485
486
487 ret = sg_alloc_table_from_pages(ttm->sg, src_ttm->pages,
488 ttm->num_pages, 0,
489 (u64)ttm->num_pages << PAGE_SHIFT,
490 GFP_KERNEL);
491 if (unlikely(ret))
492 goto free_sg;
493
494 ret = dma_map_sgtable(adev->dev, ttm->sg, direction, 0);
495 if (unlikely(ret))
496 goto release_sg;
497
498 drm_prime_sg_to_dma_addr_array(ttm->sg, ttm->dma_address,
499 ttm->num_pages);
500
501 amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
502 ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
503 if (ret)
504 goto unmap_sg;
505
506 return 0;
507
508unmap_sg:
509 dma_unmap_sgtable(adev->dev, ttm->sg, direction, 0);
510release_sg:
511 pr_err("DMA map userptr failed: %d\n", ret);
512 sg_free_table(ttm->sg);
513free_sg:
514 kfree(ttm->sg);
515 ttm->sg = NULL;
516 return ret;
517}
518
519static int
520kfd_mem_dmamap_dmabuf(struct kfd_mem_attachment *attachment)
521{
522 struct ttm_operation_ctx ctx = {.interruptible = true};
523 struct amdgpu_bo *bo = attachment->bo_va->base.bo;
524
525 amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
526 return ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
527}
528
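/* DMA-map an attachment for its GPU, depending on how it is attached */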
529static int
530kfd_mem_dmamap_attachment(struct kgd_mem *mem,
531 struct kfd_mem_attachment *attachment)
532{
533 switch (attachment->type) {
534 case KFD_MEM_ATT_SHARED:
535 return 0;
536 case KFD_MEM_ATT_USERPTR:
537 return kfd_mem_dmamap_userptr(mem, attachment);
538 case KFD_MEM_ATT_DMABUF:
539 return kfd_mem_dmamap_dmabuf(attachment);
540 default:
541 WARN_ON_ONCE(1);
542 }
543 return -EINVAL;
544}
545
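/*
 * kfd_mem_dmaunmap_userptr - Undo kfd_mem_dmamap_userptr: unbind the SG
 * BO by moving it to the CPU domain, then DMA-unmap and free its SG table.
 */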
546static void
547kfd_mem_dmaunmap_userptr(struct kgd_mem *mem,
548 struct kfd_mem_attachment *attachment)
549{
550 enum dma_data_direction direction =
551 mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ?
552 DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
553 struct ttm_operation_ctx ctx = {.interruptible = false};
554 struct amdgpu_bo *bo = attachment->bo_va->base.bo;
555 struct amdgpu_device *adev = attachment->adev;
556 struct ttm_tt *ttm = bo->tbo.ttm;
557
558 if (unlikely(!ttm->sg))
559 return;
560
561 amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
562 ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
563
564 dma_unmap_sgtable(adev->dev, ttm->sg, direction, 0);
565 sg_free_table(ttm->sg);
566 ttm->sg = NULL;
567}
568
569static void
570kfd_mem_dmaunmap_dmabuf(struct kfd_mem_attachment *attachment)
571{
572 struct ttm_operation_ctx ctx = {.interruptible = true};
573 struct amdgpu_bo *bo = attachment->bo_va->base.bo;
574
575 amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
576 ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
577}
578
579static void
580kfd_mem_dmaunmap_attachment(struct kgd_mem *mem,
581 struct kfd_mem_attachment *attachment)
582{
583 switch (attachment->type) {
584 case KFD_MEM_ATT_SHARED:
585 break;
586 case KFD_MEM_ATT_USERPTR:
587 kfd_mem_dmaunmap_userptr(mem, attachment);
588 break;
589 case KFD_MEM_ATT_DMABUF:
590 kfd_mem_dmaunmap_dmabuf(attachment);
591 break;
592 default:
593 WARN_ON_ONCE(1);
594 }
595}
596
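/*
 * kfd_mem_attach_userptr - Create an SG BO that shares the original
 * userptr BO's reservation object. Its SG table is only filled in later,
 * when the attachment is DMA-mapped for the importing GPU.
 */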
597static int
598kfd_mem_attach_userptr(struct amdgpu_device *adev, struct kgd_mem *mem,
599 struct amdgpu_bo **bo)
600{
601 unsigned long bo_size = mem->bo->tbo.base.size;
602 struct drm_gem_object *gobj;
603 int ret;
604
605 ret = amdgpu_bo_reserve(mem->bo, false);
606 if (ret)
607 return ret;
608
609 ret = amdgpu_gem_object_create(adev, bo_size, 1,
610 AMDGPU_GEM_DOMAIN_CPU,
611 AMDGPU_GEM_CREATE_PREEMPTIBLE,
612 ttm_bo_type_sg, mem->bo->tbo.base.resv,
613 &gobj);
614 amdgpu_bo_unreserve(mem->bo);
615 if (ret)
616 return ret;
617
618 *bo = gem_to_amdgpu_bo(gobj);
619 (*bo)->parent = amdgpu_bo_ref(mem->bo);
620
621 return 0;
622}
623
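/*
 * kfd_mem_attach_dmabuf - Export the original BO as a dma-buf (once) and
 * import it on the attaching device. The import takes an extra reference
 * on the dma-buf, which is dropped again right away; mem->dmabuf keeps
 * the long-term reference.
 */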
624static int
625kfd_mem_attach_dmabuf(struct amdgpu_device *adev, struct kgd_mem *mem,
626 struct amdgpu_bo **bo)
627{
628 struct drm_gem_object *gobj;
629 int ret;
630
631 if (!mem->dmabuf) {
632 mem->dmabuf = amdgpu_gem_prime_export(&mem->bo->tbo.base,
633 mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ?
634 DRM_RDWR : 0);
635 if (IS_ERR(mem->dmabuf)) {
636 ret = PTR_ERR(mem->dmabuf);
637 mem->dmabuf = NULL;
638 return ret;
639 }
640 }
641
642 gobj = amdgpu_gem_prime_import(adev_to_drm(adev), mem->dmabuf);
643 if (IS_ERR(gobj))
644 return PTR_ERR(gobj);
645
646
647
648
649
650 dma_buf_put(mem->dmabuf);
651
652 *bo = gem_to_amdgpu_bo(gobj);
653 (*bo)->flags |= AMDGPU_GEM_CREATE_PREEMPTIBLE;
654 (*bo)->parent = amdgpu_bo_ref(mem->bo);
655
656 return 0;
657}

/*
 * kfd_mem_attach - Add a BO to a VM
 *
 * Performs the once-per-VM setup for a BO: allocates the attachment
 * structure(s), decides how the BO is attached (shared with the original
 * BO, via a userptr SG BO, or via dma-buf import), adds a bo_va to the
 * VM and computes the PTE flags. For AQL queues a second attachment is
 * created at va + bo_size.
 */
672static int kfd_mem_attach(struct amdgpu_device *adev, struct kgd_mem *mem,
673 struct amdgpu_vm *vm, bool is_aql)
674{
675 struct amdgpu_device *bo_adev = amdgpu_ttm_adev(mem->bo->tbo.bdev);
676 unsigned long bo_size = mem->bo->tbo.base.size;
677 uint64_t va = mem->va;
678 struct kfd_mem_attachment *attachment[2] = {NULL, NULL};
679 struct amdgpu_bo *bo[2] = {NULL, NULL};
680 int i, ret;
681
682 if (!va) {
683 pr_err("Invalid VA when adding BO to VM\n");
684 return -EINVAL;
685 }
686
687 for (i = 0; i <= is_aql; i++) {
688 attachment[i] = kzalloc(sizeof(*attachment[i]), GFP_KERNEL);
689 if (unlikely(!attachment[i])) {
690 ret = -ENOMEM;
691 goto unwind;
692 }
693
694 pr_debug("\t add VA 0x%llx - 0x%llx to vm %p\n", va,
695 va + bo_size, vm);
696
697 if (adev == bo_adev || (mem->domain == AMDGPU_GEM_DOMAIN_VRAM &&
698 amdgpu_xgmi_same_hive(adev, bo_adev))) {
699
700
701
702 attachment[i]->type = KFD_MEM_ATT_SHARED;
703 bo[i] = mem->bo;
704 drm_gem_object_get(&bo[i]->tbo.base);
705 } else if (i > 0) {
706
707 attachment[i]->type = KFD_MEM_ATT_SHARED;
708 bo[i] = bo[0];
709 drm_gem_object_get(&bo[i]->tbo.base);
710 } else if (amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm)) {
711
712 attachment[i]->type = KFD_MEM_ATT_USERPTR;
713 ret = kfd_mem_attach_userptr(adev, mem, &bo[i]);
714 if (ret)
715 goto unwind;
716 } else if (mem->domain == AMDGPU_GEM_DOMAIN_GTT &&
717 mem->bo->tbo.type != ttm_bo_type_sg) {
718
719
720
721
722 attachment[i]->type = KFD_MEM_ATT_DMABUF;
723 ret = kfd_mem_attach_dmabuf(adev, mem, &bo[i]);
724 if (ret)
725 goto unwind;
726 } else {
727
728
729
730 attachment[i]->type = KFD_MEM_ATT_SHARED;
731 bo[i] = mem->bo;
732 drm_gem_object_get(&bo[i]->tbo.base);
733 }
734
735
736 attachment[i]->bo_va = amdgpu_vm_bo_add(adev, vm, bo[i]);
737 if (unlikely(!attachment[i]->bo_va)) {
738 ret = -ENOMEM;
739 pr_err("Failed to add BO object to VM. ret == %d\n",
740 ret);
741 goto unwind;
742 }
743
744 attachment[i]->va = va;
745 attachment[i]->pte_flags = get_pte_flags(adev, mem);
746 attachment[i]->adev = adev;
747 list_add(&attachment[i]->list, &mem->attachments);
748
749 va += bo_size;
750 }
751
752 return 0;
753
754unwind:
755 for (; i >= 0; i--) {
756 if (!attachment[i])
757 continue;
758 if (attachment[i]->bo_va) {
759 amdgpu_vm_bo_rmv(adev, attachment[i]->bo_va);
760 list_del(&attachment[i]->list);
761 }
762 if (bo[i])
763 drm_gem_object_put(&bo[i]->tbo.base);
764 kfree(attachment[i]);
765 }
766 return ret;
767}
768
769static void kfd_mem_detach(struct kfd_mem_attachment *attachment)
770{
771 struct amdgpu_bo *bo = attachment->bo_va->base.bo;
772
773 pr_debug("\t remove VA 0x%llx in entry %p\n",
774 attachment->va, attachment);
775 amdgpu_vm_bo_rmv(attachment->adev, attachment->bo_va);
776 drm_gem_object_put(&bo->tbo.base);
777 list_del(&attachment->list);
778 kfree(attachment);
779}
780
781static void add_kgd_mem_to_kfd_bo_list(struct kgd_mem *mem,
782 struct amdkfd_process_info *process_info,
783 bool userptr)
784{
785 struct ttm_validate_buffer *entry = &mem->validate_list;
786 struct amdgpu_bo *bo = mem->bo;
787
788 INIT_LIST_HEAD(&entry->head);
789 entry->num_shared = 1;
790 entry->bo = &bo->tbo;
791 mutex_lock(&process_info->lock);
792 if (userptr)
793 list_add_tail(&entry->head, &process_info->userptr_valid_list);
794 else
795 list_add_tail(&entry->head, &process_info->kfd_bo_list);
796 mutex_unlock(&process_info->lock);
797}
798
799static void remove_kgd_mem_from_kfd_bo_list(struct kgd_mem *mem,
800 struct amdkfd_process_info *process_info)
801{
802 struct ttm_validate_buffer *bo_list_entry;
803
804 bo_list_entry = &mem->validate_list;
805 mutex_lock(&process_info->lock);
806 list_del(&bo_list_entry->head);
807 mutex_unlock(&process_info->lock);
808}

/*
 * init_user_pages - Initialize the backing of a userptr BO
 *
 * Sets the userptr address on the BO, registers an MMU notifier for the
 * range, gets the user pages and validates the BO so that its ttm_tt is
 * populated with them. Takes the process_info lock.
 */
822static int init_user_pages(struct kgd_mem *mem, uint64_t user_addr)
823{
824 struct amdkfd_process_info *process_info = mem->process_info;
825 struct amdgpu_bo *bo = mem->bo;
826 struct ttm_operation_ctx ctx = { true, false };
827 int ret = 0;
828
829 mutex_lock(&process_info->lock);
830
831 ret = amdgpu_ttm_tt_set_userptr(&bo->tbo, user_addr, 0);
832 if (ret) {
833 pr_err("%s: Failed to set userptr: %d\n", __func__, ret);
834 goto out;
835 }
836
837 ret = amdgpu_mn_register(bo, user_addr);
838 if (ret) {
839 pr_err("%s: Failed to register MMU notifier: %d\n",
840 __func__, ret);
841 goto out;
842 }
843
844 ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages);
845 if (ret) {
846 pr_err("%s: Failed to get user pages: %d\n", __func__, ret);
847 goto unregister_out;
848 }
849
850 ret = amdgpu_bo_reserve(bo, true);
851 if (ret) {
852 pr_err("%s: Failed to reserve BO\n", __func__);
853 goto release_out;
854 }
855 amdgpu_bo_placement_from_domain(bo, mem->domain);
856 ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
857 if (ret)
858 pr_err("%s: failed to validate BO\n", __func__);
859 amdgpu_bo_unreserve(bo);
860
861release_out:
862 amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);
863unregister_out:
864 if (ret)
865 amdgpu_mn_unregister(bo);
866out:
867 mutex_unlock(&process_info->lock);
868 return ret;
869}

/*
 * Reservation context for a kgd_mem BO and the page directories of the
 * VM(s) it is attached to.
 *
 * Typical use (sketch):
 *
 *	struct bo_vm_reservation_context ctx;
 *
 *	ret = reserve_bo_and_vm(mem, avm, &ctx);
 *	if (ret)
 *		return ret;
 *	... map, unmap or validate under the reservation ...
 *	unreserve_bo_and_vms(&ctx, false, false);
 */
876struct bo_vm_reservation_context {
877 struct amdgpu_bo_list_entry kfd_bo;
878 unsigned int n_vms;
879 struct amdgpu_bo_list_entry *vm_pd;
880 struct ww_acquire_ctx ticket;
881 struct list_head list, duplicates;
882 struct amdgpu_sync *sync;
883 bool reserved;
884};
885
886enum bo_vm_match {
887 BO_VM_NOT_MAPPED = 0,
888 BO_VM_MAPPED,
889 BO_VM_ALL,
890};

/*
 * reserve_bo_and_vm - Reserve the BO of a kgd_mem and the page directory
 * of a single VM, in preparation for mapping or validation.
 */
898static int reserve_bo_and_vm(struct kgd_mem *mem,
899 struct amdgpu_vm *vm,
900 struct bo_vm_reservation_context *ctx)
901{
902 struct amdgpu_bo *bo = mem->bo;
903 int ret;
904
905 WARN_ON(!vm);
906
907 ctx->reserved = false;
908 ctx->n_vms = 1;
909 ctx->sync = &mem->sync;
910
911 INIT_LIST_HEAD(&ctx->list);
912 INIT_LIST_HEAD(&ctx->duplicates);
913
914 ctx->vm_pd = kcalloc(ctx->n_vms, sizeof(*ctx->vm_pd), GFP_KERNEL);
915 if (!ctx->vm_pd)
916 return -ENOMEM;
917
918 ctx->kfd_bo.priority = 0;
919 ctx->kfd_bo.tv.bo = &bo->tbo;
920 ctx->kfd_bo.tv.num_shared = 1;
921 list_add(&ctx->kfd_bo.tv.head, &ctx->list);
922
923 amdgpu_vm_get_pd_bo(vm, &ctx->list, &ctx->vm_pd[0]);
924
925 ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list,
926 false, &ctx->duplicates);
927 if (ret) {
928 pr_err("Failed to reserve buffers in ttm.\n");
929 kfree(ctx->vm_pd);
930 ctx->vm_pd = NULL;
931 return ret;
932 }
933
934 ctx->reserved = true;
935 return 0;
936}

/*
 * reserve_bo_and_cond_vms - Reserve the BO of a kgd_mem and the page
 * directories of all VMs it is attached to, optionally filtered by VM
 * and by mapping state (mapped, not mapped, or all).
 */
948static int reserve_bo_and_cond_vms(struct kgd_mem *mem,
949 struct amdgpu_vm *vm, enum bo_vm_match map_type,
950 struct bo_vm_reservation_context *ctx)
951{
952 struct amdgpu_bo *bo = mem->bo;
953 struct kfd_mem_attachment *entry;
954 unsigned int i;
955 int ret;
956
957 ctx->reserved = false;
958 ctx->n_vms = 0;
959 ctx->vm_pd = NULL;
960 ctx->sync = &mem->sync;
961
962 INIT_LIST_HEAD(&ctx->list);
963 INIT_LIST_HEAD(&ctx->duplicates);
964
965 list_for_each_entry(entry, &mem->attachments, list) {
966 if ((vm && vm != entry->bo_va->base.vm) ||
967 (entry->is_mapped != map_type
968 && map_type != BO_VM_ALL))
969 continue;
970
971 ctx->n_vms++;
972 }
973
974 if (ctx->n_vms != 0) {
975 ctx->vm_pd = kcalloc(ctx->n_vms, sizeof(*ctx->vm_pd),
976 GFP_KERNEL);
977 if (!ctx->vm_pd)
978 return -ENOMEM;
979 }
980
981 ctx->kfd_bo.priority = 0;
982 ctx->kfd_bo.tv.bo = &bo->tbo;
983 ctx->kfd_bo.tv.num_shared = 1;
984 list_add(&ctx->kfd_bo.tv.head, &ctx->list);
985
986 i = 0;
987 list_for_each_entry(entry, &mem->attachments, list) {
988 if ((vm && vm != entry->bo_va->base.vm) ||
989 (entry->is_mapped != map_type
990 && map_type != BO_VM_ALL))
991 continue;
992
993 amdgpu_vm_get_pd_bo(entry->bo_va->base.vm, &ctx->list,
994 &ctx->vm_pd[i]);
995 i++;
996 }
997
998 ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list,
999 false, &ctx->duplicates);
1000 if (ret) {
1001 pr_err("Failed to reserve buffers in ttm.\n");
1002 kfree(ctx->vm_pd);
1003 ctx->vm_pd = NULL;
1004 return ret;
1005 }
1006
1007 ctx->reserved = true;
1008 return 0;
1009}

/*
 * unreserve_bo_and_vms - Release the reservations taken by
 * reserve_bo_and_vm() or reserve_bo_and_cond_vms(), optionally waiting
 * for the sync object first.
 */
1021static int unreserve_bo_and_vms(struct bo_vm_reservation_context *ctx,
1022 bool wait, bool intr)
1023{
1024 int ret = 0;
1025
1026 if (wait)
1027 ret = amdgpu_sync_wait(ctx->sync, intr);
1028
1029 if (ctx->reserved)
1030 ttm_eu_backoff_reservation(&ctx->ticket, &ctx->list);
1031 kfree(ctx->vm_pd);
1032
1033 ctx->sync = NULL;
1034
1035 ctx->reserved = false;
1036 ctx->vm_pd = NULL;
1037
1038 return ret;
1039}
1040
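/*
 * unmap_bo_from_gpuvm - Remove an attachment's VA mapping from its VM,
 * clear the freed mappings, add the page-table update fence to @sync and
 * DMA-unmap the attachment.
 */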
1041static void unmap_bo_from_gpuvm(struct kgd_mem *mem,
1042 struct kfd_mem_attachment *entry,
1043 struct amdgpu_sync *sync)
1044{
1045 struct amdgpu_bo_va *bo_va = entry->bo_va;
1046 struct amdgpu_device *adev = entry->adev;
1047 struct amdgpu_vm *vm = bo_va->base.vm;
1048
1049 amdgpu_vm_bo_unmap(adev, bo_va, entry->va);
1050
1051 amdgpu_vm_clear_freed(adev, vm, &bo_va->last_pt_update);
1052
1053 amdgpu_sync_fence(sync, bo_va->last_pt_update);
1054
1055 kfd_mem_dmaunmap_attachment(mem, entry);
1056}
1057
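/*
 * update_gpuvm_pte - DMA-map the attachment if necessary and update the
 * VM page tables for its bo_va. The resulting page-table update fence is
 * added to @sync.
 */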
1058static int update_gpuvm_pte(struct kgd_mem *mem,
1059 struct kfd_mem_attachment *entry,
1060 struct amdgpu_sync *sync)
1061{
1062 struct amdgpu_bo_va *bo_va = entry->bo_va;
1063 struct amdgpu_device *adev = entry->adev;
1064 int ret;
1065
1066 ret = kfd_mem_dmamap_attachment(mem, entry);
1067 if (ret)
1068 return ret;
1069
1070
1071 ret = amdgpu_vm_bo_update(adev, bo_va, false);
1072 if (ret) {
1073 pr_err("amdgpu_vm_bo_update failed\n");
1074 return ret;
1075 }
1076
1077 return amdgpu_sync_fence(sync, bo_va->last_pt_update);
1078}
1079
1080static int map_bo_to_gpuvm(struct kgd_mem *mem,
1081 struct kfd_mem_attachment *entry,
1082 struct amdgpu_sync *sync,
1083 bool no_update_pte)
1084{
1085 int ret;
1086
1087
1088 ret = amdgpu_vm_bo_map(entry->adev, entry->bo_va, entry->va, 0,
1089 amdgpu_bo_size(entry->bo_va->base.bo),
1090 entry->pte_flags);
1091 if (ret) {
1092 pr_err("Failed to map VA 0x%llx in vm. ret %d\n",
1093 entry->va, ret);
1094 return ret;
1095 }
1096
1097 if (no_update_pte)
1098 return 0;
1099
1100 ret = update_gpuvm_pte(mem, entry, sync);
1101 if (ret) {
1102 pr_err("update_gpuvm_pte() failed\n");
1103 goto update_gpuvm_pte_failed;
1104 }
1105
1106 return 0;
1107
1108update_gpuvm_pte_failed:
1109 unmap_bo_from_gpuvm(mem, entry, sync);
1110 return ret;
1111}
1112
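/*
 * create_doorbell_sg - Build a single-entry SG table that describes a
 * doorbell or MMIO page by its bus address. There are no struct pages
 * behind it, so only the DMA address and length fields are filled in.
 */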
1113static struct sg_table *create_doorbell_sg(uint64_t addr, uint32_t size)
1114{
1115 struct sg_table *sg = kmalloc(sizeof(*sg), GFP_KERNEL);
1116
1117 if (!sg)
1118 return NULL;
1119 if (sg_alloc_table(sg, 1, GFP_KERNEL)) {
1120 kfree(sg);
1121 return NULL;
1122 }
1123 sg->sgl->dma_address = addr;
1124 sg->sgl->length = size;
1125#ifdef CONFIG_NEED_SG_DMA_LENGTH
1126 sg->sgl->dma_length = size;
1127#endif
1128 return sg;
1129}
1130
1131static int process_validate_vms(struct amdkfd_process_info *process_info)
1132{
1133 struct amdgpu_vm *peer_vm;
1134 int ret;
1135
1136 list_for_each_entry(peer_vm, &process_info->vm_list_head,
1137 vm_list_node) {
1138 ret = vm_validate_pt_pd_bos(peer_vm);
1139 if (ret)
1140 return ret;
1141 }
1142
1143 return 0;
1144}
1145
1146static int process_sync_pds_resv(struct amdkfd_process_info *process_info,
1147 struct amdgpu_sync *sync)
1148{
1149 struct amdgpu_vm *peer_vm;
1150 int ret;
1151
1152 list_for_each_entry(peer_vm, &process_info->vm_list_head,
1153 vm_list_node) {
1154 struct amdgpu_bo *pd = peer_vm->root.bo;
1155
1156 ret = amdgpu_sync_resv(NULL, sync, pd->tbo.base.resv,
1157 AMDGPU_SYNC_NE_OWNER,
1158 AMDGPU_FENCE_OWNER_KFD);
1159 if (ret)
1160 return ret;
1161 }
1162
1163 return 0;
1164}
1165
1166static int process_update_pds(struct amdkfd_process_info *process_info,
1167 struct amdgpu_sync *sync)
1168{
1169 struct amdgpu_vm *peer_vm;
1170 int ret;
1171
1172 list_for_each_entry(peer_vm, &process_info->vm_list_head,
1173 vm_list_node) {
1174 ret = vm_update_pds(peer_vm, sync);
1175 if (ret)
1176 return ret;
1177 }
1178
1179 return 0;
1180}
1181
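/*
 * init_kfd_vm - Prepare a VM for use by KFD. On first call per process,
 * allocates the shared amdkfd_process_info (lock, BO lists, eviction
 * fence, userptr restore worker). Then validates the page directory,
 * attaches the eviction fence to it and adds the VM to the process list.
 */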
1182static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info,
1183 struct dma_fence **ef)
1184{
1185 struct amdkfd_process_info *info = NULL;
1186 int ret;
1187
1188 if (!*process_info) {
1189 info = kzalloc(sizeof(*info), GFP_KERNEL);
1190 if (!info)
1191 return -ENOMEM;
1192
1193 mutex_init(&info->lock);
1194 INIT_LIST_HEAD(&info->vm_list_head);
1195 INIT_LIST_HEAD(&info->kfd_bo_list);
1196 INIT_LIST_HEAD(&info->userptr_valid_list);
1197 INIT_LIST_HEAD(&info->userptr_inval_list);
1198
1199 info->eviction_fence =
1200 amdgpu_amdkfd_fence_create(dma_fence_context_alloc(1),
1201 current->mm,
1202 NULL);
1203 if (!info->eviction_fence) {
1204 pr_err("Failed to create eviction fence\n");
1205 ret = -ENOMEM;
1206 goto create_evict_fence_fail;
1207 }
1208
1209 info->pid = get_task_pid(current->group_leader, PIDTYPE_PID);
1210 atomic_set(&info->evicted_bos, 0);
1211 INIT_DELAYED_WORK(&info->restore_userptr_work,
1212 amdgpu_amdkfd_restore_userptr_worker);
1213
1214 *process_info = info;
1215 *ef = dma_fence_get(&info->eviction_fence->base);
1216 }
1217
1218 vm->process_info = *process_info;
1219
1220
1221 ret = amdgpu_bo_reserve(vm->root.bo, true);
1222 if (ret)
1223 goto reserve_pd_fail;
1224 ret = vm_validate_pt_pd_bos(vm);
1225 if (ret) {
1226 pr_err("validate_pt_pd_bos() failed\n");
1227 goto validate_pd_fail;
1228 }
1229 ret = amdgpu_bo_sync_wait(vm->root.bo,
1230 AMDGPU_FENCE_OWNER_KFD, false);
1231 if (ret)
1232 goto wait_pd_fail;
1233 ret = dma_resv_reserve_shared(vm->root.bo->tbo.base.resv, 1);
1234 if (ret)
1235 goto reserve_shared_fail;
1236 amdgpu_bo_fence(vm->root.bo,
1237 &vm->process_info->eviction_fence->base, true);
1238 amdgpu_bo_unreserve(vm->root.bo);
1239
1240
1241 mutex_lock(&vm->process_info->lock);
1242 list_add_tail(&vm->vm_list_node,
1243 &(vm->process_info->vm_list_head));
1244 vm->process_info->n_vms++;
1245 mutex_unlock(&vm->process_info->lock);
1246
1247 return 0;
1248
1249reserve_shared_fail:
1250wait_pd_fail:
1251validate_pd_fail:
1252 amdgpu_bo_unreserve(vm->root.bo);
1253reserve_pd_fail:
1254 vm->process_info = NULL;
1255 if (info) {
1256
1257 dma_fence_put(&info->eviction_fence->base);
1258 dma_fence_put(*ef);
1259 *ef = NULL;
1260 *process_info = NULL;
1261 put_pid(info->pid);
1262create_evict_fence_fail:
1263 mutex_destroy(&info->lock);
1264 kfree(info);
1265 }
1266 return ret;
1267}
1268
1269int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct kgd_dev *kgd,
1270 struct file *filp, u32 pasid,
1271 void **process_info,
1272 struct dma_fence **ef)
1273{
1274 struct amdgpu_device *adev = get_amdgpu_device(kgd);
1275 struct amdgpu_fpriv *drv_priv;
1276 struct amdgpu_vm *avm;
1277 int ret;
1278
1279 ret = amdgpu_file_to_fpriv(filp, &drv_priv);
1280 if (ret)
1281 return ret;
1282 avm = &drv_priv->vm;
1283
1284
1285 if (avm->process_info)
1286 return -EINVAL;
1287
1288
1289 ret = amdgpu_vm_make_compute(adev, avm, pasid);
1290 if (ret)
1291 return ret;
1292
1293
1294 ret = init_kfd_vm(avm, process_info, ef);
1295 if (ret)
1296 return ret;
1297
1298 amdgpu_vm_set_task_info(avm);
1299
1300 return 0;
1301}
1302
1303void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
1304 struct amdgpu_vm *vm)
1305{
1306 struct amdkfd_process_info *process_info = vm->process_info;
1307 struct amdgpu_bo *pd = vm->root.bo;
1308
1309 if (!process_info)
1310 return;
1311
1312
1313 amdgpu_bo_reserve(pd, false);
1314 amdgpu_bo_fence(pd, NULL, false);
1315 amdgpu_bo_unreserve(pd);
1316
1317
1318 mutex_lock(&process_info->lock);
1319 process_info->n_vms--;
1320 list_del(&vm->vm_list_node);
1321 mutex_unlock(&process_info->lock);
1322
1323 vm->process_info = NULL;
1324
1325
1326 if (!process_info->n_vms) {
1327 WARN_ON(!list_empty(&process_info->kfd_bo_list));
1328 WARN_ON(!list_empty(&process_info->userptr_valid_list));
1329 WARN_ON(!list_empty(&process_info->userptr_inval_list));
1330
1331 dma_fence_put(&process_info->eviction_fence->base);
1332 cancel_delayed_work_sync(&process_info->restore_userptr_work);
1333 put_pid(process_info->pid);
1334 mutex_destroy(&process_info->lock);
1335 kfree(process_info);
1336 }
1337}
1338
1339void amdgpu_amdkfd_gpuvm_release_process_vm(struct kgd_dev *kgd, void *drm_priv)
1340{
1341 struct amdgpu_device *adev = get_amdgpu_device(kgd);
1342 struct amdgpu_vm *avm;
1343
1344 if (WARN_ON(!kgd || !drm_priv))
1345 return;
1346
1347 avm = drm_priv_to_vm(drm_priv);
1348
1349 pr_debug("Releasing process vm %p\n", avm);
1350
1351
1352
1353
1354
1355
1356
1357 amdgpu_vm_release_compute(adev, avm);
1358}
1359
1360uint64_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *drm_priv)
1361{
1362 struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
1363 struct amdgpu_bo *pd = avm->root.bo;
1364 struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
1365
1366 if (adev->asic_type < CHIP_VEGA10)
1367 return avm->pd_phys_addr >> AMDGPU_GPU_PAGE_SHIFT;
1368 return avm->pd_phys_addr;
1369}
1370
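/*
 * amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu - Allocate VRAM, GTT, userptr,
 * doorbell or MMIO memory for a KFD process. Checks the memory limits,
 * creates the backing BO, adds it to the process BO list and, for
 * userptr allocations, sets up the MMU notifier and user pages.
 */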
1371int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
1372 struct kgd_dev *kgd, uint64_t va, uint64_t size,
1373 void *drm_priv, struct kgd_mem **mem,
1374 uint64_t *offset, uint32_t flags)
1375{
1376 struct amdgpu_device *adev = get_amdgpu_device(kgd);
1377 struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
1378 enum ttm_bo_type bo_type = ttm_bo_type_device;
1379 struct sg_table *sg = NULL;
1380 uint64_t user_addr = 0;
1381 struct amdgpu_bo *bo;
1382 struct drm_gem_object *gobj;
1383 u32 domain, alloc_domain;
1384 u64 alloc_flags;
1385 int ret;
1386
1387
1388
1389
1390 if (flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
1391 domain = alloc_domain = AMDGPU_GEM_DOMAIN_VRAM;
1392 alloc_flags = AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE;
1393 alloc_flags |= (flags & KFD_IOC_ALLOC_MEM_FLAGS_PUBLIC) ?
1394 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED : 0;
1395 } else if (flags & KFD_IOC_ALLOC_MEM_FLAGS_GTT) {
1396 domain = alloc_domain = AMDGPU_GEM_DOMAIN_GTT;
1397 alloc_flags = 0;
1398 } else if (flags & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) {
1399 domain = AMDGPU_GEM_DOMAIN_GTT;
1400 alloc_domain = AMDGPU_GEM_DOMAIN_CPU;
1401 alloc_flags = AMDGPU_GEM_CREATE_PREEMPTIBLE;
1402 if (!offset || !*offset)
1403 return -EINVAL;
1404 user_addr = untagged_addr(*offset);
1405 } else if (flags & (KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL |
1406 KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP)) {
1407 domain = AMDGPU_GEM_DOMAIN_GTT;
1408 alloc_domain = AMDGPU_GEM_DOMAIN_CPU;
1409 bo_type = ttm_bo_type_sg;
1410 alloc_flags = 0;
1411 if (size > UINT_MAX)
1412 return -EINVAL;
1413 sg = create_doorbell_sg(*offset, size);
1414 if (!sg)
1415 return -ENOMEM;
1416 } else {
1417 return -EINVAL;
1418 }
1419
1420 *mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
1421 if (!*mem) {
1422 ret = -ENOMEM;
1423 goto err;
1424 }
1425 INIT_LIST_HEAD(&(*mem)->attachments);
1426 mutex_init(&(*mem)->lock);
1427 (*mem)->aql_queue = !!(flags & KFD_IOC_ALLOC_MEM_FLAGS_AQL_QUEUE_MEM);
1428
1429
1430
1431
1432
1433 if ((*mem)->aql_queue)
1434 size = size >> 1;
1435
1436 (*mem)->alloc_flags = flags;
1437
1438 amdgpu_sync_create(&(*mem)->sync);
1439
1440 ret = amdgpu_amdkfd_reserve_mem_limit(adev, size, alloc_domain, !!sg);
1441 if (ret) {
1442 pr_debug("Insufficient memory\n");
1443 goto err_reserve_limit;
1444 }
1445
1446 pr_debug("\tcreate BO VA 0x%llx size 0x%llx domain %s\n",
1447 va, size, domain_string(alloc_domain));
1448
1449 ret = amdgpu_gem_object_create(adev, size, 1, alloc_domain, alloc_flags,
1450 bo_type, NULL, &gobj);
1451 if (ret) {
1452 pr_debug("Failed to create BO on domain %s. ret %d\n",
1453 domain_string(alloc_domain), ret);
1454 goto err_bo_create;
1455 }
1456 ret = drm_vma_node_allow(&gobj->vma_node, drm_priv);
1457 if (ret) {
1458 pr_debug("Failed to allow vma node access. ret %d\n", ret);
1459 goto err_node_allow;
1460 }
1461 bo = gem_to_amdgpu_bo(gobj);
1462 if (bo_type == ttm_bo_type_sg) {
1463 bo->tbo.sg = sg;
1464 bo->tbo.ttm->sg = sg;
1465 }
1466 bo->kfd_bo = *mem;
1467 (*mem)->bo = bo;
1468 if (user_addr)
1469 bo->flags |= AMDGPU_AMDKFD_CREATE_USERPTR_BO;
1470
1471 (*mem)->va = va;
1472 (*mem)->domain = domain;
1473 (*mem)->mapped_to_gpu_memory = 0;
1474 (*mem)->process_info = avm->process_info;
1475 add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info, user_addr);
1476
1477 if (user_addr) {
1478 ret = init_user_pages(*mem, user_addr);
1479 if (ret)
1480 goto allocate_init_user_pages_failed;
1481 }
1482
1483 if (offset)
1484 *offset = amdgpu_bo_mmap_offset(bo);
1485
1486 return 0;
1487
1488allocate_init_user_pages_failed:
1489 remove_kgd_mem_from_kfd_bo_list(*mem, avm->process_info);
1490 drm_vma_node_revoke(&gobj->vma_node, drm_priv);
1491err_node_allow:
1492 amdgpu_bo_unref(&bo);
1493
1494 goto err_reserve_limit;
1495err_bo_create:
1496 unreserve_mem_limit(adev, size, alloc_domain, !!sg);
1497err_reserve_limit:
1498 mutex_destroy(&(*mem)->lock);
1499 kfree(*mem);
1500err:
1501 if (sg) {
1502 sg_free_table(sg);
1503 kfree(sg);
1504 }
1505 return ret;
1506}
1507
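/*
 * amdgpu_amdkfd_gpuvm_free_memory_of_gpu - Free a kgd_mem allocation.
 * Fails with -EBUSY while the BO is still mapped on any GPU; otherwise
 * removes it from the process lists, detaches it from all VMs and
 * releases the BO and its bookkeeping.
 */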
1508int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
1509 struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv,
1510 uint64_t *size)
1511{
1512 struct amdkfd_process_info *process_info = mem->process_info;
1513 unsigned long bo_size = mem->bo->tbo.base.size;
1514 struct kfd_mem_attachment *entry, *tmp;
1515 struct bo_vm_reservation_context ctx;
1516 struct ttm_validate_buffer *bo_list_entry;
1517 unsigned int mapped_to_gpu_memory;
1518 int ret;
1519 bool is_imported = false;
1520
1521 mutex_lock(&mem->lock);
1522 mapped_to_gpu_memory = mem->mapped_to_gpu_memory;
1523 is_imported = mem->is_imported;
1524 mutex_unlock(&mem->lock);
1525
1526
1527
1528
1529 if (mapped_to_gpu_memory > 0) {
1530 pr_debug("BO VA 0x%llx size 0x%lx is still mapped.\n",
1531 mem->va, bo_size);
1532 return -EBUSY;
1533 }
1534
1535
1536 bo_list_entry = &mem->validate_list;
1537 mutex_lock(&process_info->lock);
1538 list_del(&bo_list_entry->head);
1539 mutex_unlock(&process_info->lock);
1540
1541
1542 amdgpu_mn_unregister(mem->bo);
1543
1544 ret = reserve_bo_and_cond_vms(mem, NULL, BO_VM_ALL, &ctx);
1545 if (unlikely(ret))
1546 return ret;
1547
1548
1549
1550
1551
1552 amdgpu_amdkfd_remove_eviction_fence(mem->bo,
1553 process_info->eviction_fence);
1554 pr_debug("Release VA 0x%llx - 0x%llx\n", mem->va,
1555 mem->va + bo_size * (1 + mem->aql_queue));
1556
1557 ret = unreserve_bo_and_vms(&ctx, false, false);
1558
1559
1560 list_for_each_entry_safe(entry, tmp, &mem->attachments, list)
1561 kfd_mem_detach(entry);
1562
1563
1564 amdgpu_sync_free(&mem->sync);
1565
1566
1567
1568
1569 if (mem->bo->tbo.sg) {
1570 sg_free_table(mem->bo->tbo.sg);
1571 kfree(mem->bo->tbo.sg);
1572 }
1573
1574
1575
1576
1577 if (size) {
1578 if ((mem->bo->preferred_domains == AMDGPU_GEM_DOMAIN_VRAM) &&
1579 (!is_imported))
1580 *size = bo_size;
1581 else
1582 *size = 0;
1583 }
1584
1585
1586 drm_vma_node_revoke(&mem->bo->tbo.base.vma_node, drm_priv);
1587 if (mem->dmabuf)
1588 dma_buf_put(mem->dmabuf);
1589 drm_gem_object_put(&mem->bo->tbo.base);
1590 mutex_destroy(&mem->lock);
1591 kfree(mem);
1592
1593 return ret;
1594}
1595
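/*
 * amdgpu_amdkfd_gpuvm_map_memory_to_gpu - Map a kgd_mem BO into a GPUVM.
 * Attaches the BO to the VM on first use, validates it into its domain,
 * maps every not-yet-mapped attachment for this VM and updates the page
 * directories. Non-userptr BOs also get the eviction fence added.
 */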
1596int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
1597 struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv)
1598{
1599 struct amdgpu_device *adev = get_amdgpu_device(kgd);
1600 struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
1601 int ret;
1602 struct amdgpu_bo *bo;
1603 uint32_t domain;
1604 struct kfd_mem_attachment *entry;
1605 struct bo_vm_reservation_context ctx;
1606 unsigned long bo_size;
1607 bool is_invalid_userptr = false;
1608
1609 bo = mem->bo;
1610 if (!bo) {
1611 pr_err("Invalid BO when mapping memory to GPU\n");
1612 return -EINVAL;
1613 }
1614
1615
1616
1617
1618
1619 mutex_lock(&mem->process_info->lock);
1620
1621
1622
1623
1624
1625 if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
1626 mmap_write_lock(current->mm);
1627 is_invalid_userptr = atomic_read(&mem->invalid);
1628 mmap_write_unlock(current->mm);
1629 }
1630
1631 mutex_lock(&mem->lock);
1632
1633 domain = mem->domain;
1634 bo_size = bo->tbo.base.size;
1635
1636 pr_debug("Map VA 0x%llx - 0x%llx to vm %p domain %s\n",
1637 mem->va,
1638 mem->va + bo_size * (1 + mem->aql_queue),
1639 avm, domain_string(domain));
1640
1641 if (!kfd_mem_is_attached(avm, mem)) {
1642 ret = kfd_mem_attach(adev, mem, avm, mem->aql_queue);
1643 if (ret)
1644 goto out;
1645 }
1646
1647 ret = reserve_bo_and_vm(mem, avm, &ctx);
1648 if (unlikely(ret))
1649 goto out;
1650
1651
1652
1653
1654
1655
1656 if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) &&
1657 bo->tbo.resource->mem_type == TTM_PL_SYSTEM)
1658 is_invalid_userptr = true;
1659
1660 ret = vm_validate_pt_pd_bos(avm);
1661 if (unlikely(ret))
1662 goto out_unreserve;
1663
1664 if (mem->mapped_to_gpu_memory == 0 &&
1665 !amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
1666
1667
1668
1669
1670 ret = amdgpu_amdkfd_bo_validate(bo, domain, true);
1671 if (ret) {
1672 pr_debug("Validate failed\n");
1673 goto out_unreserve;
1674 }
1675 }
1676
1677 list_for_each_entry(entry, &mem->attachments, list) {
1678 if (entry->bo_va->base.vm != avm || entry->is_mapped)
1679 continue;
1680
1681 pr_debug("\t map VA 0x%llx - 0x%llx in entry %p\n",
1682 entry->va, entry->va + bo_size, entry);
1683
1684 ret = map_bo_to_gpuvm(mem, entry, ctx.sync,
1685 is_invalid_userptr);
1686 if (ret) {
1687 pr_err("Failed to map bo to gpuvm\n");
1688 goto out_unreserve;
1689 }
1690
1691 ret = vm_update_pds(avm, ctx.sync);
1692 if (ret) {
1693 pr_err("Failed to update page directories\n");
1694 goto out_unreserve;
1695 }
1696
1697 entry->is_mapped = true;
1698 mem->mapped_to_gpu_memory++;
1699 pr_debug("\t INC mapping count %d\n",
1700 mem->mapped_to_gpu_memory);
1701 }
1702
1703 if (!amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) && !bo->tbo.pin_count)
1704 amdgpu_bo_fence(bo,
1705 &avm->process_info->eviction_fence->base,
1706 true);
1707 ret = unreserve_bo_and_vms(&ctx, false, false);
1708
1709 goto out;
1710
1711out_unreserve:
1712 unreserve_bo_and_vms(&ctx, false, false);
1713out:
1714 mutex_unlock(&mem->process_info->lock);
1715 mutex_unlock(&mem->lock);
1716 return ret;
1717}
1718
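/*
 * amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu - Unmap a kgd_mem BO from a
 * GPUVM. Unmaps all attachments that are mapped in this VM and, once the
 * last mapping is gone, removes the eviction fence from non-userptr BOs.
 */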
1719int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
1720 struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv)
1721{
1722 struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
1723 struct amdkfd_process_info *process_info = avm->process_info;
1724 unsigned long bo_size = mem->bo->tbo.base.size;
1725 struct kfd_mem_attachment *entry;
1726 struct bo_vm_reservation_context ctx;
1727 int ret;
1728
1729 mutex_lock(&mem->lock);
1730
1731 ret = reserve_bo_and_cond_vms(mem, avm, BO_VM_MAPPED, &ctx);
1732 if (unlikely(ret))
1733 goto out;
1734
1735 if (ctx.n_vms == 0) {
1736 ret = -EINVAL;
1737 goto unreserve_out;
1738 }
1739
1740 ret = vm_validate_pt_pd_bos(avm);
1741 if (unlikely(ret))
1742 goto unreserve_out;
1743
1744 pr_debug("Unmap VA 0x%llx - 0x%llx from vm %p\n",
1745 mem->va,
1746 mem->va + bo_size * (1 + mem->aql_queue),
1747 avm);
1748
1749 list_for_each_entry(entry, &mem->attachments, list) {
1750 if (entry->bo_va->base.vm != avm || !entry->is_mapped)
1751 continue;
1752
1753 pr_debug("\t unmap VA 0x%llx - 0x%llx from entry %p\n",
1754 entry->va, entry->va + bo_size, entry);
1755
1756 unmap_bo_from_gpuvm(mem, entry, ctx.sync);
1757 entry->is_mapped = false;
1758
1759 mem->mapped_to_gpu_memory--;
1760 pr_debug("\t DEC mapping count %d\n",
1761 mem->mapped_to_gpu_memory);
1762 }
1763
1764
1765
1766
1767 if (mem->mapped_to_gpu_memory == 0 &&
1768 !amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm) &&
1769 !mem->bo->tbo.pin_count)
1770 amdgpu_amdkfd_remove_eviction_fence(mem->bo,
1771 process_info->eviction_fence);
1772
1773unreserve_out:
1774 unreserve_bo_and_vms(&ctx, false, false);
1775out:
1776 mutex_unlock(&mem->lock);
1777 return ret;
1778}
1779
1780int amdgpu_amdkfd_gpuvm_sync_memory(
1781 struct kgd_dev *kgd, struct kgd_mem *mem, bool intr)
1782{
1783 struct amdgpu_sync sync;
1784 int ret;
1785
1786 amdgpu_sync_create(&sync);
1787
1788 mutex_lock(&mem->lock);
1789 amdgpu_sync_clone(&mem->sync, &sync);
1790 mutex_unlock(&mem->lock);
1791
1792 ret = amdgpu_sync_wait(&sync, intr);
1793 amdgpu_sync_free(&sync);
1794 return ret;
1795}
1796
1797int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct kgd_dev *kgd,
1798 struct kgd_mem *mem, void **kptr, uint64_t *size)
1799{
1800 int ret;
1801 struct amdgpu_bo *bo = mem->bo;
1802
1803 if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
1804 pr_err("userptr can't be mapped to kernel\n");
1805 return -EINVAL;
1806 }
1807
1808
1809
1810
1811 mutex_lock(&mem->process_info->lock);
1812
1813 ret = amdgpu_bo_reserve(bo, true);
1814 if (ret) {
1815 pr_err("Failed to reserve bo. ret %d\n", ret);
1816 goto bo_reserve_failed;
1817 }
1818
1819 ret = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
1820 if (ret) {
1821 pr_err("Failed to pin bo. ret %d\n", ret);
1822 goto pin_failed;
1823 }
1824
1825 ret = amdgpu_bo_kmap(bo, kptr);
1826 if (ret) {
1827 pr_err("Failed to map bo to kernel. ret %d\n", ret);
1828 goto kmap_failed;
1829 }
1830
1831 amdgpu_amdkfd_remove_eviction_fence(
1832 bo, mem->process_info->eviction_fence);
1833 list_del_init(&mem->validate_list.head);
1834
1835 if (size)
1836 *size = amdgpu_bo_size(bo);
1837
1838 amdgpu_bo_unreserve(bo);
1839
1840 mutex_unlock(&mem->process_info->lock);
1841 return 0;
1842
1843kmap_failed:
1844 amdgpu_bo_unpin(bo);
1845pin_failed:
1846 amdgpu_bo_unreserve(bo);
1847bo_reserve_failed:
1848 mutex_unlock(&mem->process_info->lock);
1849
1850 return ret;
1851}
1852
1853int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct kgd_dev *kgd,
1854 struct kfd_vm_fault_info *mem)
1855{
1856 struct amdgpu_device *adev;
1857
1858 adev = (struct amdgpu_device *)kgd;
1859 if (atomic_read(&adev->gmc.vm_fault_info_updated) == 1) {
1860 *mem = *adev->gmc.vm_fault_info;
1861 mb();
1862 atomic_set(&adev->gmc.vm_fault_info_updated, 0);
1863 }
1864 return 0;
1865}
1866
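/*
 * amdgpu_amdkfd_gpuvm_import_dmabuf - Wrap an amdgpu-exported dma-buf in
 * a kgd_mem so KFD can map it. Only VRAM and GTT BOs that belong to the
 * importing device are accepted.
 */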
1867int amdgpu_amdkfd_gpuvm_import_dmabuf(struct kgd_dev *kgd,
1868 struct dma_buf *dma_buf,
1869 uint64_t va, void *drm_priv,
1870 struct kgd_mem **mem, uint64_t *size,
1871 uint64_t *mmap_offset)
1872{
1873 struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
1874 struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
1875 struct drm_gem_object *obj;
1876 struct amdgpu_bo *bo;
1877 int ret;
1878
1879 if (dma_buf->ops != &amdgpu_dmabuf_ops)
1880
1881 return -EINVAL;
1882
1883 obj = dma_buf->priv;
1884 if (drm_to_adev(obj->dev) != adev)
1885
1886 return -EINVAL;
1887
1888 bo = gem_to_amdgpu_bo(obj);
1889 if (!(bo->preferred_domains & (AMDGPU_GEM_DOMAIN_VRAM |
1890 AMDGPU_GEM_DOMAIN_GTT)))
1891
1892 return -EINVAL;
1893
1894 *mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
1895 if (!*mem)
1896 return -ENOMEM;
1897
	ret = drm_vma_node_allow(&obj->vma_node, drm_priv);
	if (ret) {
		kfree(*mem);
		return ret;
	}
1903
1904 if (size)
1905 *size = amdgpu_bo_size(bo);
1906
1907 if (mmap_offset)
1908 *mmap_offset = amdgpu_bo_mmap_offset(bo);
1909
1910 INIT_LIST_HEAD(&(*mem)->attachments);
1911 mutex_init(&(*mem)->lock);
1912
1913 (*mem)->alloc_flags =
1914 ((bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ?
1915 KFD_IOC_ALLOC_MEM_FLAGS_VRAM : KFD_IOC_ALLOC_MEM_FLAGS_GTT)
1916 | KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE
1917 | KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE;
1918
1919 drm_gem_object_get(&bo->tbo.base);
1920 (*mem)->bo = bo;
1921 (*mem)->va = va;
1922 (*mem)->domain = (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ?
1923 AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT;
1924 (*mem)->mapped_to_gpu_memory = 0;
1925 (*mem)->process_info = avm->process_info;
1926 add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info, false);
1927 amdgpu_sync_create(&(*mem)->sync);
1928 (*mem)->is_imported = true;
1929
1930 return 0;
1931}

/*
 * amdgpu_amdkfd_evict_userptr - Called from the MMU notifier when the
 * user pages of a userptr BO are invalidated. Marks the BO invalid,
 * quiesces the KFD process on the first eviction and schedules the
 * delayed worker that restores the userptr mappings.
 */
1944int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem,
1945 struct mm_struct *mm)
1946{
1947 struct amdkfd_process_info *process_info = mem->process_info;
1948 int evicted_bos;
1949 int r = 0;
1950
1951 atomic_inc(&mem->invalid);
1952 evicted_bos = atomic_inc_return(&process_info->evicted_bos);
1953 if (evicted_bos == 1) {
1954
1955 r = kgd2kfd_quiesce_mm(mm);
1956 if (r)
1957 pr_err("Failed to quiesce KFD\n");
1958 schedule_delayed_work(&process_info->restore_userptr_work,
1959 msecs_to_jiffies(AMDGPU_USERPTR_RESTORE_DELAY_MS));
1960 }
1961
1962 return r;
1963}

/*
 * update_invalid_user_pages - First stage of userptr restore: move
 * invalidated BOs to the userptr_inval_list, drop their GPU binding by
 * moving them to the CPU domain, and get their updated user pages.
 * Returns -EAGAIN if a BO could not be reserved or another invalidation
 * raced with the update.
 */
1971static int update_invalid_user_pages(struct amdkfd_process_info *process_info,
1972 struct mm_struct *mm)
1973{
1974 struct kgd_mem *mem, *tmp_mem;
1975 struct amdgpu_bo *bo;
1976 struct ttm_operation_ctx ctx = { false, false };
1977 int invalid, ret;
1978
1979
1980
1981
1982 list_for_each_entry_safe(mem, tmp_mem,
1983 &process_info->userptr_valid_list,
1984 validate_list.head) {
1985 if (!atomic_read(&mem->invalid))
1986 continue;
1987
1988 bo = mem->bo;
1989
1990 if (amdgpu_bo_reserve(bo, true))
1991 return -EAGAIN;
1992 amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
1993 ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
1994 amdgpu_bo_unreserve(bo);
1995 if (ret) {
1996 pr_err("%s: Failed to invalidate userptr BO\n",
1997 __func__);
1998 return -EAGAIN;
1999 }
2000
2001 list_move_tail(&mem->validate_list.head,
2002 &process_info->userptr_inval_list);
2003 }
2004
2005 if (list_empty(&process_info->userptr_inval_list))
2006 return 0;
2007
2008
2009 list_for_each_entry(mem, &process_info->userptr_inval_list,
2010 validate_list.head) {
2011 invalid = atomic_read(&mem->invalid);
2012 if (!invalid)
2013
2014
2015
2016 continue;
2017
2018 bo = mem->bo;
2019
2020
2021 ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages);
2022 if (ret) {
2023 pr_debug("%s: Failed to get user pages: %d\n",
2024 __func__, ret);
2025
2026
2027 return ret;
2028 }
2029
2030
2031
2032
2033
2034 amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);
2035
2036
2037
2038
2039 if (atomic_cmpxchg(&mem->invalid, invalid, 0) != invalid)
2040 return -EAGAIN;
2041 }
2042
2043 return 0;
2044}

/*
 * validate_invalid_user_pages - Second stage of userptr restore: reserve
 * all invalidated userptr BOs and the process page directories, validate
 * the BOs with their new user pages and update their GPUVM mappings.
 * Successfully restored BOs move back to the userptr_valid_list.
 */
2052static int validate_invalid_user_pages(struct amdkfd_process_info *process_info)
2053{
2054 struct amdgpu_bo_list_entry *pd_bo_list_entries;
2055 struct list_head resv_list, duplicates;
2056 struct ww_acquire_ctx ticket;
2057 struct amdgpu_sync sync;
2058
2059 struct amdgpu_vm *peer_vm;
2060 struct kgd_mem *mem, *tmp_mem;
2061 struct amdgpu_bo *bo;
2062 struct ttm_operation_ctx ctx = { false, false };
2063 int i, ret;
2064
2065 pd_bo_list_entries = kcalloc(process_info->n_vms,
2066 sizeof(struct amdgpu_bo_list_entry),
2067 GFP_KERNEL);
2068 if (!pd_bo_list_entries) {
2069 pr_err("%s: Failed to allocate PD BO list entries\n", __func__);
2070 ret = -ENOMEM;
2071 goto out_no_mem;
2072 }
2073
2074 INIT_LIST_HEAD(&resv_list);
2075 INIT_LIST_HEAD(&duplicates);
2076
2077
2078 i = 0;
2079 list_for_each_entry(peer_vm, &process_info->vm_list_head,
2080 vm_list_node)
2081 amdgpu_vm_get_pd_bo(peer_vm, &resv_list,
2082 &pd_bo_list_entries[i++]);
2083
2084 list_for_each_entry(mem, &process_info->userptr_inval_list,
2085 validate_list.head) {
2086 list_add_tail(&mem->resv_list.head, &resv_list);
2087 mem->resv_list.bo = mem->validate_list.bo;
2088 mem->resv_list.num_shared = mem->validate_list.num_shared;
2089 }
2090
2091
2092 ret = ttm_eu_reserve_buffers(&ticket, &resv_list, false, &duplicates);
2093 WARN(!list_empty(&duplicates), "Duplicates should be empty");
2094 if (ret)
2095 goto out_free;
2096
2097 amdgpu_sync_create(&sync);
2098
2099 ret = process_validate_vms(process_info);
2100 if (ret)
2101 goto unreserve_out;
2102
2103
2104 list_for_each_entry_safe(mem, tmp_mem,
2105 &process_info->userptr_inval_list,
2106 validate_list.head) {
2107 struct kfd_mem_attachment *attachment;
2108
2109 bo = mem->bo;
2110
2111
2112 if (bo->tbo.ttm->pages[0]) {
2113 amdgpu_bo_placement_from_domain(bo, mem->domain);
2114 ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
2115 if (ret) {
2116 pr_err("%s: failed to validate BO\n", __func__);
2117 goto unreserve_out;
2118 }
2119 }
2120
2121 list_move_tail(&mem->validate_list.head,
2122 &process_info->userptr_valid_list);
2123
2124
2125
2126
2127
2128
2129
2130 list_for_each_entry(attachment, &mem->attachments, list) {
2131 if (!attachment->is_mapped)
2132 continue;
2133
2134 kfd_mem_dmaunmap_attachment(mem, attachment);
2135 ret = update_gpuvm_pte(mem, attachment, &sync);
2136 if (ret) {
2137 pr_err("%s: update PTE failed\n", __func__);
2138
2139 atomic_inc(&mem->invalid);
2140 goto unreserve_out;
2141 }
2142 }
2143 }
2144
2145
2146 ret = process_update_pds(process_info, &sync);
2147
2148unreserve_out:
2149 ttm_eu_backoff_reservation(&ticket, &resv_list);
2150 amdgpu_sync_wait(&sync, false);
2151 amdgpu_sync_free(&sync);
2152out_free:
2153 kfree(pd_bo_list_entries);
2154out_no_mem:
2155
2156 return ret;
2157}

/*
 * amdgpu_amdkfd_restore_userptr_worker - Delayed work that restores
 * invalidated userptr BOs and resumes the KFD process once all of them
 * are valid again. Reschedules itself if new invalidations arrive while
 * it is running or if the restore fails.
 */
2165static void amdgpu_amdkfd_restore_userptr_worker(struct work_struct *work)
2166{
2167 struct delayed_work *dwork = to_delayed_work(work);
2168 struct amdkfd_process_info *process_info =
2169 container_of(dwork, struct amdkfd_process_info,
2170 restore_userptr_work);
2171 struct task_struct *usertask;
2172 struct mm_struct *mm;
2173 int evicted_bos;
2174
2175 evicted_bos = atomic_read(&process_info->evicted_bos);
2176 if (!evicted_bos)
2177 return;
2178
2179
2180 usertask = get_pid_task(process_info->pid, PIDTYPE_PID);
2181 if (!usertask)
2182 return;
2183 mm = get_task_mm(usertask);
2184 if (!mm) {
2185 put_task_struct(usertask);
2186 return;
2187 }
2188
2189 mutex_lock(&process_info->lock);
2190
2191 if (update_invalid_user_pages(process_info, mm))
2192 goto unlock_out;
2193
2194
2195
2196
2197 if (!list_empty(&process_info->userptr_inval_list)) {
2198 if (atomic_read(&process_info->evicted_bos) != evicted_bos)
2199 goto unlock_out;
2200
2201 if (validate_invalid_user_pages(process_info))
2202 goto unlock_out;
2203 }
2204
2205
2206
2207
2208
2209 if (atomic_cmpxchg(&process_info->evicted_bos, evicted_bos, 0) !=
2210 evicted_bos)
2211 goto unlock_out;
2212 evicted_bos = 0;
2213 if (kgd2kfd_resume_mm(mm)) {
2214 pr_err("%s: Failed to resume KFD\n", __func__);
2215
2216
2217
2218 }
2219
2220unlock_out:
2221 mutex_unlock(&process_info->lock);
2222 mmput(mm);
2223 put_task_struct(usertask);
2224
2225
2226 if (evicted_bos)
2227 schedule_delayed_work(&process_info->restore_userptr_work,
2228 msecs_to_jiffies(AMDGPU_USERPTR_RESTORE_DELAY_MS));
2229}

/*
 * amdgpu_amdkfd_gpuvm_restore_process_bos - Restore all BOs of a KFD
 * process after an eviction.
 *
 * Reserves all process BOs and page directories, validates the BOs into
 * their domains (falling back to GTT if VRAM is exhausted), re-creates
 * the GPUVM mappings, updates the page directories and waits for the
 * updates to finish. Finally a new eviction fence is created, attached
 * to all BOs and PDs, and returned to the caller through @ef.
 */
2249int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
2250{
2251 struct amdgpu_bo_list_entry *pd_bo_list;
2252 struct amdkfd_process_info *process_info = info;
2253 struct amdgpu_vm *peer_vm;
2254 struct kgd_mem *mem;
2255 struct bo_vm_reservation_context ctx;
2256 struct amdgpu_amdkfd_fence *new_fence;
2257 int ret = 0, i;
2258 struct list_head duplicate_save;
2259 struct amdgpu_sync sync_obj;
2260 unsigned long failed_size = 0;
2261 unsigned long total_size = 0;
2262
2263 INIT_LIST_HEAD(&duplicate_save);
2264 INIT_LIST_HEAD(&ctx.list);
2265 INIT_LIST_HEAD(&ctx.duplicates);
2266
2267 pd_bo_list = kcalloc(process_info->n_vms,
2268 sizeof(struct amdgpu_bo_list_entry),
2269 GFP_KERNEL);
2270 if (!pd_bo_list)
2271 return -ENOMEM;
2272
2273 i = 0;
2274 mutex_lock(&process_info->lock);
2275 list_for_each_entry(peer_vm, &process_info->vm_list_head,
2276 vm_list_node)
2277 amdgpu_vm_get_pd_bo(peer_vm, &ctx.list, &pd_bo_list[i++]);
2278
2279
2280
2281
2282 list_for_each_entry(mem, &process_info->kfd_bo_list,
2283 validate_list.head) {
2284
2285 list_add_tail(&mem->resv_list.head, &ctx.list);
2286 mem->resv_list.bo = mem->validate_list.bo;
2287 mem->resv_list.num_shared = mem->validate_list.num_shared;
2288 }
2289
2290 ret = ttm_eu_reserve_buffers(&ctx.ticket, &ctx.list,
2291 false, &duplicate_save);
2292 if (ret) {
2293 pr_debug("Memory eviction: TTM Reserve Failed. Try again\n");
2294 goto ttm_reserve_fail;
2295 }
2296
2297 amdgpu_sync_create(&sync_obj);
2298
2299
2300 ret = process_validate_vms(process_info);
2301 if (ret)
2302 goto validate_map_fail;
2303
2304 ret = process_sync_pds_resv(process_info, &sync_obj);
2305 if (ret) {
2306 pr_debug("Memory eviction: Failed to sync to PD BO moving fence. Try again\n");
2307 goto validate_map_fail;
2308 }
2309
2310
2311 list_for_each_entry(mem, &process_info->kfd_bo_list,
2312 validate_list.head) {
2313
2314 struct amdgpu_bo *bo = mem->bo;
2315 uint32_t domain = mem->domain;
2316 struct kfd_mem_attachment *attachment;
2317
2318 total_size += amdgpu_bo_size(bo);
2319
2320 ret = amdgpu_amdkfd_bo_validate(bo, domain, false);
2321 if (ret) {
2322 pr_debug("Memory eviction: Validate BOs failed\n");
2323 failed_size += amdgpu_bo_size(bo);
2324 ret = amdgpu_amdkfd_bo_validate(bo,
2325 AMDGPU_GEM_DOMAIN_GTT, false);
2326 if (ret) {
2327 pr_debug("Memory eviction: Try again\n");
2328 goto validate_map_fail;
2329 }
2330 }
2331 ret = amdgpu_sync_fence(&sync_obj, bo->tbo.moving);
2332 if (ret) {
2333 pr_debug("Memory eviction: Sync BO fence failed. Try again\n");
2334 goto validate_map_fail;
2335 }
2336 list_for_each_entry(attachment, &mem->attachments, list) {
2337 if (!attachment->is_mapped)
2338 continue;
2339
2340 kfd_mem_dmaunmap_attachment(mem, attachment);
2341 ret = update_gpuvm_pte(mem, attachment, &sync_obj);
2342 if (ret) {
2343 pr_debug("Memory eviction: update PTE failed. Try again\n");
2344 goto validate_map_fail;
2345 }
2346 }
2347 }
2348
2349 if (failed_size)
2350 pr_debug("0x%lx/0x%lx in system\n", failed_size, total_size);
2351
2352
2353 ret = process_update_pds(process_info, &sync_obj);
2354 if (ret) {
2355 pr_debug("Memory eviction: update PDs failed. Try again\n");
2356 goto validate_map_fail;
2357 }
2358
2359
2360 amdgpu_sync_wait(&sync_obj, false);
2361
2362
2363
2364
2365
2366 new_fence = amdgpu_amdkfd_fence_create(
2367 process_info->eviction_fence->base.context,
2368 process_info->eviction_fence->mm,
2369 NULL);
2370 if (!new_fence) {
2371 pr_err("Failed to create eviction fence\n");
2372 ret = -ENOMEM;
2373 goto validate_map_fail;
2374 }
2375 dma_fence_put(&process_info->eviction_fence->base);
2376 process_info->eviction_fence = new_fence;
2377 *ef = dma_fence_get(&new_fence->base);
2378
2379
2380 list_for_each_entry(mem, &process_info->kfd_bo_list,
2381 validate_list.head)
2382 amdgpu_bo_fence(mem->bo,
2383 &process_info->eviction_fence->base, true);
2384
2385
2386 list_for_each_entry(peer_vm, &process_info->vm_list_head,
2387 vm_list_node) {
2388 struct amdgpu_bo *bo = peer_vm->root.bo;
2389
2390 amdgpu_bo_fence(bo, &process_info->eviction_fence->base, true);
2391 }
2392
2393validate_map_fail:
2394 ttm_eu_backoff_reservation(&ctx.ticket, &ctx.list);
2395 amdgpu_sync_free(&sync_obj);
2396ttm_reserve_fail:
2397 mutex_unlock(&process_info->lock);
2398 kfree(pd_bo_list);
2399 return ret;
2400}
2401
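/*
 * amdgpu_amdkfd_add_gws_to_process - Wrap the GWS BO in a kgd_mem for a
 * KFD process, validate it into the GWS domain, attach the process
 * eviction fence to it and add it to the process BO list.
 */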
2402int amdgpu_amdkfd_add_gws_to_process(void *info, void *gws, struct kgd_mem **mem)
2403{
2404 struct amdkfd_process_info *process_info = (struct amdkfd_process_info *)info;
2405 struct amdgpu_bo *gws_bo = (struct amdgpu_bo *)gws;
2406 int ret;
2407
2408 if (!info || !gws)
2409 return -EINVAL;
2410
2411 *mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
2412 if (!*mem)
2413 return -ENOMEM;
2414
2415 mutex_init(&(*mem)->lock);
2416 INIT_LIST_HEAD(&(*mem)->attachments);
2417 (*mem)->bo = amdgpu_bo_ref(gws_bo);
2418 (*mem)->domain = AMDGPU_GEM_DOMAIN_GWS;
2419 (*mem)->process_info = process_info;
2420 add_kgd_mem_to_kfd_bo_list(*mem, process_info, false);
2421 amdgpu_sync_create(&(*mem)->sync);
2422
2423
2424
2425 mutex_lock(&(*mem)->process_info->lock);
2426 ret = amdgpu_bo_reserve(gws_bo, false);
2427 if (unlikely(ret)) {
2428 pr_err("Reserve gws bo failed %d\n", ret);
2429 goto bo_reservation_failure;
2430 }
2431
2432 ret = amdgpu_amdkfd_bo_validate(gws_bo, AMDGPU_GEM_DOMAIN_GWS, true);
2433 if (ret) {
2434 pr_err("GWS BO validate failed %d\n", ret);
2435 goto bo_validation_failure;
2436 }
2437
2438
2439
2440
2441 ret = dma_resv_reserve_shared(gws_bo->tbo.base.resv, 1);
2442 if (ret)
2443 goto reserve_shared_fail;
2444 amdgpu_bo_fence(gws_bo, &process_info->eviction_fence->base, true);
2445 amdgpu_bo_unreserve(gws_bo);
2446 mutex_unlock(&(*mem)->process_info->lock);
2447
2448 return ret;
2449
2450reserve_shared_fail:
2451bo_validation_failure:
2452 amdgpu_bo_unreserve(gws_bo);
2453bo_reservation_failure:
2454 mutex_unlock(&(*mem)->process_info->lock);
2455 amdgpu_sync_free(&(*mem)->sync);
2456 remove_kgd_mem_from_kfd_bo_list(*mem, process_info);
2457 amdgpu_bo_unref(&gws_bo);
2458 mutex_destroy(&(*mem)->lock);
2459 kfree(*mem);
2460 *mem = NULL;
2461 return ret;
2462}
2463
2464int amdgpu_amdkfd_remove_gws_from_process(void *info, void *mem)
2465{
2466 int ret;
2467 struct amdkfd_process_info *process_info = (struct amdkfd_process_info *)info;
2468 struct kgd_mem *kgd_mem = (struct kgd_mem *)mem;
2469 struct amdgpu_bo *gws_bo = kgd_mem->bo;
2470
2471
2472
2473
2474 remove_kgd_mem_from_kfd_bo_list(kgd_mem, process_info);
2475
2476 ret = amdgpu_bo_reserve(gws_bo, false);
2477 if (unlikely(ret)) {
2478 pr_err("Reserve gws bo failed %d\n", ret);
2479
2480 return ret;
2481 }
2482 amdgpu_amdkfd_remove_eviction_fence(gws_bo,
2483 process_info->eviction_fence);
2484 amdgpu_bo_unreserve(gws_bo);
2485 amdgpu_sync_free(&kgd_mem->sync);
2486 amdgpu_bo_unref(&gws_bo);
2487 mutex_destroy(&kgd_mem->lock);
2488 kfree(mem);
2489 return 0;
2490}
2491
2492
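/* Copy the ASIC tile configuration into the KFD tile_config structure */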
2493int amdgpu_amdkfd_get_tile_config(struct kgd_dev *kgd,
2494 struct tile_config *config)
2495{
2496 struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
2497
2498 config->gb_addr_config = adev->gfx.config.gb_addr_config;
2499 config->tile_config_ptr = adev->gfx.config.tile_mode_array;
2500 config->num_tile_configs =
2501 ARRAY_SIZE(adev->gfx.config.tile_mode_array);
2502 config->macro_tile_config_ptr =
2503 adev->gfx.config.macrotile_mode_array;
2504 config->num_macro_tile_configs =
2505 ARRAY_SIZE(adev->gfx.config.macrotile_mode_array);
2506
2507
2508 config->num_banks = adev->gfx.config.num_banks;
2509 config->num_ranks = adev->gfx.config.num_ranks;
2510
2511 return 0;
2512}
2513