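/*
 * GPUVM memory management for the KFD (amdkfd) interface of the amdgpu
 * driver: per-process compute VM setup, BO allocation and freeing, GPUVM
 * mapping/unmapping, userptr handling, and eviction/restore of process BOs.
 */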
23#define pr_fmt(fmt) "kfd2kgd: " fmt
24
25#include <linux/list.h>
26#include <linux/pagemap.h>
27#include <linux/sched/mm.h>
28#include <drm/drmP.h>
29#include "amdgpu_object.h"
30#include "amdgpu_vm.h"
31#include "amdgpu_amdkfd.h"
32
33
34
35
36#define VI_BO_SIZE_ALIGN (0x8000)
37
38
39#define AMDGPU_AMDKFD_USERPTR_BO (1ULL << 63)
40
41
42
43
44#define AMDGPU_USERPTR_RESTORE_DELAY_MS 1
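/* Accounting of system (GTT) and userptr memory allocated through the KFD
 * interface, protected by mem_limit_lock. The limits are derived from the
 * amount of system RAM in amdgpu_amdkfd_gpuvm_init_mem_limits().
 */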
47static struct {
48 uint64_t max_system_mem_limit;
49 uint64_t max_userptr_mem_limit;
50 int64_t system_mem_used;
51 int64_t userptr_mem_used;
52 spinlock_t mem_limit_lock;
53} kfd_mem_limit;
54
55
56struct amdgpu_vm_parser {
57 uint32_t domain;
58 bool wait;
59};
60
61static const char * const domain_bit_to_string[] = {
62 "CPU",
63 "GTT",
64 "VRAM",
65 "GDS",
66 "GWS",
67 "OA"
68};
69
70#define domain_string(domain) domain_bit_to_string[ffs(domain)-1]
71
72static void amdgpu_amdkfd_restore_userptr_worker(struct work_struct *work);
73
74
75static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
76{
77 return (struct amdgpu_device *)kgd;
78}
79
80static bool check_if_add_bo_to_vm(struct amdgpu_vm *avm,
81 struct kgd_mem *mem)
82{
83 struct kfd_bo_va_list *entry;
84
85 list_for_each_entry(entry, &mem->bo_va_list, bo_list)
86 if (entry->bo_va->base.vm == avm)
87 return false;
88
89 return true;
90}
91
92
93
94
95
96void amdgpu_amdkfd_gpuvm_init_mem_limits(void)
97{
98 struct sysinfo si;
99 uint64_t mem;
100
101 si_meminfo(&si);
102 mem = si.totalram - si.totalhigh;
103 mem *= si.mem_unit;
104
105 spin_lock_init(&kfd_mem_limit.mem_limit_lock);
106 kfd_mem_limit.max_system_mem_limit = (mem >> 1) - (mem >> 3);
107 kfd_mem_limit.max_userptr_mem_limit = mem - (mem >> 2);
108 pr_debug("Kernel memory limit %lluM, userptr limit %lluM\n",
109 (kfd_mem_limit.max_system_mem_limit >> 20),
110 (kfd_mem_limit.max_userptr_mem_limit >> 20));
111}
112
113static int amdgpu_amdkfd_reserve_system_mem_limit(struct amdgpu_device *adev,
114 uint64_t size, u32 domain)
115{
116 size_t acc_size;
117 int ret = 0;
118
119 acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size,
120 sizeof(struct amdgpu_bo));
121
122 spin_lock(&kfd_mem_limit.mem_limit_lock);
123 if (domain == AMDGPU_GEM_DOMAIN_GTT) {
124 if (kfd_mem_limit.system_mem_used + (acc_size + size) >
125 kfd_mem_limit.max_system_mem_limit) {
126 ret = -ENOMEM;
127 goto err_no_mem;
128 }
129 kfd_mem_limit.system_mem_used += (acc_size + size);
130 } else if (domain == AMDGPU_GEM_DOMAIN_CPU) {
131 if ((kfd_mem_limit.system_mem_used + acc_size >
132 kfd_mem_limit.max_system_mem_limit) ||
133 (kfd_mem_limit.userptr_mem_used + (size + acc_size) >
134 kfd_mem_limit.max_userptr_mem_limit)) {
135 ret = -ENOMEM;
136 goto err_no_mem;
137 }
138 kfd_mem_limit.system_mem_used += acc_size;
139 kfd_mem_limit.userptr_mem_used += size;
140 }
141err_no_mem:
142 spin_unlock(&kfd_mem_limit.mem_limit_lock);
143 return ret;
144}
145
146static void unreserve_system_mem_limit(struct amdgpu_device *adev,
147 uint64_t size, u32 domain)
148{
149 size_t acc_size;
150
151 acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size,
152 sizeof(struct amdgpu_bo));
153
154 spin_lock(&kfd_mem_limit.mem_limit_lock);
155 if (domain == AMDGPU_GEM_DOMAIN_GTT) {
156 kfd_mem_limit.system_mem_used -= (acc_size + size);
157 } else if (domain == AMDGPU_GEM_DOMAIN_CPU) {
158 kfd_mem_limit.system_mem_used -= acc_size;
159 kfd_mem_limit.userptr_mem_used -= size;
160 }
161 WARN_ONCE(kfd_mem_limit.system_mem_used < 0,
162 "kfd system memory accounting unbalanced");
163 WARN_ONCE(kfd_mem_limit.userptr_mem_used < 0,
164 "kfd userptr memory accounting unbalanced");
165
166 spin_unlock(&kfd_mem_limit.mem_limit_lock);
167}
168
169void amdgpu_amdkfd_unreserve_system_memory_limit(struct amdgpu_bo *bo)
170{
171 spin_lock(&kfd_mem_limit.mem_limit_lock);
172
173 if (bo->flags & AMDGPU_AMDKFD_USERPTR_BO) {
174 kfd_mem_limit.system_mem_used -= bo->tbo.acc_size;
175 kfd_mem_limit.userptr_mem_used -= amdgpu_bo_size(bo);
176 } else if (bo->preferred_domains == AMDGPU_GEM_DOMAIN_GTT) {
177 kfd_mem_limit.system_mem_used -=
178 (bo->tbo.acc_size + amdgpu_bo_size(bo));
179 }
180 WARN_ONCE(kfd_mem_limit.system_mem_used < 0,
181 "kfd system memory accounting unbalanced");
182 WARN_ONCE(kfd_mem_limit.userptr_mem_used < 0,
183 "kfd userptr memory accounting unbalanced");
184
185 spin_unlock(&kfd_mem_limit.mem_limit_lock);
186}
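/* amdgpu_amdkfd_remove_eviction_fence - Remove eviction fence(s) from a
 *  BO's reservation object.
 *
 * @bo: the reserved BO
 * @ef: if not NULL, remove the fences belonging to this eviction fence's
 *  context from the shared fence list
 * @ef_list: if not NULL (and @ef is NULL), return the removed KFD eviction
 *  fences so they can be re-added with amdgpu_amdkfd_add_eviction_fence()
 * @ef_count: number of fences returned in @ef_list
 */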
204static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
205 struct amdgpu_amdkfd_fence *ef,
206 struct amdgpu_amdkfd_fence ***ef_list,
207 unsigned int *ef_count)
208{
209 struct reservation_object_list *fobj;
210 struct reservation_object *resv;
211 unsigned int i = 0, j = 0, k = 0, shared_count;
212 unsigned int count = 0;
213 struct amdgpu_amdkfd_fence **fence_list;
214
215 if (!ef && !ef_list)
216 return -EINVAL;
217
218 if (ef_list) {
219 *ef_list = NULL;
220 *ef_count = 0;
221 }
222
223 resv = bo->tbo.resv;
224 fobj = reservation_object_get_list(resv);
225
226 if (!fobj)
227 return 0;
228
229 preempt_disable();
230 write_seqcount_begin(&resv->seq);
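        /* Walk the shared fence list: if @ef was given, drop fences from the
         * same context; otherwise just count the KFD eviction fences so the
         * return array can be sized.
         */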
237 shared_count = fobj->shared_count;
238 for (i = 0; i < shared_count; ++i) {
239 struct dma_fence *f;
240
241 f = rcu_dereference_protected(fobj->shared[i],
242 reservation_object_held(resv));
243
244 if (ef) {
245 if (f->context == ef->base.context) {
246 dma_fence_put(f);
247 fobj->shared_count--;
248 } else {
249 RCU_INIT_POINTER(fobj->shared[j++], f);
250 }
251 } else if (to_amdgpu_amdkfd_fence(f))
252 count++;
253 }
254 write_seqcount_end(&resv->seq);
255 preempt_enable();
256
257 if (ef || !count)
258 return 0;
259
260
261
262
263 fence_list = kcalloc(count, sizeof(struct amdgpu_amdkfd_fence *),
264 GFP_KERNEL);
265 if (!fence_list)
266 return -ENOMEM;
267
268 preempt_disable();
269 write_seqcount_begin(&resv->seq);
270
271 j = 0;
272 for (i = 0; i < shared_count; ++i) {
273 struct dma_fence *f;
274 struct amdgpu_amdkfd_fence *efence;
275
276 f = rcu_dereference_protected(fobj->shared[i],
277 reservation_object_held(resv));
278
279 efence = to_amdgpu_amdkfd_fence(f);
280 if (efence) {
281 fence_list[k++] = efence;
282 fobj->shared_count--;
283 } else {
284 RCU_INIT_POINTER(fobj->shared[j++], f);
285 }
286 }
287
288 write_seqcount_end(&resv->seq);
289 preempt_enable();
290
291 *ef_list = fence_list;
292 *ef_count = k;
293
294 return 0;
295}
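/* amdgpu_amdkfd_add_eviction_fence - Re-add eviction fences to a BO's
 *  reservation object.
 *
 * @bo: the reserved BO
 * @ef_list: fences returned by amdgpu_amdkfd_remove_eviction_fence()
 * @ef_count: number of fences in @ef_list
 *
 * amdgpu_bo_fence() takes its own reference, so the reference held since
 * removal is dropped and the array is freed.
 */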
307static void amdgpu_amdkfd_add_eviction_fence(struct amdgpu_bo *bo,
308 struct amdgpu_amdkfd_fence **ef_list,
309 unsigned int ef_count)
310{
311 int i;
312
313 if (!ef_list || !ef_count)
314 return;
315
316 for (i = 0; i < ef_count; i++) {
317 amdgpu_bo_fence(bo, &ef_list[i]->base, true);
318
319
320
321 dma_fence_put(&ef_list[i]->base);
322 }
323
324 kfree(ef_list);
325}
326
327static int amdgpu_amdkfd_bo_validate(struct amdgpu_bo *bo, uint32_t domain,
328 bool wait)
329{
330 struct ttm_operation_ctx ctx = { false, false };
331 int ret;
332
333 if (WARN(amdgpu_ttm_tt_get_usermm(bo->tbo.ttm),
334 "Called with userptr BO"))
335 return -EINVAL;
336
337 amdgpu_ttm_placement_from_domain(bo, domain);
338
339 ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
340 if (ret)
341 goto validate_fail;
342 if (wait) {
343 struct amdgpu_amdkfd_fence **ef_list;
344 unsigned int ef_count;
345
346 ret = amdgpu_amdkfd_remove_eviction_fence(bo, NULL, &ef_list,
347 &ef_count);
348 if (ret)
349 goto validate_fail;
350
351 ttm_bo_wait(&bo->tbo, false, false);
352 amdgpu_amdkfd_add_eviction_fence(bo, ef_list, ef_count);
353 }
354
355validate_fail:
356 return ret;
357}
358
359static int amdgpu_amdkfd_validate(void *param, struct amdgpu_bo *bo)
360{
361 struct amdgpu_vm_parser *p = param;
362
363 return amdgpu_amdkfd_bo_validate(bo, p->domain, p->wait);
364}
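/* vm_validate_pt_pd_bos - Validate the page table and page directory BOs
 *  of a VM in VRAM.
 *
 * Also caches the PD address in vm->pd_phys_addr and keeps the PD kmapped
 * for VMs that update page tables with the CPU.
 */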
373static int vm_validate_pt_pd_bos(struct amdgpu_vm *vm)
374{
375 struct amdgpu_bo *pd = vm->root.base.bo;
376 struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
377 struct amdgpu_vm_parser param;
378 uint64_t addr, flags = AMDGPU_PTE_VALID;
379 int ret;
380
381 param.domain = AMDGPU_GEM_DOMAIN_VRAM;
382 param.wait = false;
383
        ret = amdgpu_vm_validate_pt_bos(adev, vm, amdgpu_amdkfd_validate,
                                        &param);
386 if (ret) {
387 pr_err("amdgpu: failed to validate PT BOs\n");
388 return ret;
389 }
390
        ret = amdgpu_amdkfd_validate(&param, pd);
392 if (ret) {
393 pr_err("amdgpu: failed to validate PD\n");
394 return ret;
395 }
396
397 addr = amdgpu_bo_gpu_offset(vm->root.base.bo);
398 amdgpu_gmc_get_vm_pde(adev, -1, &addr, &flags);
399 vm->pd_phys_addr = addr;
400
401 if (vm->use_cpu_for_update) {
402 ret = amdgpu_bo_kmap(pd, NULL);
403 if (ret) {
404 pr_err("amdgpu: failed to kmap PD, ret=%d\n", ret);
405 return ret;
406 }
407 }
408
409 return 0;
410}
411
412static int sync_vm_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
413 struct dma_fence *f)
414{
415 int ret = amdgpu_sync_fence(adev, sync, f, false);
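        /* Drop last_vm_update: it is not needed for KFD's purposes and a
         * single fence pointer cannot track VM updates from multiple
         * contexts anyway.
         */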
421 if (sync->last_vm_update) {
422 dma_fence_put(sync->last_vm_update);
423 sync->last_vm_update = NULL;
424 }
425
426 return ret;
427}
428
429static int vm_update_pds(struct amdgpu_vm *vm, struct amdgpu_sync *sync)
430{
431 struct amdgpu_bo *pd = vm->root.base.bo;
432 struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
433 int ret;
434
435 ret = amdgpu_vm_update_directories(adev, vm);
436 if (ret)
437 return ret;
438
439 return sync_vm_fence(adev, sync, vm->last_update);
440}
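/* add_bo_to_vm - Attach a BO to a VM
 *
 * One-time setup when a BO is first added to a VM: allocate and fill a
 * kfd_bo_va_list entry, register the BO with the VM, compute the PTE flags,
 * allocate page tables for the VA range and validate the PT/PD BOs. The BO
 * can afterwards be mapped and unmapped repeatedly.
 *
 * For AQL queues (@is_aql) the second half of the doubled buffer is added
 * at @mem->va + BO size.
 */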
454static int add_bo_to_vm(struct amdgpu_device *adev, struct kgd_mem *mem,
455 struct amdgpu_vm *vm, bool is_aql,
456 struct kfd_bo_va_list **p_bo_va_entry)
457{
458 int ret;
459 struct kfd_bo_va_list *bo_va_entry;
460 struct amdgpu_bo *pd = vm->root.base.bo;
461 struct amdgpu_bo *bo = mem->bo;
462 uint64_t va = mem->va;
463 struct list_head *list_bo_va = &mem->bo_va_list;
464 unsigned long bo_size = bo->tbo.mem.size;
465
466 if (!va) {
467 pr_err("Invalid VA when adding BO to VM\n");
468 return -EINVAL;
469 }
470
471 if (is_aql)
472 va += bo_size;
473
474 bo_va_entry = kzalloc(sizeof(*bo_va_entry), GFP_KERNEL);
475 if (!bo_va_entry)
476 return -ENOMEM;
477
478 pr_debug("\t add VA 0x%llx - 0x%llx to vm %p\n", va,
479 va + bo_size, vm);
480
481
482 bo_va_entry->bo_va = amdgpu_vm_bo_add(adev, vm, bo);
483 if (!bo_va_entry->bo_va) {
484 ret = -EINVAL;
485 pr_err("Failed to add BO object to VM. ret == %d\n",
486 ret);
487 goto err_vmadd;
488 }
489
490 bo_va_entry->va = va;
491 bo_va_entry->pte_flags = amdgpu_gmc_get_pte_flags(adev,
492 mem->mapping_flags);
493 bo_va_entry->kgd_dev = (void *)adev;
494 list_add(&bo_va_entry->bo_list, list_bo_va);
495
496 if (p_bo_va_entry)
497 *p_bo_va_entry = bo_va_entry;
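        /* Allocating and validating new page tables below has to wait on
         * move fences. Remove the eviction fence from the PD first so that
         * wait does not trigger a KFD eviction; it is added back once the
         * PTs are validated.
         */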
504 amdgpu_amdkfd_remove_eviction_fence(pd,
505 vm->process_info->eviction_fence,
506 NULL, NULL);
507
508 ret = amdgpu_vm_alloc_pts(adev, vm, va, amdgpu_bo_size(bo));
509 if (ret) {
510 pr_err("Failed to allocate pts, err=%d\n", ret);
511 goto err_alloc_pts;
512 }
513
514 ret = vm_validate_pt_pd_bos(vm);
515 if (ret) {
516 pr_err("validate_pt_pd_bos() failed\n");
517 goto err_alloc_pts;
518 }
519
520
521 amdgpu_bo_fence(pd, &vm->process_info->eviction_fence->base, true);
522
523 return 0;
524
525err_alloc_pts:
526 amdgpu_bo_fence(pd, &vm->process_info->eviction_fence->base, true);
527 amdgpu_vm_bo_rmv(adev, bo_va_entry->bo_va);
528 list_del(&bo_va_entry->bo_list);
529err_vmadd:
530 kfree(bo_va_entry);
531 return ret;
532}
533
534static void remove_bo_from_vm(struct amdgpu_device *adev,
535 struct kfd_bo_va_list *entry, unsigned long size)
536{
537 pr_debug("\t remove VA 0x%llx - 0x%llx in entry %p\n",
538 entry->va,
539 entry->va + size, entry);
540 amdgpu_vm_bo_rmv(adev, entry->bo_va);
541 list_del(&entry->bo_list);
542 kfree(entry);
543}
544
545static void add_kgd_mem_to_kfd_bo_list(struct kgd_mem *mem,
546 struct amdkfd_process_info *process_info,
547 bool userptr)
548{
549 struct ttm_validate_buffer *entry = &mem->validate_list;
550 struct amdgpu_bo *bo = mem->bo;
551
552 INIT_LIST_HEAD(&entry->head);
553 entry->shared = true;
554 entry->bo = &bo->tbo;
555 mutex_lock(&process_info->lock);
556 if (userptr)
557 list_add_tail(&entry->head, &process_info->userptr_valid_list);
558 else
559 list_add_tail(&entry->head, &process_info->kfd_bo_list);
560 mutex_unlock(&process_info->lock);
561}
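/* init_user_pages - Set up a userptr BO
 *
 * Registers an MMU notifier for the address range, gets the user pages and
 * validates the BO in its GTT domain so it is ready to be mapped. Holds the
 * process_info lock to serialize with the userptr restore worker.
 */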
575static int init_user_pages(struct kgd_mem *mem, struct mm_struct *mm,
576 uint64_t user_addr)
577{
578 struct amdkfd_process_info *process_info = mem->process_info;
579 struct amdgpu_bo *bo = mem->bo;
580 struct ttm_operation_ctx ctx = { true, false };
581 int ret = 0;
582
583 mutex_lock(&process_info->lock);
584
585 ret = amdgpu_ttm_tt_set_userptr(bo->tbo.ttm, user_addr, 0);
586 if (ret) {
587 pr_err("%s: Failed to set userptr: %d\n", __func__, ret);
588 goto out;
589 }
590
591 ret = amdgpu_mn_register(bo, user_addr);
592 if (ret) {
593 pr_err("%s: Failed to register MMU notifier: %d\n",
594 __func__, ret);
595 goto out;
596 }
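        /* If no restore worker is running concurrently, user_pages must not
         * be allocated yet at this point.
         */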
601 WARN(mem->user_pages, "Leaking user_pages array");
602
603 mem->user_pages = kvmalloc_array(bo->tbo.ttm->num_pages,
604 sizeof(struct page *),
605 GFP_KERNEL | __GFP_ZERO);
606 if (!mem->user_pages) {
607 pr_err("%s: Failed to allocate pages array\n", __func__);
608 ret = -ENOMEM;
609 goto unregister_out;
610 }
611
612 ret = amdgpu_ttm_tt_get_user_pages(bo->tbo.ttm, mem->user_pages);
613 if (ret) {
614 pr_err("%s: Failed to get user pages: %d\n", __func__, ret);
615 goto free_out;
616 }
617
618 amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm, mem->user_pages);
619
620 ret = amdgpu_bo_reserve(bo, true);
621 if (ret) {
622 pr_err("%s: Failed to reserve BO\n", __func__);
623 goto release_out;
624 }
625 amdgpu_ttm_placement_from_domain(bo, mem->domain);
626 ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
627 if (ret)
628 pr_err("%s: failed to validate BO\n", __func__);
629 amdgpu_bo_unreserve(bo);
630
631release_out:
632 if (ret)
633 release_pages(mem->user_pages, bo->tbo.ttm->num_pages);
634free_out:
635 kvfree(mem->user_pages);
636 mem->user_pages = NULL;
637unregister_out:
638 if (ret)
639 amdgpu_mn_unregister(bo);
640out:
641 mutex_unlock(&process_info->lock);
642 return ret;
643}
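/* Context used to reserve a KFD BO together with the page directory BOs of
 * the VM(s) it is mapped in, so mappings and page tables can be updated
 * safely. Filled by reserve_bo_and_vm()/reserve_bo_and_cond_vms() and
 * released by unreserve_bo_and_vms().
 */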
650struct bo_vm_reservation_context {
651 struct amdgpu_bo_list_entry kfd_bo;
652 unsigned int n_vms;
653 struct amdgpu_bo_list_entry *vm_pd;
654 struct ww_acquire_ctx ticket;
655 struct list_head list, duplicates;
656 struct amdgpu_sync *sync;
657 bool reserved;
658};
659
660enum bo_vm_match {
661 BO_VM_NOT_MAPPED = 0,
662 BO_VM_MAPPED,
663 BO_VM_ALL,
664};
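/* reserve_bo_and_vm - Reserve a BO and one VM unconditionally.
 * @mem: KFD BO structure
 * @vm: the VM to reserve
 * @ctx: the struct that will be used in unreserve_bo_and_vms()
 */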
672static int reserve_bo_and_vm(struct kgd_mem *mem,
673 struct amdgpu_vm *vm,
674 struct bo_vm_reservation_context *ctx)
675{
676 struct amdgpu_bo *bo = mem->bo;
677 int ret;
678
679 WARN_ON(!vm);
680
681 ctx->reserved = false;
682 ctx->n_vms = 1;
683 ctx->sync = &mem->sync;
684
685 INIT_LIST_HEAD(&ctx->list);
686 INIT_LIST_HEAD(&ctx->duplicates);
687
688 ctx->vm_pd = kcalloc(ctx->n_vms, sizeof(*ctx->vm_pd), GFP_KERNEL);
689 if (!ctx->vm_pd)
690 return -ENOMEM;
691
692 ctx->kfd_bo.robj = bo;
693 ctx->kfd_bo.priority = 0;
694 ctx->kfd_bo.tv.bo = &bo->tbo;
695 ctx->kfd_bo.tv.shared = true;
696 ctx->kfd_bo.user_pages = NULL;
697 list_add(&ctx->kfd_bo.tv.head, &ctx->list);
698
699 amdgpu_vm_get_pd_bo(vm, &ctx->list, &ctx->vm_pd[0]);
700
701 ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list,
702 false, &ctx->duplicates);
703 if (!ret)
704 ctx->reserved = true;
705 else {
706 pr_err("Failed to reserve buffers in ttm\n");
707 kfree(ctx->vm_pd);
708 ctx->vm_pd = NULL;
709 }
710
711 return ret;
712}
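/* reserve_bo_and_cond_vms - Reserve a BO and some VMs conditionally
 * @mem: KFD BO structure
 * @vm: the VM to reserve, or NULL to reserve all VMs the BO belongs to
 * @map_type: only reserve VMs whose mapping status matches, or BO_VM_ALL
 * @ctx: the struct that will be used in unreserve_bo_and_vms()
 *
 * Returns 0 for success, negative for failure.
 */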
724static int reserve_bo_and_cond_vms(struct kgd_mem *mem,
725 struct amdgpu_vm *vm, enum bo_vm_match map_type,
726 struct bo_vm_reservation_context *ctx)
727{
728 struct amdgpu_bo *bo = mem->bo;
729 struct kfd_bo_va_list *entry;
730 unsigned int i;
731 int ret;
732
733 ctx->reserved = false;
734 ctx->n_vms = 0;
735 ctx->vm_pd = NULL;
736 ctx->sync = &mem->sync;
737
738 INIT_LIST_HEAD(&ctx->list);
739 INIT_LIST_HEAD(&ctx->duplicates);
740
741 list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
742 if ((vm && vm != entry->bo_va->base.vm) ||
743 (entry->is_mapped != map_type
744 && map_type != BO_VM_ALL))
745 continue;
746
747 ctx->n_vms++;
748 }
749
750 if (ctx->n_vms != 0) {
751 ctx->vm_pd = kcalloc(ctx->n_vms, sizeof(*ctx->vm_pd),
752 GFP_KERNEL);
753 if (!ctx->vm_pd)
754 return -ENOMEM;
755 }
756
757 ctx->kfd_bo.robj = bo;
758 ctx->kfd_bo.priority = 0;
759 ctx->kfd_bo.tv.bo = &bo->tbo;
760 ctx->kfd_bo.tv.shared = true;
761 ctx->kfd_bo.user_pages = NULL;
762 list_add(&ctx->kfd_bo.tv.head, &ctx->list);
763
764 i = 0;
765 list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
766 if ((vm && vm != entry->bo_va->base.vm) ||
767 (entry->is_mapped != map_type
768 && map_type != BO_VM_ALL))
769 continue;
770
771 amdgpu_vm_get_pd_bo(entry->bo_va->base.vm, &ctx->list,
772 &ctx->vm_pd[i]);
773 i++;
774 }
775
776 ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list,
777 false, &ctx->duplicates);
778 if (!ret)
779 ctx->reserved = true;
780 else
781 pr_err("Failed to reserve buffers in ttm.\n");
782
783 if (ret) {
784 kfree(ctx->vm_pd);
785 ctx->vm_pd = NULL;
786 }
787
788 return ret;
789}
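/* unreserve_bo_and_vms - Unreserve the BO and VMs of a reservation context
 * @ctx: reservation context to release
 * @wait: wait for the context's sync object before unreserving
 * @intr: make the wait interruptible
 *
 * Frees the vm_pd array and resets the context. Returns the result of the
 * wait, or 0 if no wait was requested.
 */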
801static int unreserve_bo_and_vms(struct bo_vm_reservation_context *ctx,
802 bool wait, bool intr)
803{
804 int ret = 0;
805
806 if (wait)
807 ret = amdgpu_sync_wait(ctx->sync, intr);
808
809 if (ctx->reserved)
810 ttm_eu_backoff_reservation(&ctx->ticket, &ctx->list);
811 kfree(ctx->vm_pd);
812
813 ctx->sync = NULL;
814
815 ctx->reserved = false;
816 ctx->vm_pd = NULL;
817
818 return ret;
819}
820
821static int unmap_bo_from_gpuvm(struct amdgpu_device *adev,
822 struct kfd_bo_va_list *entry,
823 struct amdgpu_sync *sync)
824{
825 struct amdgpu_bo_va *bo_va = entry->bo_va;
826 struct amdgpu_vm *vm = bo_va->base.vm;
827 struct amdgpu_bo *pd = vm->root.base.bo;
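        /* Remove the eviction fence from the PD so that waiting on the page
         * table clearing below does not trigger a KFD eviction; the fence is
         * added back afterwards.
         */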
835 amdgpu_amdkfd_remove_eviction_fence(pd,
836 vm->process_info->eviction_fence,
837 NULL, NULL);
838 amdgpu_vm_bo_unmap(adev, bo_va, entry->va);
839
840 amdgpu_vm_clear_freed(adev, vm, &bo_va->last_pt_update);
841
842
843 amdgpu_bo_fence(pd, &vm->process_info->eviction_fence->base, true);
844
845 sync_vm_fence(adev, sync, bo_va->last_pt_update);
846
847 return 0;
848}
849
850static int update_gpuvm_pte(struct amdgpu_device *adev,
851 struct kfd_bo_va_list *entry,
852 struct amdgpu_sync *sync)
853{
854 int ret;
855 struct amdgpu_vm *vm;
856 struct amdgpu_bo_va *bo_va;
857 struct amdgpu_bo *bo;
858
859 bo_va = entry->bo_va;
860 vm = bo_va->base.vm;
861 bo = bo_va->base.bo;
862
863
864 ret = amdgpu_vm_bo_update(adev, bo_va, false);
865 if (ret) {
866 pr_err("amdgpu_vm_bo_update failed\n");
867 return ret;
868 }
869
870 return sync_vm_fence(adev, sync, bo_va->last_pt_update);
871}
872
873static int map_bo_to_gpuvm(struct amdgpu_device *adev,
874 struct kfd_bo_va_list *entry, struct amdgpu_sync *sync,
875 bool no_update_pte)
876{
877 int ret;
878
879
880 ret = amdgpu_vm_bo_map(adev, entry->bo_va, entry->va, 0,
881 amdgpu_bo_size(entry->bo_va->base.bo),
882 entry->pte_flags);
883 if (ret) {
884 pr_err("Failed to map VA 0x%llx in vm. ret %d\n",
885 entry->va, ret);
886 return ret;
887 }
888
889 if (no_update_pte)
890 return 0;
891
892 ret = update_gpuvm_pte(adev, entry, sync);
893 if (ret) {
894 pr_err("update_gpuvm_pte() failed\n");
895 goto update_gpuvm_pte_failed;
896 }
897
898 return 0;
899
900update_gpuvm_pte_failed:
901 unmap_bo_from_gpuvm(adev, entry, sync);
902 return ret;
903}
904
905static int process_validate_vms(struct amdkfd_process_info *process_info)
906{
907 struct amdgpu_vm *peer_vm;
908 int ret;
909
910 list_for_each_entry(peer_vm, &process_info->vm_list_head,
911 vm_list_node) {
912 ret = vm_validate_pt_pd_bos(peer_vm);
913 if (ret)
914 return ret;
915 }
916
917 return 0;
918}
919
920static int process_update_pds(struct amdkfd_process_info *process_info,
921 struct amdgpu_sync *sync)
922{
923 struct amdgpu_vm *peer_vm;
924 int ret;
925
926 list_for_each_entry(peer_vm, &process_info->vm_list_head,
927 vm_list_node) {
928 ret = vm_update_pds(peer_vm, sync);
929 if (ret)
930 return ret;
931 }
932
933 return 0;
934}
935
936static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info,
937 struct dma_fence **ef)
938{
939 struct amdkfd_process_info *info = NULL;
940 int ret;
941
942 if (!*process_info) {
943 info = kzalloc(sizeof(*info), GFP_KERNEL);
944 if (!info)
945 return -ENOMEM;
946
947 mutex_init(&info->lock);
948 INIT_LIST_HEAD(&info->vm_list_head);
949 INIT_LIST_HEAD(&info->kfd_bo_list);
950 INIT_LIST_HEAD(&info->userptr_valid_list);
951 INIT_LIST_HEAD(&info->userptr_inval_list);
952
953 info->eviction_fence =
954 amdgpu_amdkfd_fence_create(dma_fence_context_alloc(1),
955 current->mm);
956 if (!info->eviction_fence) {
957 pr_err("Failed to create eviction fence\n");
958 ret = -ENOMEM;
959 goto create_evict_fence_fail;
960 }
961
962 info->pid = get_task_pid(current->group_leader, PIDTYPE_PID);
963 atomic_set(&info->evicted_bos, 0);
964 INIT_DELAYED_WORK(&info->restore_userptr_work,
965 amdgpu_amdkfd_restore_userptr_worker);
966
967 *process_info = info;
968 *ef = dma_fence_get(&info->eviction_fence->base);
969 }
970
971 vm->process_info = *process_info;
972
973
974 ret = amdgpu_bo_reserve(vm->root.base.bo, true);
975 if (ret)
976 goto reserve_pd_fail;
977 ret = vm_validate_pt_pd_bos(vm);
978 if (ret) {
979 pr_err("validate_pt_pd_bos() failed\n");
980 goto validate_pd_fail;
981 }
982 ret = ttm_bo_wait(&vm->root.base.bo->tbo, false, false);
983 if (ret)
984 goto wait_pd_fail;
985 amdgpu_bo_fence(vm->root.base.bo,
986 &vm->process_info->eviction_fence->base, true);
987 amdgpu_bo_unreserve(vm->root.base.bo);
988
989
990 mutex_lock(&vm->process_info->lock);
991 list_add_tail(&vm->vm_list_node,
992 &(vm->process_info->vm_list_head));
993 vm->process_info->n_vms++;
994 mutex_unlock(&vm->process_info->lock);
995
996 return 0;
997
998wait_pd_fail:
999validate_pd_fail:
1000 amdgpu_bo_unreserve(vm->root.base.bo);
1001reserve_pd_fail:
1002 vm->process_info = NULL;
1003 if (info) {
1004
1005 dma_fence_put(&info->eviction_fence->base);
1006 dma_fence_put(*ef);
1007 *ef = NULL;
1008 *process_info = NULL;
1009 put_pid(info->pid);
1010create_evict_fence_fail:
1011 mutex_destroy(&info->lock);
1012 kfree(info);
1013 }
1014 return ret;
1015}
1016
1017int amdgpu_amdkfd_gpuvm_create_process_vm(struct kgd_dev *kgd, void **vm,
1018 void **process_info,
1019 struct dma_fence **ef)
1020{
1021 struct amdgpu_device *adev = get_amdgpu_device(kgd);
1022 struct amdgpu_vm *new_vm;
1023 int ret;
1024
1025 new_vm = kzalloc(sizeof(*new_vm), GFP_KERNEL);
1026 if (!new_vm)
1027 return -ENOMEM;
1028
1029
1030 ret = amdgpu_vm_init(adev, new_vm, AMDGPU_VM_CONTEXT_COMPUTE, 0);
1031 if (ret) {
1032 pr_err("Failed init vm ret %d\n", ret);
1033 goto amdgpu_vm_init_fail;
1034 }
1035
1036
1037 ret = init_kfd_vm(new_vm, process_info, ef);
1038 if (ret)
1039 goto init_kfd_vm_fail;
1040
1041 *vm = (void *) new_vm;
1042
1043 return 0;
1044
1045init_kfd_vm_fail:
1046 amdgpu_vm_fini(adev, new_vm);
1047amdgpu_vm_init_fail:
1048 kfree(new_vm);
1049 return ret;
1050}
1051
1052int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct kgd_dev *kgd,
1053 struct file *filp,
1054 void **vm, void **process_info,
1055 struct dma_fence **ef)
1056{
1057 struct amdgpu_device *adev = get_amdgpu_device(kgd);
1058 struct drm_file *drm_priv = filp->private_data;
1059 struct amdgpu_fpriv *drv_priv = drm_priv->driver_priv;
1060 struct amdgpu_vm *avm = &drv_priv->vm;
1061 int ret;
1062
1063
1064 if (avm->process_info)
1065 return -EINVAL;
1066
1067
1068 ret = amdgpu_vm_make_compute(adev, avm);
1069 if (ret)
1070 return ret;
1071
1072
1073 ret = init_kfd_vm(avm, process_info, ef);
1074 if (ret)
1075 return ret;
1076
1077 *vm = (void *)avm;
1078
1079 return 0;
1080}
1081
1082void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
1083 struct amdgpu_vm *vm)
1084{
1085 struct amdkfd_process_info *process_info = vm->process_info;
1086 struct amdgpu_bo *pd = vm->root.base.bo;
1087
1088 if (!process_info)
1089 return;
1090
1091
1092 amdgpu_bo_reserve(pd, false);
1093 amdgpu_bo_fence(pd, NULL, false);
1094 amdgpu_bo_unreserve(pd);
1095
1096
1097 mutex_lock(&process_info->lock);
1098 process_info->n_vms--;
1099 list_del(&vm->vm_list_node);
1100 mutex_unlock(&process_info->lock);
1101
1102
1103 if (!process_info->n_vms) {
1104 WARN_ON(!list_empty(&process_info->kfd_bo_list));
1105 WARN_ON(!list_empty(&process_info->userptr_valid_list));
1106 WARN_ON(!list_empty(&process_info->userptr_inval_list));
1107
1108 dma_fence_put(&process_info->eviction_fence->base);
1109 cancel_delayed_work_sync(&process_info->restore_userptr_work);
1110 put_pid(process_info->pid);
1111 mutex_destroy(&process_info->lock);
1112 kfree(process_info);
1113 }
1114}
1115
1116void amdgpu_amdkfd_gpuvm_destroy_process_vm(struct kgd_dev *kgd, void *vm)
1117{
1118 struct amdgpu_device *adev = get_amdgpu_device(kgd);
1119 struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
1120
1121 if (WARN_ON(!kgd || !vm))
1122 return;
1123
1124 pr_debug("Destroying process vm %p\n", vm);
1125
1126
1127 amdgpu_vm_fini(adev, avm);
1128 kfree(vm);
1129}
1130
1131uint32_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *vm)
1132{
1133 struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
1134
1135 return avm->pd_phys_addr >> AMDGPU_GPU_PAGE_SHIFT;
1136}
1137
1138int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
1139 struct kgd_dev *kgd, uint64_t va, uint64_t size,
1140 void *vm, struct kgd_mem **mem,
1141 uint64_t *offset, uint32_t flags)
1142{
1143 struct amdgpu_device *adev = get_amdgpu_device(kgd);
1144 struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
1145 uint64_t user_addr = 0;
1146 struct amdgpu_bo *bo;
1147 struct amdgpu_bo_param bp;
1148 int byte_align;
1149 u32 domain, alloc_domain;
1150 u64 alloc_flags;
1151 uint32_t mapping_flags;
1152 int ret;
1153
1154
1155
1156
1157 if (flags & ALLOC_MEM_FLAGS_VRAM) {
1158 domain = alloc_domain = AMDGPU_GEM_DOMAIN_VRAM;
1159 alloc_flags = AMDGPU_GEM_CREATE_VRAM_CLEARED;
1160 alloc_flags |= (flags & ALLOC_MEM_FLAGS_PUBLIC) ?
1161 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED :
1162 AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
1163 } else if (flags & ALLOC_MEM_FLAGS_GTT) {
1164 domain = alloc_domain = AMDGPU_GEM_DOMAIN_GTT;
1165 alloc_flags = 0;
1166 } else if (flags & ALLOC_MEM_FLAGS_USERPTR) {
1167 domain = AMDGPU_GEM_DOMAIN_GTT;
1168 alloc_domain = AMDGPU_GEM_DOMAIN_CPU;
1169 alloc_flags = 0;
1170 if (!offset || !*offset)
1171 return -EINVAL;
1172 user_addr = *offset;
1173 } else {
1174 return -EINVAL;
1175 }
1176
1177 *mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
1178 if (!*mem)
1179 return -ENOMEM;
1180 INIT_LIST_HEAD(&(*mem)->bo_va_list);
1181 mutex_init(&(*mem)->lock);
1182 (*mem)->aql_queue = !!(flags & ALLOC_MEM_FLAGS_AQL_QUEUE_MEM);
1183
1184
1185
1186
1187
1188 if ((*mem)->aql_queue)
1189 size = size >> 1;
1190
1191
1192 byte_align = (adev->family == AMDGPU_FAMILY_VI &&
1193 adev->asic_type != CHIP_FIJI &&
1194 adev->asic_type != CHIP_POLARIS10 &&
1195 adev->asic_type != CHIP_POLARIS11) ?
1196 VI_BO_SIZE_ALIGN : 1;
1197
1198 mapping_flags = AMDGPU_VM_PAGE_READABLE;
1199 if (flags & ALLOC_MEM_FLAGS_WRITABLE)
1200 mapping_flags |= AMDGPU_VM_PAGE_WRITEABLE;
1201 if (flags & ALLOC_MEM_FLAGS_EXECUTABLE)
1202 mapping_flags |= AMDGPU_VM_PAGE_EXECUTABLE;
1203 if (flags & ALLOC_MEM_FLAGS_COHERENT)
1204 mapping_flags |= AMDGPU_VM_MTYPE_UC;
1205 else
1206 mapping_flags |= AMDGPU_VM_MTYPE_NC;
1207 (*mem)->mapping_flags = mapping_flags;
1208
1209 amdgpu_sync_create(&(*mem)->sync);
1210
1211 ret = amdgpu_amdkfd_reserve_system_mem_limit(adev, size, alloc_domain);
1212 if (ret) {
1213 pr_debug("Insufficient system memory\n");
1214 goto err_reserve_system_mem;
1215 }
1216
1217 pr_debug("\tcreate BO VA 0x%llx size 0x%llx domain %s\n",
1218 va, size, domain_string(alloc_domain));
1219
1220 memset(&bp, 0, sizeof(bp));
1221 bp.size = size;
1222 bp.byte_align = byte_align;
1223 bp.domain = alloc_domain;
1224 bp.flags = alloc_flags;
1225 bp.type = ttm_bo_type_device;
1226 bp.resv = NULL;
1227 ret = amdgpu_bo_create(adev, &bp, &bo);
1228 if (ret) {
1229 pr_debug("Failed to create BO on domain %s. ret %d\n",
1230 domain_string(alloc_domain), ret);
1231 goto err_bo_create;
1232 }
1233 bo->kfd_bo = *mem;
1234 (*mem)->bo = bo;
1235 if (user_addr)
1236 bo->flags |= AMDGPU_AMDKFD_USERPTR_BO;
1237
1238 (*mem)->va = va;
1239 (*mem)->domain = domain;
1240 (*mem)->mapped_to_gpu_memory = 0;
1241 (*mem)->process_info = avm->process_info;
1242 add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info, user_addr);
1243
1244 if (user_addr) {
1245 ret = init_user_pages(*mem, current->mm, user_addr);
1246 if (ret) {
1247 mutex_lock(&avm->process_info->lock);
1248 list_del(&(*mem)->validate_list.head);
1249 mutex_unlock(&avm->process_info->lock);
1250 goto allocate_init_user_pages_failed;
1251 }
1252 }
1253
1254 if (offset)
1255 *offset = amdgpu_bo_mmap_offset(bo);
1256
1257 return 0;
1258
1259allocate_init_user_pages_failed:
1260 amdgpu_bo_unref(&bo);
1261
1262 goto err_reserve_system_mem;
1263err_bo_create:
1264 unreserve_system_mem_limit(adev, size, alloc_domain);
1265err_reserve_system_mem:
1266 mutex_destroy(&(*mem)->lock);
1267 kfree(*mem);
1268 return ret;
1269}
1270
1271int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
1272 struct kgd_dev *kgd, struct kgd_mem *mem)
1273{
1274 struct amdkfd_process_info *process_info = mem->process_info;
1275 unsigned long bo_size = mem->bo->tbo.mem.size;
1276 struct kfd_bo_va_list *entry, *tmp;
1277 struct bo_vm_reservation_context ctx;
1278 struct ttm_validate_buffer *bo_list_entry;
1279 int ret;
1280
1281 mutex_lock(&mem->lock);
1282
1283 if (mem->mapped_to_gpu_memory > 0) {
1284 pr_debug("BO VA 0x%llx size 0x%lx is still mapped.\n",
1285 mem->va, bo_size);
1286 mutex_unlock(&mem->lock);
1287 return -EBUSY;
1288 }
1289
1290 mutex_unlock(&mem->lock);
1291
1292
1293
1294
1295
1296 amdgpu_mn_unregister(mem->bo);
1297
1298
1299 bo_list_entry = &mem->validate_list;
1300 mutex_lock(&process_info->lock);
1301 list_del(&bo_list_entry->head);
1302 mutex_unlock(&process_info->lock);
1303
1304
1305 if (mem->user_pages) {
1306 pr_debug("%s: Freeing user_pages array\n", __func__);
1307 if (mem->user_pages[0])
1308 release_pages(mem->user_pages,
1309 mem->bo->tbo.ttm->num_pages);
1310 kvfree(mem->user_pages);
1311 }
1312
1313 ret = reserve_bo_and_cond_vms(mem, NULL, BO_VM_ALL, &ctx);
1314 if (unlikely(ret))
1315 return ret;
1316
1317
1318
1319
1320
1321 amdgpu_amdkfd_remove_eviction_fence(mem->bo,
1322 process_info->eviction_fence,
1323 NULL, NULL);
1324 pr_debug("Release VA 0x%llx - 0x%llx\n", mem->va,
1325 mem->va + bo_size * (1 + mem->aql_queue));
1326
1327
1328 list_for_each_entry_safe(entry, tmp, &mem->bo_va_list, bo_list)
1329 remove_bo_from_vm((struct amdgpu_device *)entry->kgd_dev,
1330 entry, bo_size);
1331
1332 ret = unreserve_bo_and_vms(&ctx, false, false);
1333
1334
1335 amdgpu_sync_free(&mem->sync);
1336
1337
1338 amdgpu_bo_unref(&mem->bo);
1339 mutex_destroy(&mem->lock);
1340 kfree(mem);
1341
1342 return ret;
1343}
1344
1345int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
1346 struct kgd_dev *kgd, struct kgd_mem *mem, void *vm)
1347{
1348 struct amdgpu_device *adev = get_amdgpu_device(kgd);
1349 struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
1350 int ret;
1351 struct amdgpu_bo *bo;
1352 uint32_t domain;
1353 struct kfd_bo_va_list *entry;
1354 struct bo_vm_reservation_context ctx;
1355 struct kfd_bo_va_list *bo_va_entry = NULL;
1356 struct kfd_bo_va_list *bo_va_entry_aql = NULL;
1357 unsigned long bo_size;
1358 bool is_invalid_userptr = false;
1359
1360 bo = mem->bo;
1361 if (!bo) {
1362 pr_err("Invalid BO when mapping memory to GPU\n");
1363 return -EINVAL;
1364 }
1365
1366
1367
1368
1369
1370 mutex_lock(&mem->process_info->lock);
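        /* Take mmap_sem in write mode to serialize with the MMU notifier: if
         * the BO is flagged invalid here, the notifier is no longer running
         * concurrently and the user mode queues have been stopped. Invalid
         * userptr BOs are not mapped below; the restore worker handles them.
         */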
        if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
                down_write(&current->mm->mmap_sem);
                is_invalid_userptr = atomic_read(&mem->invalid);
                up_write(&current->mm->mmap_sem);
        }
1381
1382 mutex_lock(&mem->lock);
1383
1384 domain = mem->domain;
1385 bo_size = bo->tbo.mem.size;
1386
1387 pr_debug("Map VA 0x%llx - 0x%llx to vm %p domain %s\n",
1388 mem->va,
1389 mem->va + bo_size * (1 + mem->aql_queue),
1390 vm, domain_string(domain));
1391
1392 ret = reserve_bo_and_vm(mem, vm, &ctx);
1393 if (unlikely(ret))
1394 goto out;
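        /* A userptr BO can be marked valid but not yet validated (still in
         * the system domain). The queues are still stopped in that case, so
         * leave the PTE update to the restore worker here as well.
         */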
1401 if (bo->tbo.mem.mem_type == TTM_PL_SYSTEM)
1402 is_invalid_userptr = true;
1403
1404 if (check_if_add_bo_to_vm(avm, mem)) {
1405 ret = add_bo_to_vm(adev, mem, avm, false,
1406 &bo_va_entry);
1407 if (ret)
1408 goto add_bo_to_vm_failed;
1409 if (mem->aql_queue) {
1410 ret = add_bo_to_vm(adev, mem, avm,
1411 true, &bo_va_entry_aql);
1412 if (ret)
1413 goto add_bo_to_vm_failed_aql;
1414 }
1415 } else {
1416 ret = vm_validate_pt_pd_bos(avm);
1417 if (unlikely(ret))
1418 goto add_bo_to_vm_failed;
1419 }
1420
1421 if (mem->mapped_to_gpu_memory == 0 &&
1422 !amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
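                /* Validate the BO only on its first mapping. Validation
                 * waits for pending evictions; the eviction fence is
                 * attached further down once the BO is mapped.
                 */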
1427 ret = amdgpu_amdkfd_bo_validate(bo, domain, true);
1428 if (ret) {
1429 pr_debug("Validate failed\n");
1430 goto map_bo_to_gpuvm_failed;
1431 }
1432 }
1433
1434 list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
1435 if (entry->bo_va->base.vm == vm && !entry->is_mapped) {
1436 pr_debug("\t map VA 0x%llx - 0x%llx in entry %p\n",
1437 entry->va, entry->va + bo_size,
1438 entry);
1439
1440 ret = map_bo_to_gpuvm(adev, entry, ctx.sync,
1441 is_invalid_userptr);
1442 if (ret) {
1443 pr_err("Failed to map radeon bo to gpuvm\n");
1444 goto map_bo_to_gpuvm_failed;
1445 }
1446
1447 ret = vm_update_pds(vm, ctx.sync);
1448 if (ret) {
1449 pr_err("Failed to update page directories\n");
1450 goto map_bo_to_gpuvm_failed;
1451 }
1452
1453 entry->is_mapped = true;
1454 mem->mapped_to_gpu_memory++;
1455 pr_debug("\t INC mapping count %d\n",
1456 mem->mapped_to_gpu_memory);
1457 }
1458 }
1459
1460 if (!amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) && !bo->pin_count)
1461 amdgpu_bo_fence(bo,
1462 &avm->process_info->eviction_fence->base,
1463 true);
1464 ret = unreserve_bo_and_vms(&ctx, false, false);
1465
1466 goto out;
1467
1468map_bo_to_gpuvm_failed:
1469 if (bo_va_entry_aql)
1470 remove_bo_from_vm(adev, bo_va_entry_aql, bo_size);
1471add_bo_to_vm_failed_aql:
1472 if (bo_va_entry)
1473 remove_bo_from_vm(adev, bo_va_entry, bo_size);
1474add_bo_to_vm_failed:
1475 unreserve_bo_and_vms(&ctx, false, false);
1476out:
1477 mutex_unlock(&mem->process_info->lock);
1478 mutex_unlock(&mem->lock);
1479 return ret;
1480}
1481
1482int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
1483 struct kgd_dev *kgd, struct kgd_mem *mem, void *vm)
1484{
1485 struct amdgpu_device *adev = get_amdgpu_device(kgd);
1486 struct amdkfd_process_info *process_info =
1487 ((struct amdgpu_vm *)vm)->process_info;
1488 unsigned long bo_size = mem->bo->tbo.mem.size;
1489 struct kfd_bo_va_list *entry;
1490 struct bo_vm_reservation_context ctx;
1491 int ret;
1492
1493 mutex_lock(&mem->lock);
1494
1495 ret = reserve_bo_and_cond_vms(mem, vm, BO_VM_MAPPED, &ctx);
1496 if (unlikely(ret))
1497 goto out;
1498
1499 if (ctx.n_vms == 0) {
1500 ret = -EINVAL;
1501 goto unreserve_out;
1502 }
1503
1504 ret = vm_validate_pt_pd_bos((struct amdgpu_vm *)vm);
1505 if (unlikely(ret))
1506 goto unreserve_out;
1507
1508 pr_debug("Unmap VA 0x%llx - 0x%llx from vm %p\n",
1509 mem->va,
1510 mem->va + bo_size * (1 + mem->aql_queue),
1511 vm);
1512
1513 list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
1514 if (entry->bo_va->base.vm == vm && entry->is_mapped) {
1515 pr_debug("\t unmap VA 0x%llx - 0x%llx from entry %p\n",
1516 entry->va,
1517 entry->va + bo_size,
1518 entry);
1519
1520 ret = unmap_bo_from_gpuvm(adev, entry, ctx.sync);
1521 if (ret == 0) {
1522 entry->is_mapped = false;
1523 } else {
1524 pr_err("failed to unmap VA 0x%llx\n",
1525 mem->va);
1526 goto unreserve_out;
1527 }
1528
1529 mem->mapped_to_gpu_memory--;
1530 pr_debug("\t DEC mapping count %d\n",
1531 mem->mapped_to_gpu_memory);
1532 }
1533 }
1534
1535
1536
1537
1538 if (mem->mapped_to_gpu_memory == 0 &&
1539 !amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm) && !mem->bo->pin_count)
1540 amdgpu_amdkfd_remove_eviction_fence(mem->bo,
1541 process_info->eviction_fence,
1542 NULL, NULL);
1543
1544unreserve_out:
1545 unreserve_bo_and_vms(&ctx, false, false);
1546out:
1547 mutex_unlock(&mem->lock);
1548 return ret;
1549}
1550
1551int amdgpu_amdkfd_gpuvm_sync_memory(
1552 struct kgd_dev *kgd, struct kgd_mem *mem, bool intr)
1553{
1554 struct amdgpu_sync sync;
1555 int ret;
1556
1557 amdgpu_sync_create(&sync);
1558
1559 mutex_lock(&mem->lock);
1560 amdgpu_sync_clone(&mem->sync, &sync);
1561 mutex_unlock(&mem->lock);
1562
1563 ret = amdgpu_sync_wait(&sync, intr);
1564 amdgpu_sync_free(&sync);
1565 return ret;
1566}
1567
1568int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct kgd_dev *kgd,
1569 struct kgd_mem *mem, void **kptr, uint64_t *size)
1570{
1571 int ret;
1572 struct amdgpu_bo *bo = mem->bo;
1573
1574 if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
1575 pr_err("userptr can't be mapped to kernel\n");
1576 return -EINVAL;
1577 }
1578
1579
1580
1581
1582 mutex_lock(&mem->process_info->lock);
1583
1584 ret = amdgpu_bo_reserve(bo, true);
1585 if (ret) {
1586 pr_err("Failed to reserve bo. ret %d\n", ret);
1587 goto bo_reserve_failed;
1588 }
1589
1590 ret = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT, NULL);
1591 if (ret) {
1592 pr_err("Failed to pin bo. ret %d\n", ret);
1593 goto pin_failed;
1594 }
1595
1596 ret = amdgpu_bo_kmap(bo, kptr);
1597 if (ret) {
1598 pr_err("Failed to map bo to kernel. ret %d\n", ret);
1599 goto kmap_failed;
1600 }
1601
1602 amdgpu_amdkfd_remove_eviction_fence(
1603 bo, mem->process_info->eviction_fence, NULL, NULL);
1604 list_del_init(&mem->validate_list.head);
1605
1606 if (size)
1607 *size = amdgpu_bo_size(bo);
1608
1609 amdgpu_bo_unreserve(bo);
1610
1611 mutex_unlock(&mem->process_info->lock);
1612 return 0;
1613
1614kmap_failed:
1615 amdgpu_bo_unpin(bo);
1616pin_failed:
1617 amdgpu_bo_unreserve(bo);
1618bo_reserve_failed:
1619 mutex_unlock(&mem->process_info->lock);
1620
1621 return ret;
1622}
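/* amdgpu_amdkfd_evict_userptr - Evict a userptr BO by stopping the queues
 *  if necessary.
 *
 * Called when a userptr BO is invalidated (MMU notifier path), so it must
 * stay lightweight: it only bumps the invalid and evicted_bos counters,
 * quiesces the process queues on the first eviction and schedules the
 * restore worker. Revalidation and remapping happen later in
 * amdgpu_amdkfd_restore_userptr_worker().
 */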
1635int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem,
1636 struct mm_struct *mm)
1637{
1638 struct amdkfd_process_info *process_info = mem->process_info;
1639 int invalid, evicted_bos;
1640 int r = 0;
1641
1642 invalid = atomic_inc_return(&mem->invalid);
1643 evicted_bos = atomic_inc_return(&process_info->evicted_bos);
1644 if (evicted_bos == 1) {
1645
1646 r = kgd2kfd->quiesce_mm(mm);
1647 if (r)
1648 pr_err("Failed to quiesce KFD\n");
1649 schedule_delayed_work(&process_info->restore_userptr_work,
1650 msecs_to_jiffies(AMDGPU_USERPTR_RESTORE_DELAY_MS));
1651 }
1652
1653 return r;
1654}
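/* update_invalid_user_pages - Update invalidated userptr BOs
 *
 * Moves invalidated (evicted) userptr BOs from userptr_valid_list to
 * userptr_inval_list and gets updated user pages for every BO that was
 * invalidated since the last attempt.
 */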
1662static int update_invalid_user_pages(struct amdkfd_process_info *process_info,
1663 struct mm_struct *mm)
1664{
1665 struct kgd_mem *mem, *tmp_mem;
1666 struct amdgpu_bo *bo;
1667 struct ttm_operation_ctx ctx = { false, false };
1668 int invalid, ret;
1669
1670
1671
1672
1673 list_for_each_entry_safe(mem, tmp_mem,
1674 &process_info->userptr_valid_list,
1675 validate_list.head) {
1676 if (!atomic_read(&mem->invalid))
1677 continue;
1678
1679 bo = mem->bo;
1680
1681 if (amdgpu_bo_reserve(bo, true))
1682 return -EAGAIN;
1683 amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
1684 ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
1685 amdgpu_bo_unreserve(bo);
1686 if (ret) {
1687 pr_err("%s: Failed to invalidate userptr BO\n",
1688 __func__);
1689 return -EAGAIN;
1690 }
1691
1692 list_move_tail(&mem->validate_list.head,
1693 &process_info->userptr_inval_list);
1694 }
1695
1696 if (list_empty(&process_info->userptr_inval_list))
1697 return 0;
1698
1699
1700 list_for_each_entry(mem, &process_info->userptr_inval_list,
1701 validate_list.head) {
1702 invalid = atomic_read(&mem->invalid);
1703 if (!invalid)
1704
1705
1706
1707 continue;
1708
1709 bo = mem->bo;
1710
1711 if (!mem->user_pages) {
1712 mem->user_pages =
1713 kvmalloc_array(bo->tbo.ttm->num_pages,
1714 sizeof(struct page *),
1715 GFP_KERNEL | __GFP_ZERO);
1716 if (!mem->user_pages) {
1717 pr_err("%s: Failed to allocate pages array\n",
1718 __func__);
1719 return -ENOMEM;
1720 }
1721 } else if (mem->user_pages[0]) {
1722 release_pages(mem->user_pages, bo->tbo.ttm->num_pages);
1723 }
1724
1725
1726 ret = amdgpu_ttm_tt_get_user_pages(bo->tbo.ttm,
1727 mem->user_pages);
1728 if (ret) {
1729 mem->user_pages[0] = NULL;
1730 pr_info("%s: Failed to get user pages: %d\n",
1731 __func__, ret);
1732
1733
1734
1735
1736
1737 }
1738
1739
1740
1741
1742 if (atomic_cmpxchg(&mem->invalid, invalid, 0) != invalid)
1743 return -EAGAIN;
1744 }
1745
1746 return 0;
1747}
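/* validate_invalid_user_pages - Validate userptr BOs with updated user pages
 *
 * Validates BOs on userptr_inval_list with their new user pages and moves
 * them back to userptr_valid_list. Also updates the GPUVM mappings and page
 * directories of all affected VMs.
 */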
1755static int validate_invalid_user_pages(struct amdkfd_process_info *process_info)
1756{
1757 struct amdgpu_bo_list_entry *pd_bo_list_entries;
1758 struct list_head resv_list, duplicates;
1759 struct ww_acquire_ctx ticket;
1760 struct amdgpu_sync sync;
1761
1762 struct amdgpu_vm *peer_vm;
1763 struct kgd_mem *mem, *tmp_mem;
1764 struct amdgpu_bo *bo;
1765 struct ttm_operation_ctx ctx = { false, false };
1766 int i, ret;
1767
1768 pd_bo_list_entries = kcalloc(process_info->n_vms,
1769 sizeof(struct amdgpu_bo_list_entry),
1770 GFP_KERNEL);
1771 if (!pd_bo_list_entries) {
1772 pr_err("%s: Failed to allocate PD BO list entries\n", __func__);
1773 return -ENOMEM;
1774 }
1775
1776 INIT_LIST_HEAD(&resv_list);
1777 INIT_LIST_HEAD(&duplicates);
1778
1779
1780 i = 0;
1781 list_for_each_entry(peer_vm, &process_info->vm_list_head,
1782 vm_list_node)
1783 amdgpu_vm_get_pd_bo(peer_vm, &resv_list,
1784 &pd_bo_list_entries[i++]);
1785
1786 list_for_each_entry(mem, &process_info->userptr_inval_list,
1787 validate_list.head) {
1788 list_add_tail(&mem->resv_list.head, &resv_list);
1789 mem->resv_list.bo = mem->validate_list.bo;
1790 mem->resv_list.shared = mem->validate_list.shared;
1791 }
1792
1793
1794 ret = ttm_eu_reserve_buffers(&ticket, &resv_list, false, &duplicates);
1795 WARN(!list_empty(&duplicates), "Duplicates should be empty");
1796 if (ret)
1797 goto out;
1798
1799 amdgpu_sync_create(&sync);
1800
1801
1802
1803
1804
1805 list_for_each_entry(peer_vm, &process_info->vm_list_head,
1806 vm_list_node)
1807 amdgpu_amdkfd_remove_eviction_fence(peer_vm->root.base.bo,
1808 process_info->eviction_fence,
1809 NULL, NULL);
1810
1811 ret = process_validate_vms(process_info);
1812 if (ret)
1813 goto unreserve_out;
1814
1815
1816 list_for_each_entry_safe(mem, tmp_mem,
1817 &process_info->userptr_inval_list,
1818 validate_list.head) {
1819 struct kfd_bo_va_list *bo_va_entry;
1820
1821 bo = mem->bo;
1822
1823
1824 if (mem->user_pages[0]) {
1825 amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm,
1826 mem->user_pages);
1827 amdgpu_ttm_placement_from_domain(bo, mem->domain);
1828 ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
1829 if (ret) {
1830 pr_err("%s: failed to validate BO\n", __func__);
1831 goto unreserve_out;
1832 }
1833 }
1834
1835
1836
1837
1838
1839
1840 kvfree(mem->user_pages);
1841 mem->user_pages = NULL;
1842 list_move_tail(&mem->validate_list.head,
1843 &process_info->userptr_valid_list);
1844
1845
1846
1847
1848
1849
1850
1851 list_for_each_entry(bo_va_entry, &mem->bo_va_list, bo_list) {
1852 if (!bo_va_entry->is_mapped)
1853 continue;
1854
1855 ret = update_gpuvm_pte((struct amdgpu_device *)
1856 bo_va_entry->kgd_dev,
1857 bo_va_entry, &sync);
1858 if (ret) {
1859 pr_err("%s: update PTE failed\n", __func__);
1860
1861 atomic_inc(&mem->invalid);
1862 goto unreserve_out;
1863 }
1864 }
1865 }
1866
1867
1868 ret = process_update_pds(process_info, &sync);
1869
1870unreserve_out:
1871 list_for_each_entry(peer_vm, &process_info->vm_list_head,
1872 vm_list_node)
1873 amdgpu_bo_fence(peer_vm->root.base.bo,
1874 &process_info->eviction_fence->base, true);
1875 ttm_eu_backoff_reservation(&ticket, &resv_list);
1876 amdgpu_sync_wait(&sync, false);
1877 amdgpu_sync_free(&sync);
1878out:
1879 kfree(pd_bo_list_entries);
1880
1881 return ret;
1882}
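/* Delayed worker that restores evicted userptr BOs
 *
 * Updates and validates all invalidated userptr BOs. If that succeeds and no
 * new eviction happened in the meantime, the user mode queues are resumed;
 * otherwise the work is rescheduled for another attempt.
 */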
1890static void amdgpu_amdkfd_restore_userptr_worker(struct work_struct *work)
1891{
1892 struct delayed_work *dwork = to_delayed_work(work);
1893 struct amdkfd_process_info *process_info =
1894 container_of(dwork, struct amdkfd_process_info,
1895 restore_userptr_work);
1896 struct task_struct *usertask;
1897 struct mm_struct *mm;
1898 int evicted_bos;
1899
1900 evicted_bos = atomic_read(&process_info->evicted_bos);
1901 if (!evicted_bos)
1902 return;
1903
1904
1905 usertask = get_pid_task(process_info->pid, PIDTYPE_PID);
1906 if (!usertask)
1907 return;
1908 mm = get_task_mm(usertask);
1909 if (!mm) {
1910 put_task_struct(usertask);
1911 return;
1912 }
1913
1914 mutex_lock(&process_info->lock);
1915
1916 if (update_invalid_user_pages(process_info, mm))
1917 goto unlock_out;
1918
1919
1920
1921
1922 if (!list_empty(&process_info->userptr_inval_list)) {
1923 if (atomic_read(&process_info->evicted_bos) != evicted_bos)
1924 goto unlock_out;
1925
1926 if (validate_invalid_user_pages(process_info))
1927 goto unlock_out;
1928 }
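        /* Only resume the queues if no eviction happened since this run
         * started; otherwise keep them stopped and let the rescheduled work
         * try again.
         */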
1934 if (atomic_cmpxchg(&process_info->evicted_bos, evicted_bos, 0) !=
1935 evicted_bos)
1936 goto unlock_out;
1937 evicted_bos = 0;
1938 if (kgd2kfd->resume_mm(mm)) {
1939 pr_err("%s: Failed to resume KFD\n", __func__);
1940
1941
1942
1943 }
1944unlock_out:
1945 mutex_unlock(&process_info->lock);
1946 mmput(mm);
1947 put_task_struct(usertask);
1948
1949
1950 if (evicted_bos)
1951 schedule_delayed_work(&process_info->restore_userptr_work,
1952 msecs_to_jiffies(AMDGPU_USERPTR_RESTORE_DELAY_MS));
1953}
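/* amdgpu_amdkfd_gpuvm_restore_process_bos - Restore all BOs of a KFD process
 *  after an eviction.
 *
 * @info: the process's amdkfd_process_info
 * @ef: returns the new eviction fence
 *
 * Reserves all KFD BOs of the process together with the PD/PT BOs of all of
 * its VMs, validates them, updates the GPUVM mappings and page directories
 * and waits for those updates. Finally a new eviction fence replaces the old
 * one and is attached to all BOs and page directories.
 */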
1973int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
1974{
1975 struct amdgpu_bo_list_entry *pd_bo_list;
1976 struct amdkfd_process_info *process_info = info;
1977 struct amdgpu_vm *peer_vm;
1978 struct kgd_mem *mem;
1979 struct bo_vm_reservation_context ctx;
1980 struct amdgpu_amdkfd_fence *new_fence;
1981 int ret = 0, i;
1982 struct list_head duplicate_save;
1983 struct amdgpu_sync sync_obj;
1984
1985 INIT_LIST_HEAD(&duplicate_save);
1986 INIT_LIST_HEAD(&ctx.list);
1987 INIT_LIST_HEAD(&ctx.duplicates);
1988
1989 pd_bo_list = kcalloc(process_info->n_vms,
1990 sizeof(struct amdgpu_bo_list_entry),
1991 GFP_KERNEL);
1992 if (!pd_bo_list)
1993 return -ENOMEM;
1994
1995 i = 0;
1996 mutex_lock(&process_info->lock);
1997 list_for_each_entry(peer_vm, &process_info->vm_list_head,
1998 vm_list_node)
1999 amdgpu_vm_get_pd_bo(peer_vm, &ctx.list, &pd_bo_list[i++]);
2000
2001
2002
2003
2004 list_for_each_entry(mem, &process_info->kfd_bo_list,
2005 validate_list.head) {
2006
2007 list_add_tail(&mem->resv_list.head, &ctx.list);
2008 mem->resv_list.bo = mem->validate_list.bo;
2009 mem->resv_list.shared = mem->validate_list.shared;
2010 }
2011
2012 ret = ttm_eu_reserve_buffers(&ctx.ticket, &ctx.list,
2013 false, &duplicate_save);
2014 if (ret) {
2015 pr_debug("Memory eviction: TTM Reserve Failed. Try again\n");
2016 goto ttm_reserve_fail;
2017 }
2018
2019 amdgpu_sync_create(&sync_obj);
2020
2021
2022 ret = process_validate_vms(process_info);
2023 if (ret)
2024 goto validate_map_fail;
2025
2026
2027
2028 list_for_each_entry(peer_vm, &process_info->vm_list_head,
2029 vm_list_node) {
2030 struct amdgpu_bo *bo = peer_vm->root.base.bo;
2031
2032 ttm_bo_wait(&bo->tbo, false, false);
2033 }
2034
2035
2036 list_for_each_entry(mem, &process_info->kfd_bo_list,
2037 validate_list.head) {
2038
2039 struct amdgpu_bo *bo = mem->bo;
2040 uint32_t domain = mem->domain;
2041 struct kfd_bo_va_list *bo_va_entry;
2042
2043 ret = amdgpu_amdkfd_bo_validate(bo, domain, false);
2044 if (ret) {
2045 pr_debug("Memory eviction: Validate BOs failed. Try again\n");
2046 goto validate_map_fail;
2047 }
2048
2049 list_for_each_entry(bo_va_entry, &mem->bo_va_list,
2050 bo_list) {
2051 ret = update_gpuvm_pte((struct amdgpu_device *)
2052 bo_va_entry->kgd_dev,
2053 bo_va_entry,
2054 &sync_obj);
2055 if (ret) {
2056 pr_debug("Memory eviction: update PTE failed. Try again\n");
2057 goto validate_map_fail;
2058 }
2059 }
2060 }
2061
2062
2063 ret = process_update_pds(process_info, &sync_obj);
2064 if (ret) {
2065 pr_debug("Memory eviction: update PDs failed. Try again\n");
2066 goto validate_map_fail;
2067 }
2068
2069 amdgpu_sync_wait(&sync_obj, false);
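        /* Release the old eviction fence and create a new one, because a
         * fence only goes from unsignaled to signaled and cannot be reused.
         * The new fence keeps the old fence's context and mm.
         */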
2075 new_fence = amdgpu_amdkfd_fence_create(
2076 process_info->eviction_fence->base.context,
2077 process_info->eviction_fence->mm);
2078 if (!new_fence) {
2079 pr_err("Failed to create eviction fence\n");
2080 ret = -ENOMEM;
2081 goto validate_map_fail;
2082 }
2083 dma_fence_put(&process_info->eviction_fence->base);
2084 process_info->eviction_fence = new_fence;
2085 *ef = dma_fence_get(&new_fence->base);
2086
2087
2088 list_for_each_entry(mem, &process_info->kfd_bo_list,
2089 validate_list.head)
2090 ttm_bo_wait(&mem->bo->tbo, false, false);
2091 list_for_each_entry(mem, &process_info->kfd_bo_list,
2092 validate_list.head)
2093 amdgpu_bo_fence(mem->bo,
2094 &process_info->eviction_fence->base, true);
2095
2096
2097 list_for_each_entry(peer_vm, &process_info->vm_list_head,
2098 vm_list_node) {
2099 struct amdgpu_bo *bo = peer_vm->root.base.bo;
2100
2101 amdgpu_bo_fence(bo, &process_info->eviction_fence->base, true);
2102 }
2103
2104validate_map_fail:
2105 ttm_eu_backoff_reservation(&ctx.ticket, &ctx.list);
2106 amdgpu_sync_free(&sync_obj);
2107ttm_reserve_fail:
2108 mutex_unlock(&process_info->lock);
2109 kfree(pd_bo_list);
2110 return ret;
2111}
2112