1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28#include <linux/dma-fence-array.h>
29#include <linux/interval_tree_generic.h>
30#include <linux/idr.h>
31
32#include <drm/amdgpu_drm.h>
33#include "amdgpu.h"
34#include "amdgpu_trace.h"
35#include "amdgpu_amdkfd.h"
36#include "amdgpu_gmc.h"
37#include "amdgpu_xgmi.h"
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
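/*
 * BO VA mappings are kept in a per-VM interval tree indexed by start/last
 * GPU page frame number; START/LAST below only exist to instantiate the
 * generic interval tree implementation as amdgpu_vm_it.
 */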
60#define START(node) ((node)->start)
61#define LAST(node) ((node)->last)
62
63INTERVAL_TREE_DEFINE(struct amdgpu_bo_va_mapping, rb, uint64_t, __subtree_last,
64 START, LAST, static, amdgpu_vm_it)
65
66#undef START
67#undef LAST
68
69
70
71
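/**
 * struct amdgpu_prt_cb - helper to disable partial resident texture feature
 * from a fence callback
 *
 * @adev: amdgpu device
 * @cb: callback
 */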
72struct amdgpu_prt_cb {
73
74
75
76
77 struct amdgpu_device *adev;
78
79
80
81
82 struct dma_fence_cb cb;
83};
84
85
86
87
88
89
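/*
 * Eviction lock helpers: the same lock is taken from the BO eviction path
 * (see amdgpu_vm_evictable()), so filesystem reclaim is suppressed with
 * memalloc_nofs_save() while it is held to avoid recursing into reclaim
 * under the lock.
 */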
90static inline void amdgpu_vm_eviction_lock(struct amdgpu_vm *vm)
91{
92 mutex_lock(&vm->eviction_lock);
93 vm->saved_flags = memalloc_nofs_save();
94}
95
96static inline int amdgpu_vm_eviction_trylock(struct amdgpu_vm *vm)
97{
98 if (mutex_trylock(&vm->eviction_lock)) {
99 vm->saved_flags = memalloc_nofs_save();
100 return 1;
101 }
102 return 0;
103}
104
105static inline void amdgpu_vm_eviction_unlock(struct amdgpu_vm *vm)
106{
107 memalloc_nofs_restore(vm->saved_flags);
108 mutex_unlock(&vm->eviction_lock);
109}
110
111
112
113
114
115
116
117
118
119
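/**
 * amdgpu_vm_level_shift - return the addr shift for each level
 *
 * @adev: amdgpu_device pointer
 * @level: VMPT level
 *
 * Returns:
 * The number of bits the pfn needs to be right shifted for a level.
 */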
120static unsigned amdgpu_vm_level_shift(struct amdgpu_device *adev,
121 unsigned level)
122{
123 switch (level) {
124 case AMDGPU_VM_PDB2:
125 case AMDGPU_VM_PDB1:
126 case AMDGPU_VM_PDB0:
127 return 9 * (AMDGPU_VM_PDB0 - level) +
128 adev->vm_manager.block_size;
129 case AMDGPU_VM_PTB:
130 return 0;
131 default:
132 return ~0;
133 }
134}
135
136
137
138
139
140
141
142
143
144
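/**
 * amdgpu_vm_num_entries - return the number of entries in a PD/PT
 *
 * @adev: amdgpu_device pointer
 * @level: VMPT level
 *
 * Returns:
 * The number of entries in a page directory or page table.
 */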
145static unsigned amdgpu_vm_num_entries(struct amdgpu_device *adev,
146 unsigned level)
147{
148 unsigned shift = amdgpu_vm_level_shift(adev,
149 adev->vm_manager.root_level);
150
151 if (level == adev->vm_manager.root_level)
152
153 return round_up(adev->vm_manager.max_pfn, 1ULL << shift)
154 >> shift;
155 else if (level != AMDGPU_VM_PTB)
156
157 return 512;
158 else
159
160 return AMDGPU_VM_PTE_COUNT(adev);
161}
162
163
164
165
166
167
168
169
170
171static unsigned amdgpu_vm_num_ats_entries(struct amdgpu_device *adev)
172{
173 unsigned shift;
174
175 shift = amdgpu_vm_level_shift(adev, adev->vm_manager.root_level);
176 return AMDGPU_GMC_HOLE_START >> (shift + AMDGPU_GPU_PAGE_SHIFT);
177}
178
179
180
181
182
183
184
185
186
187
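/**
 * amdgpu_vm_entries_mask - the mask to get the entry number of a PD/PT
 *
 * @adev: amdgpu_device pointer
 * @level: VMPT level
 *
 * Returns:
 * The mask to extract the entry number of a PD/PT from an address.
 */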
188static uint32_t amdgpu_vm_entries_mask(struct amdgpu_device *adev,
189 unsigned int level)
190{
191 if (level <= adev->vm_manager.root_level)
192 return 0xffffffff;
193 else if (level != AMDGPU_VM_PTB)
194 return 0x1ff;
195 else
196 return AMDGPU_VM_PTE_COUNT(adev) - 1;
197}
198
199
200
201
202
203
204
205
206
207
208static unsigned amdgpu_vm_bo_size(struct amdgpu_device *adev, unsigned level)
209{
210 return AMDGPU_GPU_PAGE_ALIGN(amdgpu_vm_num_entries(adev, level) * 8);
211}
212
213
214
215
216
217
218
219
220
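/**
 * amdgpu_vm_bo_evicted - vm_bo is evicted
 *
 * @vm_bo: vm_bo which is evicted
 *
 * State for PDs/PTs and per VM BOs which are not at the location they
 * should be. Page tables are kept at the head of the evicted list,
 * per VM BOs at the tail.
 */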
221static void amdgpu_vm_bo_evicted(struct amdgpu_vm_bo_base *vm_bo)
222{
223 struct amdgpu_vm *vm = vm_bo->vm;
224 struct amdgpu_bo *bo = vm_bo->bo;
225
226 vm_bo->moved = true;
227 if (bo->tbo.type == ttm_bo_type_kernel)
228 list_move(&vm_bo->vm_status, &vm->evicted);
229 else
230 list_move_tail(&vm_bo->vm_status, &vm->evicted);
231}
232
233
234
235
236
237
238
239
240static void amdgpu_vm_bo_moved(struct amdgpu_vm_bo_base *vm_bo)
241{
242 list_move(&vm_bo->vm_status, &vm_bo->vm->moved);
243}
244
245
246
247
248
249
250
251
252
253static void amdgpu_vm_bo_idle(struct amdgpu_vm_bo_base *vm_bo)
254{
255 list_move(&vm_bo->vm_status, &vm_bo->vm->idle);
256 vm_bo->moved = false;
257}
258
259
260
261
262
263
264
265
266
267static void amdgpu_vm_bo_invalidated(struct amdgpu_vm_bo_base *vm_bo)
268{
269 spin_lock(&vm_bo->vm->invalidated_lock);
270 list_move(&vm_bo->vm_status, &vm_bo->vm->invalidated);
271 spin_unlock(&vm_bo->vm->invalidated_lock);
272}
273
274
275
276
277
278
279
280
281
282static void amdgpu_vm_bo_relocated(struct amdgpu_vm_bo_base *vm_bo)
283{
284 if (vm_bo->bo->parent)
285 list_move(&vm_bo->vm_status, &vm_bo->vm->relocated);
286 else
287 amdgpu_vm_bo_idle(vm_bo);
288}
289
290
291
292
293
294
295
296
297
298static void amdgpu_vm_bo_done(struct amdgpu_vm_bo_base *vm_bo)
299{
300 spin_lock(&vm_bo->vm->invalidated_lock);
301 list_del_init(&vm_bo->vm_status);
302 spin_unlock(&vm_bo->vm->invalidated_lock);
303}
304
305
306
307
308
309
310
311
312
313
314
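/**
 * amdgpu_vm_bo_base_init - add a BO to the list of BOs associated with a VM
 *
 * @base: base structure for tracking BO usage in a VM
 * @vm: vm to which the BO is to be added
 * @bo: amdgpu buffer object
 *
 * Initialize a bo_va_base structure and add it to the appropriate state list.
 */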
315static void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
316 struct amdgpu_vm *vm,
317 struct amdgpu_bo *bo)
318{
319 base->vm = vm;
320 base->bo = bo;
321 base->next = NULL;
322 INIT_LIST_HEAD(&base->vm_status);
323
324 if (!bo)
325 return;
326 base->next = bo->vm_bo;
327 bo->vm_bo = base;
328
329 if (bo->tbo.base.resv != vm->root.base.bo->tbo.base.resv)
330 return;
331
332 vm->bulk_moveable = false;
333 if (bo->tbo.type == ttm_bo_type_kernel && bo->parent)
334 amdgpu_vm_bo_relocated(base);
335 else
336 amdgpu_vm_bo_idle(base);
337
338 if (bo->preferred_domains &
339 amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type))
340 return;
341
342
343
344
345
346
347 amdgpu_vm_bo_evicted(base);
348}
349
350
351
352
353
354
355
356
357
358static struct amdgpu_vm_pt *amdgpu_vm_pt_parent(struct amdgpu_vm_pt *pt)
359{
360 struct amdgpu_bo *parent = pt->base.bo->parent;
361
362 if (!parent)
363 return NULL;
364
365 return container_of(parent->vm_bo, struct amdgpu_vm_pt, base);
366}
367
368
369
370
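/**
 * struct amdgpu_vm_pt_cursor - state for walking the page table hierarchy
 *
 * @pfn: current GPU page frame number
 * @parent: parent page directory entry
 * @entry: current page table entry
 * @level: current hierarchy level (starts at adev->vm_manager.root_level)
 */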
371struct amdgpu_vm_pt_cursor {
372 uint64_t pfn;
373 struct amdgpu_vm_pt *parent;
374 struct amdgpu_vm_pt *entry;
375 unsigned level;
376};
377
378
379
380
381
382
383
384
385
386
387
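/**
 * amdgpu_vm_pt_start - start a PD/PT walk
 *
 * @adev: amdgpu_device pointer
 * @vm: amdgpu_vm structure
 * @start: start address of the walk
 * @cursor: state to initialize
 *
 * Initialize the cursor to begin a walk at the root level.
 */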
388static void amdgpu_vm_pt_start(struct amdgpu_device *adev,
389 struct amdgpu_vm *vm, uint64_t start,
390 struct amdgpu_vm_pt_cursor *cursor)
391{
392 cursor->pfn = start;
393 cursor->parent = NULL;
394 cursor->entry = &vm->root;
395 cursor->level = adev->vm_manager.root_level;
396}
397
398
399
400
401
402
403
404
405
406
407
408static bool amdgpu_vm_pt_descendant(struct amdgpu_device *adev,
409 struct amdgpu_vm_pt_cursor *cursor)
410{
411 unsigned mask, shift, idx;
412
413 if (!cursor->entry->entries)
414 return false;
415
416 BUG_ON(!cursor->entry->base.bo);
417 mask = amdgpu_vm_entries_mask(adev, cursor->level);
418 shift = amdgpu_vm_level_shift(adev, cursor->level);
419
420 ++cursor->level;
421 idx = (cursor->pfn >> shift) & mask;
422 cursor->parent = cursor->entry;
423 cursor->entry = &cursor->entry->entries[idx];
424 return true;
425}
426
427
428
429
430
431
432
433
434
435
436
437static bool amdgpu_vm_pt_sibling(struct amdgpu_device *adev,
438 struct amdgpu_vm_pt_cursor *cursor)
439{
440 unsigned shift, num_entries;
441
442
443 if (!cursor->parent)
444 return false;
445
446
447 shift = amdgpu_vm_level_shift(adev, cursor->level - 1);
448 num_entries = amdgpu_vm_num_entries(adev, cursor->level - 1);
449
450 if (cursor->entry == &cursor->parent->entries[num_entries - 1])
451 return false;
452
453 cursor->pfn += 1ULL << shift;
454 cursor->pfn &= ~((1ULL << shift) - 1);
455 ++cursor->entry;
456 return true;
457}
458
459
460
461
462
463
464
465
466
467
468static bool amdgpu_vm_pt_ancestor(struct amdgpu_vm_pt_cursor *cursor)
469{
470 if (!cursor->parent)
471 return false;
472
473 --cursor->level;
474 cursor->entry = cursor->parent;
475 cursor->parent = amdgpu_vm_pt_parent(cursor->parent);
476 return true;
477}
478
479
480
481
482
483
484
485
486
487static void amdgpu_vm_pt_next(struct amdgpu_device *adev,
488 struct amdgpu_vm_pt_cursor *cursor)
489{
490
491 if (amdgpu_vm_pt_descendant(adev, cursor))
492 return;
493
494
495 while (!amdgpu_vm_pt_sibling(adev, cursor)) {
496
497 if (!amdgpu_vm_pt_ancestor(cursor)) {
498 cursor->pfn = ~0ll;
499 return;
500 }
501 }
502}
503
504
505
506
507
508
509
510
511
512
513
514static void amdgpu_vm_pt_first_dfs(struct amdgpu_device *adev,
515 struct amdgpu_vm *vm,
516 struct amdgpu_vm_pt_cursor *start,
517 struct amdgpu_vm_pt_cursor *cursor)
518{
519 if (start)
520 *cursor = *start;
521 else
522 amdgpu_vm_pt_start(adev, vm, 0, cursor);
523 while (amdgpu_vm_pt_descendant(adev, cursor));
524}
525
526
527
528
529
530
531
532
533
534
535static bool amdgpu_vm_pt_continue_dfs(struct amdgpu_vm_pt_cursor *start,
536 struct amdgpu_vm_pt *entry)
537{
538 return entry && (!start || entry != start->entry);
539}
540
541
542
543
544
545
546
547
548
549static void amdgpu_vm_pt_next_dfs(struct amdgpu_device *adev,
550 struct amdgpu_vm_pt_cursor *cursor)
551{
552 if (!cursor->entry)
553 return;
554
555 if (!cursor->parent)
556 cursor->entry = NULL;
557 else if (amdgpu_vm_pt_sibling(adev, cursor))
558 while (amdgpu_vm_pt_descendant(adev, cursor));
559 else
560 amdgpu_vm_pt_ancestor(cursor);
561}
562
563
564
565
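/*
 * for_each_amdgpu_vm_pt_dfs_safe - safe depth-first search over all PDs/PTs
 *
 * The next entry is looked up before @entry is handed to the loop body, so
 * the current entry may be freed inside the loop.
 */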
566#define for_each_amdgpu_vm_pt_dfs_safe(adev, vm, start, cursor, entry) \
567 for (amdgpu_vm_pt_first_dfs((adev), (vm), (start), &(cursor)), \
568 (entry) = (cursor).entry, amdgpu_vm_pt_next_dfs((adev), &(cursor));\
569 amdgpu_vm_pt_continue_dfs((start), (entry)); \
570 (entry) = (cursor).entry, amdgpu_vm_pt_next_dfs((adev), &(cursor)))
571
572
573
574
575
576
577
578
579
580
581
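/**
 * amdgpu_vm_get_pd_bo - add the VM PD to a validation list
 *
 * @vm: vm providing the BOs
 * @validated: head of the validation list
 * @entry: entry to add
 *
 * Add the page directory to the list of BOs to validate for command
 * submission.
 */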
582void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
583 struct list_head *validated,
584 struct amdgpu_bo_list_entry *entry)
585{
586 entry->priority = 0;
587 entry->tv.bo = &vm->root.base.bo->tbo;
588
589 entry->tv.num_shared = 4;
590 entry->user_pages = NULL;
591 list_add(&entry->tv.head, validated);
592}
593
594
595
596
597
598
599
600
601
602void amdgpu_vm_del_from_lru_notify(struct ttm_buffer_object *bo)
603{
604 struct amdgpu_bo *abo;
605 struct amdgpu_vm_bo_base *bo_base;
606
607 if (!amdgpu_bo_is_amdgpu_bo(bo))
608 return;
609
610 if (bo->mem.placement & TTM_PL_FLAG_NO_EVICT)
611 return;
612
613 abo = ttm_to_amdgpu_bo(bo);
614 if (!abo->parent)
615 return;
616 for (bo_base = abo->vm_bo; bo_base; bo_base = bo_base->next) {
617 struct amdgpu_vm *vm = bo_base->vm;
618
619 if (abo->tbo.base.resv == vm->root.base.bo->tbo.base.resv)
620 vm->bulk_moveable = false;
621 }
622
623}
624
625
626
627
628
629
630
631
632
633void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev,
634 struct amdgpu_vm *vm)
635{
636 struct amdgpu_vm_bo_base *bo_base;
637
638 if (vm->bulk_moveable) {
639 spin_lock(&ttm_bo_glob.lru_lock);
640 ttm_bo_bulk_move_lru_tail(&vm->lru_bulk_move);
641 spin_unlock(&ttm_bo_glob.lru_lock);
642 return;
643 }
644
645 memset(&vm->lru_bulk_move, 0, sizeof(vm->lru_bulk_move));
646
647 spin_lock(&ttm_bo_glob.lru_lock);
648 list_for_each_entry(bo_base, &vm->idle, vm_status) {
649 struct amdgpu_bo *bo = bo_base->bo;
650
651 if (!bo->parent)
652 continue;
653
654 ttm_bo_move_to_lru_tail(&bo->tbo, &vm->lru_bulk_move);
655 if (bo->shadow)
656 ttm_bo_move_to_lru_tail(&bo->shadow->tbo,
657 &vm->lru_bulk_move);
658 }
659 spin_unlock(&ttm_bo_glob.lru_lock);
660
661 vm->bulk_moveable = true;
662}
663
664
665
666
667
668
669
670
671
672
673
674
675
676
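/**
 * amdgpu_vm_validate_pt_bos - validate the page table BOs
 *
 * @adev: amdgpu_device pointer
 * @vm: vm providing the BOs
 * @validate: callback to do the validation
 * @param: parameter for the validation callback
 *
 * Validate the page table BOs on command submission if necessary.
 *
 * Returns:
 * Validation result.
 */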
677int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
678 int (*validate)(void *p, struct amdgpu_bo *bo),
679 void *param)
680{
681 struct amdgpu_vm_bo_base *bo_base, *tmp;
682 int r;
683
684 vm->bulk_moveable &= list_empty(&vm->evicted);
685
686 list_for_each_entry_safe(bo_base, tmp, &vm->evicted, vm_status) {
687 struct amdgpu_bo *bo = bo_base->bo;
688
689 r = validate(param, bo);
690 if (r)
691 return r;
692
693 if (bo->tbo.type != ttm_bo_type_kernel) {
694 amdgpu_vm_bo_moved(bo_base);
695 } else {
696 vm->update_funcs->map_table(bo);
697 amdgpu_vm_bo_relocated(bo_base);
698 }
699 }
700
701 amdgpu_vm_eviction_lock(vm);
702 vm->evicting = false;
703 amdgpu_vm_eviction_unlock(vm);
704
705 return 0;
706}
707
708
709
710
711
712
713
714
715
716
717
718bool amdgpu_vm_ready(struct amdgpu_vm *vm)
719{
720 return list_empty(&vm->evicted);
721}
722
723
724
725
726
727
728
729
730
731
732
733
734
735
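/**
 * amdgpu_vm_clear_bo - initially clear the PDs/PTs
 *
 * @adev: amdgpu_device pointer
 * @vm: VM to clear the BO for
 * @bo: BO to clear
 * @immediate: use an immediate update
 *
 * The root PD needs to be reserved when calling this.
 *
 * Returns:
 * 0 on success, errno otherwise.
 */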
736static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
737 struct amdgpu_vm *vm,
738 struct amdgpu_bo *bo,
739 bool immediate)
740{
741 struct ttm_operation_ctx ctx = { true, false };
742 unsigned level = adev->vm_manager.root_level;
743 struct amdgpu_vm_update_params params;
744 struct amdgpu_bo *ancestor = bo;
745 unsigned entries, ats_entries;
746 uint64_t addr;
747 int r;
748
749
750 if (ancestor->parent) {
751 ++level;
752 while (ancestor->parent->parent) {
753 ++level;
754 ancestor = ancestor->parent;
755 }
756 }
757
758 entries = amdgpu_bo_size(bo) / 8;
759 if (!vm->pte_support_ats) {
760 ats_entries = 0;
761
762 } else if (!bo->parent) {
763 ats_entries = amdgpu_vm_num_ats_entries(adev);
764 ats_entries = min(ats_entries, entries);
765 entries -= ats_entries;
766
767 } else {
768 struct amdgpu_vm_pt *pt;
769
770 pt = container_of(ancestor->vm_bo, struct amdgpu_vm_pt, base);
771 ats_entries = amdgpu_vm_num_ats_entries(adev);
772 if ((pt - vm->root.entries) >= ats_entries) {
773 ats_entries = 0;
774 } else {
775 ats_entries = entries;
776 entries = 0;
777 }
778 }
779
780 r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
781 if (r)
782 return r;
783
784 if (bo->shadow) {
785 r = ttm_bo_validate(&bo->shadow->tbo, &bo->shadow->placement,
786 &ctx);
787 if (r)
788 return r;
789 }
790
791 r = vm->update_funcs->map_table(bo);
792 if (r)
793 return r;
794
	memset(&params, 0, sizeof(params));
796 params.adev = adev;
797 params.vm = vm;
798 params.immediate = immediate;
799
	r = vm->update_funcs->prepare(&params, NULL, AMDGPU_SYNC_EXPLICIT);
801 if (r)
802 return r;
803
804 addr = 0;
805 if (ats_entries) {
806 uint64_t value = 0, flags;
807
808 flags = AMDGPU_PTE_DEFAULT_ATC;
809 if (level != AMDGPU_VM_PTB) {
810
811 flags |= AMDGPU_PDE_PTE;
812 amdgpu_gmc_get_vm_pde(adev, level, &value, &flags);
813 }
814
		r = vm->update_funcs->update(&params, bo, addr, 0, ats_entries,
816 value, flags);
817 if (r)
818 return r;
819
820 addr += ats_entries * 8;
821 }
822
823 if (entries) {
824 uint64_t value = 0, flags = 0;
825
826 if (adev->asic_type >= CHIP_VEGA10) {
827 if (level != AMDGPU_VM_PTB) {
828
829 flags |= AMDGPU_PDE_PTE;
830 amdgpu_gmc_get_vm_pde(adev, level,
831 &value, &flags);
832 } else {
833
834 flags = AMDGPU_PTE_EXECUTABLE;
835 }
836 }
837
		r = vm->update_funcs->update(&params, bo, addr, 0, entries,
839 value, flags);
840 if (r)
841 return r;
842 }
843
	return vm->update_funcs->commit(&params, NULL);
845}
846
847
848
849
850
851
852
853
854
855
856static void amdgpu_vm_bo_param(struct amdgpu_device *adev, struct amdgpu_vm *vm,
857 int level, bool immediate,
858 struct amdgpu_bo_param *bp)
859{
860 memset(bp, 0, sizeof(*bp));
861
862 bp->size = amdgpu_vm_bo_size(adev, level);
863 bp->byte_align = AMDGPU_GPU_PAGE_SIZE;
864 bp->domain = AMDGPU_GEM_DOMAIN_VRAM;
865 bp->domain = amdgpu_bo_get_preferred_pin_domain(adev, bp->domain);
866 bp->flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
867 AMDGPU_GEM_CREATE_CPU_GTT_USWC;
868 if (vm->use_cpu_for_update)
869 bp->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
870 else if (!vm->root.base.bo || vm->root.base.bo->shadow)
871 bp->flags |= AMDGPU_GEM_CREATE_SHADOW;
872 bp->type = ttm_bo_type_kernel;
873 bp->no_wait_gpu = immediate;
874 if (vm->root.base.bo)
875 bp->resv = vm->root.base.bo->tbo.base.resv;
876}
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892static int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
893 struct amdgpu_vm *vm,
894 struct amdgpu_vm_pt_cursor *cursor,
895 bool immediate)
896{
897 struct amdgpu_vm_pt *entry = cursor->entry;
898 struct amdgpu_bo_param bp;
899 struct amdgpu_bo *pt;
900 int r;
901
902 if (cursor->level < AMDGPU_VM_PTB && !entry->entries) {
903 unsigned num_entries;
904
905 num_entries = amdgpu_vm_num_entries(adev, cursor->level);
906 entry->entries = kvmalloc_array(num_entries,
907 sizeof(*entry->entries),
908 GFP_KERNEL | __GFP_ZERO);
909 if (!entry->entries)
910 return -ENOMEM;
911 }
912
913 if (entry->base.bo)
914 return 0;
915
916 amdgpu_vm_bo_param(adev, vm, cursor->level, immediate, &bp);
917
918 r = amdgpu_bo_create(adev, &bp, &pt);
919 if (r)
920 return r;
921
922
923
924
925 pt->parent = amdgpu_bo_ref(cursor->parent->base.bo);
926 amdgpu_vm_bo_base_init(&entry->base, vm, pt);
927
928 r = amdgpu_vm_clear_bo(adev, vm, pt, immediate);
929 if (r)
930 goto error_free_pt;
931
932 return 0;
933
934error_free_pt:
935 amdgpu_bo_unref(&pt->shadow);
936 amdgpu_bo_unref(&pt);
937 return r;
938}
939
940
941
942
943
944
945static void amdgpu_vm_free_table(struct amdgpu_vm_pt *entry)
946{
947 if (entry->base.bo) {
948 entry->base.bo->vm_bo = NULL;
949 list_del(&entry->base.vm_status);
950 amdgpu_bo_unref(&entry->base.bo->shadow);
951 amdgpu_bo_unref(&entry->base.bo);
952 }
953 kvfree(entry->entries);
954 entry->entries = NULL;
955}
956
957
958
959
960
961
962
963
964
965
966static void amdgpu_vm_free_pts(struct amdgpu_device *adev,
967 struct amdgpu_vm *vm,
968 struct amdgpu_vm_pt_cursor *start)
969{
970 struct amdgpu_vm_pt_cursor cursor;
971 struct amdgpu_vm_pt *entry;
972
973 vm->bulk_moveable = false;
974
975 for_each_amdgpu_vm_pt_dfs_safe(adev, vm, start, cursor, entry)
976 amdgpu_vm_free_table(entry);
977
978 if (start)
979 amdgpu_vm_free_table(start->entry);
980}
981
982
983
984
985
986
987void amdgpu_vm_check_compute_bug(struct amdgpu_device *adev)
988{
989 const struct amdgpu_ip_block *ip_block;
990 bool has_compute_vm_bug;
991 struct amdgpu_ring *ring;
992 int i;
993
994 has_compute_vm_bug = false;
995
996 ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX);
997 if (ip_block) {
998
999
1000 if (ip_block->version->major <= 7)
1001 has_compute_vm_bug = true;
1002 else if (ip_block->version->major == 8)
1003 if (adev->gfx.mec_fw_version < 673)
1004 has_compute_vm_bug = true;
1005 }
1006
1007 for (i = 0; i < adev->num_rings; i++) {
1008 ring = adev->rings[i];
1009 if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE)
1010
1011 ring->has_compute_vm_bug = has_compute_vm_bug;
1012 else
1013 ring->has_compute_vm_bug = false;
1014 }
1015}
1016
1017
1018
1019
1020
1021
1022
1023
1024
1025
1026bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring,
1027 struct amdgpu_job *job)
1028{
1029 struct amdgpu_device *adev = ring->adev;
1030 unsigned vmhub = ring->funcs->vmhub;
1031 struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
1032 struct amdgpu_vmid *id;
1033 bool gds_switch_needed;
1034 bool vm_flush_needed = job->vm_needs_flush || ring->has_compute_vm_bug;
1035
1036 if (job->vmid == 0)
1037 return false;
1038 id = &id_mgr->ids[job->vmid];
1039 gds_switch_needed = ring->funcs->emit_gds_switch && (
1040 id->gds_base != job->gds_base ||
1041 id->gds_size != job->gds_size ||
1042 id->gws_base != job->gws_base ||
1043 id->gws_size != job->gws_size ||
1044 id->oa_base != job->oa_base ||
1045 id->oa_size != job->oa_size);
1046
1047 if (amdgpu_vmid_had_gpu_reset(adev, id))
1048 return true;
1049
1050 return vm_flush_needed || gds_switch_needed;
1051}
1052
1053
1054
1055
1056
1057
1058
1059
1060
1061
1062
1063
1064
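/**
 * amdgpu_vm_flush - hardware flush the vm
 *
 * @ring: ring to use for the flush
 * @job: related job
 * @need_pipe_sync: is a pipe sync needed
 *
 * Emit a VM flush when it is necessary.
 *
 * Returns:
 * 0 on success, errno otherwise.
 */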
1065int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job,
1066 bool need_pipe_sync)
1067{
1068 struct amdgpu_device *adev = ring->adev;
1069 unsigned vmhub = ring->funcs->vmhub;
1070 struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
1071 struct amdgpu_vmid *id = &id_mgr->ids[job->vmid];
1072 bool gds_switch_needed = ring->funcs->emit_gds_switch && (
1073 id->gds_base != job->gds_base ||
1074 id->gds_size != job->gds_size ||
1075 id->gws_base != job->gws_base ||
1076 id->gws_size != job->gws_size ||
1077 id->oa_base != job->oa_base ||
1078 id->oa_size != job->oa_size);
1079 bool vm_flush_needed = job->vm_needs_flush;
1080 struct dma_fence *fence = NULL;
1081 bool pasid_mapping_needed = false;
1082 unsigned patch_offset = 0;
1083 bool update_spm_vmid_needed = (job->vm && (job->vm->reserved_vmid[vmhub] != NULL));
1084 int r;
1085
1086 if (update_spm_vmid_needed && adev->gfx.rlc.funcs->update_spm_vmid)
1087 adev->gfx.rlc.funcs->update_spm_vmid(adev, job->vmid);
1088
1089 if (amdgpu_vmid_had_gpu_reset(adev, id)) {
1090 gds_switch_needed = true;
1091 vm_flush_needed = true;
1092 pasid_mapping_needed = true;
1093 }
1094
1095 mutex_lock(&id_mgr->lock);
1096 if (id->pasid != job->pasid || !id->pasid_mapping ||
1097 !dma_fence_is_signaled(id->pasid_mapping))
1098 pasid_mapping_needed = true;
1099 mutex_unlock(&id_mgr->lock);
1100
1101 gds_switch_needed &= !!ring->funcs->emit_gds_switch;
1102 vm_flush_needed &= !!ring->funcs->emit_vm_flush &&
1103 job->vm_pd_addr != AMDGPU_BO_INVALID_OFFSET;
1104 pasid_mapping_needed &= adev->gmc.gmc_funcs->emit_pasid_mapping &&
1105 ring->funcs->emit_wreg;
1106
1107 if (!vm_flush_needed && !gds_switch_needed && !need_pipe_sync)
1108 return 0;
1109
1110 if (ring->funcs->init_cond_exec)
1111 patch_offset = amdgpu_ring_init_cond_exec(ring);
1112
1113 if (need_pipe_sync)
1114 amdgpu_ring_emit_pipeline_sync(ring);
1115
1116 if (vm_flush_needed) {
1117 trace_amdgpu_vm_flush(ring, job->vmid, job->vm_pd_addr);
1118 amdgpu_ring_emit_vm_flush(ring, job->vmid, job->vm_pd_addr);
1119 }
1120
1121 if (pasid_mapping_needed)
1122 amdgpu_gmc_emit_pasid_mapping(ring, job->vmid, job->pasid);
1123
1124 if (vm_flush_needed || pasid_mapping_needed) {
1125 r = amdgpu_fence_emit(ring, &fence, 0);
1126 if (r)
1127 return r;
1128 }
1129
1130 if (vm_flush_needed) {
1131 mutex_lock(&id_mgr->lock);
1132 dma_fence_put(id->last_flush);
1133 id->last_flush = dma_fence_get(fence);
1134 id->current_gpu_reset_count =
1135 atomic_read(&adev->gpu_reset_counter);
1136 mutex_unlock(&id_mgr->lock);
1137 }
1138
1139 if (pasid_mapping_needed) {
1140 mutex_lock(&id_mgr->lock);
1141 id->pasid = job->pasid;
1142 dma_fence_put(id->pasid_mapping);
1143 id->pasid_mapping = dma_fence_get(fence);
1144 mutex_unlock(&id_mgr->lock);
1145 }
1146 dma_fence_put(fence);
1147
1148 if (ring->funcs->emit_gds_switch && gds_switch_needed) {
1149 id->gds_base = job->gds_base;
1150 id->gds_size = job->gds_size;
1151 id->gws_base = job->gws_base;
1152 id->gws_size = job->gws_size;
1153 id->oa_base = job->oa_base;
1154 id->oa_size = job->oa_size;
1155 amdgpu_ring_emit_gds_switch(ring, job->vmid, job->gds_base,
1156 job->gds_size, job->gws_base,
1157 job->gws_size, job->oa_base,
1158 job->oa_size);
1159 }
1160
1161 if (ring->funcs->patch_cond_exec)
1162 amdgpu_ring_patch_cond_exec(ring, patch_offset);
1163
1164
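	/* Two SWITCH_BUFFERs in a row is intentional here, not an
	 * accidental duplicate call.
	 */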
1165 if (ring->funcs->emit_switch_buffer) {
1166 amdgpu_ring_emit_switch_buffer(ring);
1167 amdgpu_ring_emit_switch_buffer(ring);
1168 }
1169 return 0;
1170}
1171
1172
1173
1174
1175
1176
1177
1178
1179
1180
1181
1182
1183
1184
1185
1186
1187struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
1188 struct amdgpu_bo *bo)
1189{
1190 struct amdgpu_vm_bo_base *base;
1191
1192 for (base = bo->vm_bo; base; base = base->next) {
1193 if (base->vm != vm)
1194 continue;
1195
1196 return container_of(base, struct amdgpu_bo_va, base);
1197 }
1198 return NULL;
1199}
1200
1201
1202
1203
1204
1205
1206
1207
1208
1209
1210
1211
1212
1213uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr)
1214{
1215 uint64_t result;
1216
1217
1218 result = pages_addr[addr >> PAGE_SHIFT];
1219
1220
1221 result |= addr & (~PAGE_MASK);
1222
1223 result &= 0xFFFFFFFFFFFFF000ULL;
1224
1225 return result;
1226}
1227
1228
1229
1230
1231
1232
1233
1234
1235
1236
1237static int amdgpu_vm_update_pde(struct amdgpu_vm_update_params *params,
1238 struct amdgpu_vm *vm,
1239 struct amdgpu_vm_pt *entry)
1240{
1241 struct amdgpu_vm_pt *parent = amdgpu_vm_pt_parent(entry);
1242 struct amdgpu_bo *bo = parent->base.bo, *pbo;
1243 uint64_t pde, pt, flags;
1244 unsigned level;
1245
1246 for (level = 0, pbo = bo->parent; pbo; ++level)
1247 pbo = pbo->parent;
1248
1249 level += params->adev->vm_manager.root_level;
1250 amdgpu_gmc_get_pde_for_bo(entry->base.bo, level, &pt, &flags);
1251 pde = (entry - parent->entries) * 8;
1252 return vm->update_funcs->update(params, bo, pde, pt, 1, 0, flags);
1253}
1254
1255
1256
1257
1258
1259
1260
1261
1262
1263static void amdgpu_vm_invalidate_pds(struct amdgpu_device *adev,
1264 struct amdgpu_vm *vm)
1265{
1266 struct amdgpu_vm_pt_cursor cursor;
1267 struct amdgpu_vm_pt *entry;
1268
1269 for_each_amdgpu_vm_pt_dfs_safe(adev, vm, NULL, cursor, entry)
1270 if (entry->base.bo && !entry->base.moved)
1271 amdgpu_vm_bo_relocated(&entry->base);
1272}
1273
1274
1275
1276
1277
1278
1279
1280
1281
1282
1283
1284
1285
1286int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
1287 struct amdgpu_vm *vm, bool immediate)
1288{
1289 struct amdgpu_vm_update_params params;
1290 int r;
1291
1292 if (list_empty(&vm->relocated))
1293 return 0;
1294
	memset(&params, 0, sizeof(params));
1296 params.adev = adev;
1297 params.vm = vm;
1298 params.immediate = immediate;
1299
	r = vm->update_funcs->prepare(&params, NULL, AMDGPU_SYNC_EXPLICIT);
1301 if (r)
1302 return r;
1303
1304 while (!list_empty(&vm->relocated)) {
1305 struct amdgpu_vm_pt *entry;
1306
1307 entry = list_first_entry(&vm->relocated, struct amdgpu_vm_pt,
1308 base.vm_status);
1309 amdgpu_vm_bo_idle(&entry->base);
1310
		r = amdgpu_vm_update_pde(&params, vm, entry);
1312 if (r)
1313 goto error;
1314 }
1315
	r = vm->update_funcs->commit(&params, &vm->last_update);
1317 if (r)
1318 goto error;
1319 return 0;
1320
1321error:
1322 amdgpu_vm_invalidate_pds(adev, vm);
1323 return r;
1324}
1325
1326
1327
1328
1329
1330
1331static void amdgpu_vm_update_flags(struct amdgpu_vm_update_params *params,
1332 struct amdgpu_bo *bo, unsigned level,
1333 uint64_t pe, uint64_t addr,
1334 unsigned count, uint32_t incr,
1335 uint64_t flags)
1336
1337{
1338 if (level != AMDGPU_VM_PTB) {
1339 flags |= AMDGPU_PDE_PTE;
1340 amdgpu_gmc_get_vm_pde(params->adev, level, &addr, &flags);
1341
1342 } else if (params->adev->asic_type >= CHIP_VEGA10 &&
1343 !(flags & AMDGPU_PTE_VALID) &&
1344 !(flags & AMDGPU_PTE_PRT)) {
1345
1346
1347 flags |= AMDGPU_PTE_EXECUTABLE;
1348 }
1349
1350 params->vm->update_funcs->update(params, bo, pe, addr, count, incr,
1351 flags);
1352}
1353
1354
1355
1356
1357
1358
1359
1360
1361
1362
1363
1364
1365
1366static void amdgpu_vm_fragment(struct amdgpu_vm_update_params *params,
1367 uint64_t start, uint64_t end, uint64_t flags,
1368 unsigned int *frag, uint64_t *frag_end)
1369{
1370
1371
1372
1373
1374
1375
1376
1377
1378
1379
1380
1381
1382
1383
1384
1385
1386
1387
1388
1389
1390
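	/* The PTEs carry a fragment field: all pages covered by a fragment
	 * of size (4KB << frag) must be physically contiguous and naturally
	 * aligned, which allows the MC to use larger TLB entries for them.
	 * ASICs before Vega10 only support the single fragment size
	 * configured in the VM manager; newer ASICs allow larger, per-update
	 * fragment sizes (up to 31), and system pages (pages_addr != NULL)
	 * are never grouped into fragments.
	 */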
1391 unsigned max_frag;
1392
1393 if (params->adev->asic_type < CHIP_VEGA10)
1394 max_frag = params->adev->vm_manager.fragment_size;
1395 else
1396 max_frag = 31;
1397
1398
1399 if (params->pages_addr) {
1400 *frag = 0;
1401 *frag_end = end;
1402 return;
1403 }
1404
1405
1406 *frag = min((unsigned)ffs(start) - 1, (unsigned)fls64(end - start) - 1);
1407 if (*frag >= max_frag) {
1408 *frag = max_frag;
1409 *frag_end = end & ~((1ULL << max_frag) - 1);
1410 } else {
1411 *frag_end = start + (1 << *frag);
1412 }
1413}
1414
1415
1416
1417
1418
1419
1420
1421
1422
1423
1424
1425
1426
1427
1428
1429static int amdgpu_vm_update_ptes(struct amdgpu_vm_update_params *params,
1430 uint64_t start, uint64_t end,
1431 uint64_t dst, uint64_t flags)
1432{
1433 struct amdgpu_device *adev = params->adev;
1434 struct amdgpu_vm_pt_cursor cursor;
1435 uint64_t frag_start = start, frag_end;
1436 unsigned int frag;
1437 int r;
1438
1439
1440 amdgpu_vm_fragment(params, frag_start, end, flags, &frag, &frag_end);
1441
1442
1443 amdgpu_vm_pt_start(adev, params->vm, start, &cursor);
1444 while (cursor.pfn < end) {
1445 unsigned shift, parent_shift, mask;
1446 uint64_t incr, entry_end, pe_start;
1447 struct amdgpu_bo *pt;
1448
1449 if (!params->unlocked) {
1450
1451
1452
1453 r = amdgpu_vm_alloc_pts(params->adev, params->vm,
1454 &cursor, params->immediate);
1455 if (r)
1456 return r;
1457 }
1458
1459 shift = amdgpu_vm_level_shift(adev, cursor.level);
1460 parent_shift = amdgpu_vm_level_shift(adev, cursor.level - 1);
1461 if (params->unlocked) {
1462
1463 if (amdgpu_vm_pt_descendant(adev, &cursor))
1464 continue;
1465 } else if (adev->asic_type < CHIP_VEGA10 &&
1466 (flags & AMDGPU_PTE_VALID)) {
1467
1468 if (cursor.level != AMDGPU_VM_PTB) {
1469 if (!amdgpu_vm_pt_descendant(adev, &cursor))
1470 return -ENOENT;
1471 continue;
1472 }
1473 } else if (frag < shift) {
1474
1475
1476
1477
1478 if (amdgpu_vm_pt_descendant(adev, &cursor))
1479 continue;
1480 } else if (frag >= parent_shift) {
1481
1482
1483
1484 if (!amdgpu_vm_pt_ancestor(&cursor))
1485 return -EINVAL;
1486 continue;
1487 }
1488
1489 pt = cursor.entry->base.bo;
1490 if (!pt) {
1491
1492 if (flags & AMDGPU_PTE_VALID)
1493 return -ENOENT;
1494
1495
1496
1497
1498 if (!amdgpu_vm_pt_ancestor(&cursor))
1499 return -EINVAL;
1500
1501 pt = cursor.entry->base.bo;
1502 shift = parent_shift;
1503 }
1504
1505
1506 incr = (uint64_t)AMDGPU_GPU_PAGE_SIZE << shift;
1507 mask = amdgpu_vm_entries_mask(adev, cursor.level);
1508 pe_start = ((cursor.pfn >> shift) & mask) * 8;
1509 entry_end = ((uint64_t)mask + 1) << shift;
1510 entry_end += cursor.pfn & ~(entry_end - 1);
1511 entry_end = min(entry_end, end);
1512
1513 do {
1514 uint64_t upd_end = min(entry_end, frag_end);
1515 unsigned nptes = (upd_end - frag_start) >> shift;
1516
1517
1518
1519
1520 nptes = max(nptes, 1u);
1521 amdgpu_vm_update_flags(params, pt, cursor.level,
1522 pe_start, dst, nptes, incr,
1523 flags | AMDGPU_PTE_FRAG(frag));
1524
1525 pe_start += nptes * 8;
1526 dst += (uint64_t)nptes * AMDGPU_GPU_PAGE_SIZE << shift;
1527
1528 frag_start = upd_end;
1529 if (frag_start >= frag_end) {
1530
1531 amdgpu_vm_fragment(params, frag_start, end,
1532 flags, &frag, &frag_end);
1533 if (frag < shift)
1534 break;
1535 }
1536 } while (frag_start < entry_end);
1537
1538 if (amdgpu_vm_pt_descendant(adev, &cursor)) {
1539
1540
1541
1542
1543
1544
1545 while (cursor.pfn < frag_start) {
1546 amdgpu_vm_free_pts(adev, params->vm, &cursor);
1547 amdgpu_vm_pt_next(adev, &cursor);
1548 }
1549
1550 } else if (frag >= shift) {
1551
1552 amdgpu_vm_pt_next(adev, &cursor);
1553 }
1554 }
1555
1556 return 0;
1557}
1558
1559
1560
1561
1562
1563
1564
1565
1566
1567
1568
1569
1570
1571
1572
1573
1574
1575
1576
1577
1578
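/**
 * amdgpu_vm_bo_update_mapping - update a mapping in the vm page table
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 * @immediate: use the immediate submission path
 * @unlocked: update without holding the root PD reservation
 * @resv: fences we need to sync to
 * @start: start of the mapped range
 * @last: last mapped entry
 * @flags: flags for the entries
 * @addr: address to set the area to
 * @pages_addr: DMA addresses to use for mapping
 * @fence: optional resulting fence
 *
 * Fill in the page table entries between @start and @last.
 *
 * Returns:
 * 0 for success, negative error code otherwise.
 */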
1579static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
1580 struct amdgpu_vm *vm, bool immediate,
1581 bool unlocked, struct dma_resv *resv,
1582 uint64_t start, uint64_t last,
1583 uint64_t flags, uint64_t addr,
1584 dma_addr_t *pages_addr,
1585 struct dma_fence **fence)
1586{
1587 struct amdgpu_vm_update_params params;
1588 enum amdgpu_sync_mode sync_mode;
1589 int r;
1590
	memset(&params, 0, sizeof(params));
1592 params.adev = adev;
1593 params.vm = vm;
1594 params.immediate = immediate;
1595 params.pages_addr = pages_addr;
1596 params.unlocked = unlocked;
1597
1598
1599
1600
1601 if (!(flags & AMDGPU_PTE_VALID))
1602 sync_mode = AMDGPU_SYNC_EQ_OWNER;
1603 else
1604 sync_mode = AMDGPU_SYNC_EXPLICIT;
1605
1606 amdgpu_vm_eviction_lock(vm);
1607 if (vm->evicting) {
1608 r = -EBUSY;
1609 goto error_unlock;
1610 }
1611
1612 if (!unlocked && !dma_fence_is_signaled(vm->last_unlocked)) {
1613 struct dma_fence *tmp = dma_fence_get_stub();
1614
1615 amdgpu_bo_fence(vm->root.base.bo, vm->last_unlocked, true);
1616 swap(vm->last_unlocked, tmp);
1617 dma_fence_put(tmp);
1618 }
1619
	r = vm->update_funcs->prepare(&params, resv, sync_mode);
1621 if (r)
1622 goto error_unlock;
1623
	r = amdgpu_vm_update_ptes(&params, start, last + 1, addr, flags);
1625 if (r)
1626 goto error_unlock;
1627
	r = vm->update_funcs->commit(&params, fence);
1629
1630error_unlock:
1631 amdgpu_vm_eviction_unlock(vm);
1632 return r;
1633}
1634
1635
1636
1637
1638
1639
1640
1641
1642
1643
1644
1645
1646
1647
1648
1649
1650
1651
1652
1653
1654static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
1655 struct dma_resv *resv,
1656 dma_addr_t *pages_addr,
1657 struct amdgpu_vm *vm,
1658 struct amdgpu_bo_va_mapping *mapping,
1659 uint64_t flags,
1660 struct amdgpu_device *bo_adev,
1661 struct drm_mm_node *nodes,
1662 struct dma_fence **fence)
1663{
1664 unsigned min_linear_pages = 1 << adev->vm_manager.fragment_size;
1665 uint64_t pfn, start = mapping->start;
1666 int r;
1667
1668
1669
1670
1671 if (!(mapping->flags & AMDGPU_PTE_READABLE))
1672 flags &= ~AMDGPU_PTE_READABLE;
1673 if (!(mapping->flags & AMDGPU_PTE_WRITEABLE))
1674 flags &= ~AMDGPU_PTE_WRITEABLE;
1675
1676
1677 amdgpu_gmc_get_vm_pte(adev, mapping, &flags);
1678
1679 trace_amdgpu_vm_bo_update(mapping);
1680
1681 pfn = mapping->offset >> PAGE_SHIFT;
1682 if (nodes) {
1683 while (pfn >= nodes->size) {
1684 pfn -= nodes->size;
1685 ++nodes;
1686 }
1687 }
1688
1689 do {
1690 dma_addr_t *dma_addr = NULL;
1691 uint64_t max_entries;
1692 uint64_t addr, last;
1693
1694 if (nodes) {
1695 addr = nodes->start << PAGE_SHIFT;
1696 max_entries = (nodes->size - pfn) *
1697 AMDGPU_GPU_PAGES_IN_CPU_PAGE;
1698 } else {
1699 addr = 0;
1700 max_entries = S64_MAX;
1701 }
1702
1703 if (pages_addr) {
1704 uint64_t count;
1705
1706 for (count = 1;
1707 count < max_entries / AMDGPU_GPU_PAGES_IN_CPU_PAGE;
1708 ++count) {
1709 uint64_t idx = pfn + count;
1710
1711 if (pages_addr[idx] !=
1712 (pages_addr[idx - 1] + PAGE_SIZE))
1713 break;
1714 }
1715
1716 if (count < min_linear_pages) {
1717 addr = pfn << PAGE_SHIFT;
1718 dma_addr = pages_addr;
1719 } else {
1720 addr = pages_addr[pfn];
1721 max_entries = count *
1722 AMDGPU_GPU_PAGES_IN_CPU_PAGE;
1723 }
1724
1725 } else if (flags & (AMDGPU_PTE_VALID | AMDGPU_PTE_PRT)) {
1726 addr += bo_adev->vm_manager.vram_base_offset;
1727 addr += pfn << PAGE_SHIFT;
1728 }
1729
1730 last = min((uint64_t)mapping->last, start + max_entries - 1);
1731 r = amdgpu_vm_bo_update_mapping(adev, vm, false, false, resv,
1732 start, last, flags, addr,
1733 dma_addr, fence);
1734 if (r)
1735 return r;
1736
1737 pfn += (last - start + 1) / AMDGPU_GPU_PAGES_IN_CPU_PAGE;
1738 if (nodes && nodes->size == pfn) {
1739 pfn = 0;
1740 ++nodes;
1741 }
1742 start = last + 1;
1743
1744 } while (unlikely(start != mapping->last + 1));
1745
1746 return 0;
1747}
1748
1749
1750
1751
1752
1753
1754
1755
1756
1757
1758
1759
1760
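/**
 * amdgpu_vm_bo_update - update all BO mappings in the vm page table
 *
 * @adev: amdgpu_device pointer
 * @bo_va: requested BO and VM object
 * @clear: if true clear the entries
 *
 * Fill in the page table entries for @bo_va.
 *
 * Returns:
 * 0 for success, negative error code otherwise.
 */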
1761int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
1762 bool clear)
1763{
1764 struct amdgpu_bo *bo = bo_va->base.bo;
1765 struct amdgpu_vm *vm = bo_va->base.vm;
1766 struct amdgpu_bo_va_mapping *mapping;
1767 dma_addr_t *pages_addr = NULL;
1768 struct ttm_mem_reg *mem;
1769 struct drm_mm_node *nodes;
1770 struct dma_fence **last_update;
1771 struct dma_resv *resv;
1772 uint64_t flags;
1773 struct amdgpu_device *bo_adev = adev;
1774 int r;
1775
1776 if (clear || !bo) {
1777 mem = NULL;
1778 nodes = NULL;
1779 resv = vm->root.base.bo->tbo.base.resv;
1780 } else {
1781 struct ttm_dma_tt *ttm;
1782
1783 mem = &bo->tbo.mem;
1784 nodes = mem->mm_node;
1785 if (mem->mem_type == TTM_PL_TT) {
1786 ttm = container_of(bo->tbo.ttm, struct ttm_dma_tt, ttm);
1787 pages_addr = ttm->dma_address;
1788 }
1789 resv = bo->tbo.base.resv;
1790 }
1791
1792 if (bo) {
1793 flags = amdgpu_ttm_tt_pte_flags(adev, bo->tbo.ttm, mem);
1794
1795 if (amdgpu_bo_encrypted(bo))
1796 flags |= AMDGPU_PTE_TMZ;
1797
1798 bo_adev = amdgpu_ttm_adev(bo->tbo.bdev);
1799 } else {
1800 flags = 0x0;
1801 }
1802
1803 if (clear || (bo && bo->tbo.base.resv ==
1804 vm->root.base.bo->tbo.base.resv))
1805 last_update = &vm->last_update;
1806 else
1807 last_update = &bo_va->last_pt_update;
1808
1809 if (!clear && bo_va->base.moved) {
1810 bo_va->base.moved = false;
1811 list_splice_init(&bo_va->valids, &bo_va->invalids);
1812
1813 } else if (bo_va->cleared != clear) {
1814 list_splice_init(&bo_va->valids, &bo_va->invalids);
1815 }
1816
1817 list_for_each_entry(mapping, &bo_va->invalids, list) {
1818 r = amdgpu_vm_bo_split_mapping(adev, resv, pages_addr, vm,
1819 mapping, flags, bo_adev, nodes,
1820 last_update);
1821 if (r)
1822 return r;
1823 }
1824
1825
1826
1827
1828
1829 if (bo && bo->tbo.base.resv == vm->root.base.bo->tbo.base.resv) {
1830 uint32_t mem_type = bo->tbo.mem.mem_type;
1831
1832 if (!(bo->preferred_domains &
1833 amdgpu_mem_type_to_domain(mem_type)))
1834 amdgpu_vm_bo_evicted(&bo_va->base);
1835 else
1836 amdgpu_vm_bo_idle(&bo_va->base);
1837 } else {
1838 amdgpu_vm_bo_done(&bo_va->base);
1839 }
1840
1841 list_splice_init(&bo_va->invalids, &bo_va->valids);
1842 bo_va->cleared = clear;
1843
1844 if (trace_amdgpu_vm_bo_mapping_enabled()) {
1845 list_for_each_entry(mapping, &bo_va->valids, list)
1846 trace_amdgpu_vm_bo_mapping(mapping);
1847 }
1848
1849 return 0;
1850}
1851
1852
1853
1854
1855
1856
1857static void amdgpu_vm_update_prt_state(struct amdgpu_device *adev)
1858{
1859 unsigned long flags;
1860 bool enable;
1861
1862 spin_lock_irqsave(&adev->vm_manager.prt_lock, flags);
1863 enable = !!atomic_read(&adev->vm_manager.num_prt_users);
1864 adev->gmc.gmc_funcs->set_prt(adev, enable);
1865 spin_unlock_irqrestore(&adev->vm_manager.prt_lock, flags);
1866}
1867
1868
1869
1870
1871
1872
1873static void amdgpu_vm_prt_get(struct amdgpu_device *adev)
1874{
1875 if (!adev->gmc.gmc_funcs->set_prt)
1876 return;
1877
1878 if (atomic_inc_return(&adev->vm_manager.num_prt_users) == 1)
1879 amdgpu_vm_update_prt_state(adev);
1880}
1881
1882
1883
1884
1885
1886
1887static void amdgpu_vm_prt_put(struct amdgpu_device *adev)
1888{
1889 if (atomic_dec_return(&adev->vm_manager.num_prt_users) == 0)
1890 amdgpu_vm_update_prt_state(adev);
1891}
1892
1893
1894
1895
1896
1897
1898
1899static void amdgpu_vm_prt_cb(struct dma_fence *fence, struct dma_fence_cb *_cb)
1900{
1901 struct amdgpu_prt_cb *cb = container_of(_cb, struct amdgpu_prt_cb, cb);
1902
1903 amdgpu_vm_prt_put(cb->adev);
1904 kfree(cb);
1905}
1906
1907
1908
1909
1910
1911
1912
1913static void amdgpu_vm_add_prt_cb(struct amdgpu_device *adev,
1914 struct dma_fence *fence)
1915{
1916 struct amdgpu_prt_cb *cb;
1917
1918 if (!adev->gmc.gmc_funcs->set_prt)
1919 return;
1920
1921 cb = kmalloc(sizeof(struct amdgpu_prt_cb), GFP_KERNEL);
1922 if (!cb) {
1923
1924 if (fence)
1925 dma_fence_wait(fence, false);
1926
1927 amdgpu_vm_prt_put(adev);
1928 } else {
1929 cb->adev = adev;
1930 if (!fence || dma_fence_add_callback(fence, &cb->cb,
1931 amdgpu_vm_prt_cb))
1932 amdgpu_vm_prt_cb(fence, &cb->cb);
1933 }
1934}
1935
1936
1937
1938
1939
1940
1941
1942
1943
1944
1945
1946static void amdgpu_vm_free_mapping(struct amdgpu_device *adev,
1947 struct amdgpu_vm *vm,
1948 struct amdgpu_bo_va_mapping *mapping,
1949 struct dma_fence *fence)
1950{
1951 if (mapping->flags & AMDGPU_PTE_PRT)
1952 amdgpu_vm_add_prt_cb(adev, fence);
1953 kfree(mapping);
1954}
1955
1956
1957
1958
1959
1960
1961
1962
1963
1964static void amdgpu_vm_prt_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
1965{
1966 struct dma_resv *resv = vm->root.base.bo->tbo.base.resv;
1967 struct dma_fence *excl, **shared;
1968 unsigned i, shared_count;
1969 int r;
1970
1971 r = dma_resv_get_fences_rcu(resv, &excl,
1972 &shared_count, &shared);
1973 if (r) {
1974
1975
1976
1977 dma_resv_wait_timeout_rcu(resv, true, false,
1978 MAX_SCHEDULE_TIMEOUT);
1979 return;
1980 }
1981
1982
1983 amdgpu_vm_prt_get(adev);
1984 amdgpu_vm_add_prt_cb(adev, excl);
1985
1986 for (i = 0; i < shared_count; ++i) {
1987 amdgpu_vm_prt_get(adev);
1988 amdgpu_vm_add_prt_cb(adev, shared[i]);
1989 }
1990
1991 kfree(shared);
1992}
1993
1994
1995
1996
1997
1998
1999
2000
2001
2002
2003
2004
2005
2006
2007
2008
2009int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
2010 struct amdgpu_vm *vm,
2011 struct dma_fence **fence)
2012{
2013 struct dma_resv *resv = vm->root.base.bo->tbo.base.resv;
2014 struct amdgpu_bo_va_mapping *mapping;
2015 uint64_t init_pte_value = 0;
2016 struct dma_fence *f = NULL;
2017 int r;
2018
2019 while (!list_empty(&vm->freed)) {
2020 mapping = list_first_entry(&vm->freed,
2021 struct amdgpu_bo_va_mapping, list);
2022 list_del(&mapping->list);
2023
2024 if (vm->pte_support_ats &&
2025 mapping->start < AMDGPU_GMC_HOLE_START)
2026 init_pte_value = AMDGPU_PTE_DEFAULT_ATC;
2027
2028 r = amdgpu_vm_bo_update_mapping(adev, vm, false, false, resv,
2029 mapping->start, mapping->last,
2030 init_pte_value, 0, NULL, &f);
2031 amdgpu_vm_free_mapping(adev, vm, mapping, f);
2032 if (r) {
2033 dma_fence_put(f);
2034 return r;
2035 }
2036 }
2037
2038 if (fence && f) {
2039 dma_fence_put(*fence);
2040 *fence = f;
2041 } else {
2042 dma_fence_put(f);
2043 }
2044
2045 return 0;
2046
2047}
2048
2049
2050
2051
2052
2053
2054
2055
2056
2057
2058
2059
2060
2061
2062int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
2063 struct amdgpu_vm *vm)
2064{
2065 struct amdgpu_bo_va *bo_va, *tmp;
2066 struct dma_resv *resv;
2067 bool clear;
2068 int r;
2069
2070 list_for_each_entry_safe(bo_va, tmp, &vm->moved, base.vm_status) {
2071
2072 r = amdgpu_vm_bo_update(adev, bo_va, false);
2073 if (r)
2074 return r;
2075 }
2076
2077 spin_lock(&vm->invalidated_lock);
2078 while (!list_empty(&vm->invalidated)) {
2079 bo_va = list_first_entry(&vm->invalidated, struct amdgpu_bo_va,
2080 base.vm_status);
2081 resv = bo_va->base.bo->tbo.base.resv;
2082 spin_unlock(&vm->invalidated_lock);
2083
2084
2085 if (!amdgpu_vm_debug && dma_resv_trylock(resv))
2086 clear = false;
2087
2088 else
2089 clear = true;
2090
2091 r = amdgpu_vm_bo_update(adev, bo_va, clear);
2092 if (r)
2093 return r;
2094
2095 if (!clear)
2096 dma_resv_unlock(resv);
2097 spin_lock(&vm->invalidated_lock);
2098 }
2099 spin_unlock(&vm->invalidated_lock);
2100
2101 return 0;
2102}
2103
2104
2105
2106
2107
2108
2109
2110
2111
2112
2113
2114
2115
2116
2117
2118
2119struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
2120 struct amdgpu_vm *vm,
2121 struct amdgpu_bo *bo)
2122{
2123 struct amdgpu_bo_va *bo_va;
2124
2125 bo_va = kzalloc(sizeof(struct amdgpu_bo_va), GFP_KERNEL);
2126 if (bo_va == NULL) {
2127 return NULL;
2128 }
2129 amdgpu_vm_bo_base_init(&bo_va->base, vm, bo);
2130
2131 bo_va->ref_count = 1;
2132 INIT_LIST_HEAD(&bo_va->valids);
2133 INIT_LIST_HEAD(&bo_va->invalids);
2134
2135 if (bo && amdgpu_xgmi_same_hive(adev, amdgpu_ttm_adev(bo->tbo.bdev)) &&
2136 (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM)) {
2137 bo_va->is_xgmi = true;
2138
2139 amdgpu_xgmi_set_pstate(adev, AMDGPU_XGMI_PSTATE_MAX_VEGA20);
2140 }
2141
2142 return bo_va;
2143}
2144
2145
2146
2147
2148
2149
2150
2151
2152
2153
2154
2155static void amdgpu_vm_bo_insert_map(struct amdgpu_device *adev,
2156 struct amdgpu_bo_va *bo_va,
2157 struct amdgpu_bo_va_mapping *mapping)
2158{
2159 struct amdgpu_vm *vm = bo_va->base.vm;
2160 struct amdgpu_bo *bo = bo_va->base.bo;
2161
2162 mapping->bo_va = bo_va;
2163 list_add(&mapping->list, &bo_va->invalids);
2164 amdgpu_vm_it_insert(mapping, &vm->va);
2165
2166 if (mapping->flags & AMDGPU_PTE_PRT)
2167 amdgpu_vm_prt_get(adev);
2168
2169 if (bo && bo->tbo.base.resv == vm->root.base.bo->tbo.base.resv &&
2170 !bo_va->base.moved) {
2171 list_move(&bo_va->base.vm_status, &vm->moved);
2172 }
2173 trace_amdgpu_vm_bo_map(bo_va, mapping);
2174}
2175
2176
2177
2178
2179
2180
2181
2182
2183
2184
2185
2186
2187
2188
2189
2190
2191
2192
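/**
 * amdgpu_vm_bo_map - map a BO inside a vm
 *
 * @adev: amdgpu_device pointer
 * @bo_va: bo_va to store the address
 * @saddr: where to map the BO
 * @offset: requested offset in the BO
 * @size: BO size in bytes
 * @flags: attributes of pages (read/write/valid/etc.)
 *
 * Add a mapping of the BO at the specified address into the VM.
 *
 * Returns:
 * 0 for success, negative error code otherwise.
 */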
2193int amdgpu_vm_bo_map(struct amdgpu_device *adev,
2194 struct amdgpu_bo_va *bo_va,
2195 uint64_t saddr, uint64_t offset,
2196 uint64_t size, uint64_t flags)
2197{
2198 struct amdgpu_bo_va_mapping *mapping, *tmp;
2199 struct amdgpu_bo *bo = bo_va->base.bo;
2200 struct amdgpu_vm *vm = bo_va->base.vm;
2201 uint64_t eaddr;
2202
2203
2204 if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK ||
2205 size == 0 || size & AMDGPU_GPU_PAGE_MASK)
2206 return -EINVAL;
2207
2208
2209 eaddr = saddr + size - 1;
2210 if (saddr >= eaddr ||
2211 (bo && offset + size > amdgpu_bo_size(bo)) ||
2212 (eaddr >= adev->vm_manager.max_pfn << AMDGPU_GPU_PAGE_SHIFT))
2213 return -EINVAL;
2214
2215 saddr /= AMDGPU_GPU_PAGE_SIZE;
2216 eaddr /= AMDGPU_GPU_PAGE_SIZE;
2217
2218 tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
2219 if (tmp) {
2220
2221 dev_err(adev->dev, "bo %p va 0x%010Lx-0x%010Lx conflict with "
2222 "0x%010Lx-0x%010Lx\n", bo, saddr, eaddr,
2223 tmp->start, tmp->last + 1);
2224 return -EINVAL;
2225 }
2226
2227 mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
2228 if (!mapping)
2229 return -ENOMEM;
2230
2231 mapping->start = saddr;
2232 mapping->last = eaddr;
2233 mapping->offset = offset;
2234 mapping->flags = flags;
2235
2236 amdgpu_vm_bo_insert_map(adev, bo_va, mapping);
2237
2238 return 0;
2239}
2240
2241
2242
2243
2244
2245
2246
2247
2248
2249
2250
2251
2252
2253
2254
2255
2256
2257
2258
2259int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
2260 struct amdgpu_bo_va *bo_va,
2261 uint64_t saddr, uint64_t offset,
2262 uint64_t size, uint64_t flags)
2263{
2264 struct amdgpu_bo_va_mapping *mapping;
2265 struct amdgpu_bo *bo = bo_va->base.bo;
2266 uint64_t eaddr;
2267 int r;
2268
2269
2270 if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK ||
2271 size == 0 || size & AMDGPU_GPU_PAGE_MASK)
2272 return -EINVAL;
2273
2274
2275 eaddr = saddr + size - 1;
2276 if (saddr >= eaddr ||
2277 (bo && offset + size > amdgpu_bo_size(bo)) ||
2278 (eaddr >= adev->vm_manager.max_pfn << AMDGPU_GPU_PAGE_SHIFT))
2279 return -EINVAL;
2280
2281
2282 mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
2283 if (!mapping)
2284 return -ENOMEM;
2285
2286 r = amdgpu_vm_bo_clear_mappings(adev, bo_va->base.vm, saddr, size);
2287 if (r) {
2288 kfree(mapping);
2289 return r;
2290 }
2291
2292 saddr /= AMDGPU_GPU_PAGE_SIZE;
2293 eaddr /= AMDGPU_GPU_PAGE_SIZE;
2294
2295 mapping->start = saddr;
2296 mapping->last = eaddr;
2297 mapping->offset = offset;
2298 mapping->flags = flags;
2299
2300 amdgpu_vm_bo_insert_map(adev, bo_va, mapping);
2301
2302 return 0;
2303}
2304
2305
2306
2307
2308
2309
2310
2311
2312
2313
2314
2315
2316
2317
2318
2319int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
2320 struct amdgpu_bo_va *bo_va,
2321 uint64_t saddr)
2322{
2323 struct amdgpu_bo_va_mapping *mapping;
2324 struct amdgpu_vm *vm = bo_va->base.vm;
2325 bool valid = true;
2326
2327 saddr /= AMDGPU_GPU_PAGE_SIZE;
2328
2329 list_for_each_entry(mapping, &bo_va->valids, list) {
2330 if (mapping->start == saddr)
2331 break;
2332 }
2333
2334 if (&mapping->list == &bo_va->valids) {
2335 valid = false;
2336
2337 list_for_each_entry(mapping, &bo_va->invalids, list) {
2338 if (mapping->start == saddr)
2339 break;
2340 }
2341
2342 if (&mapping->list == &bo_va->invalids)
2343 return -ENOENT;
2344 }
2345
2346 list_del(&mapping->list);
2347 amdgpu_vm_it_remove(mapping, &vm->va);
2348 mapping->bo_va = NULL;
2349 trace_amdgpu_vm_bo_unmap(bo_va, mapping);
2350
2351 if (valid)
2352 list_add(&mapping->list, &vm->freed);
2353 else
2354 amdgpu_vm_free_mapping(adev, vm, mapping,
2355 bo_va->last_pt_update);
2356
2357 return 0;
2358}
2359
2360
2361
2362
2363
2364
2365
2366
2367
2368
2369
2370
2371
2372
2373int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
2374 struct amdgpu_vm *vm,
2375 uint64_t saddr, uint64_t size)
2376{
2377 struct amdgpu_bo_va_mapping *before, *after, *tmp, *next;
2378 LIST_HEAD(removed);
2379 uint64_t eaddr;
2380
2381 eaddr = saddr + size - 1;
2382 saddr /= AMDGPU_GPU_PAGE_SIZE;
2383 eaddr /= AMDGPU_GPU_PAGE_SIZE;
2384
2385
2386 before = kzalloc(sizeof(*before), GFP_KERNEL);
2387 if (!before)
2388 return -ENOMEM;
2389 INIT_LIST_HEAD(&before->list);
2390
2391 after = kzalloc(sizeof(*after), GFP_KERNEL);
2392 if (!after) {
2393 kfree(before);
2394 return -ENOMEM;
2395 }
2396 INIT_LIST_HEAD(&after->list);
2397
2398
2399 tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
2400 while (tmp) {
2401
2402 if (tmp->start < saddr) {
2403 before->start = tmp->start;
2404 before->last = saddr - 1;
2405 before->offset = tmp->offset;
2406 before->flags = tmp->flags;
2407 before->bo_va = tmp->bo_va;
2408 list_add(&before->list, &tmp->bo_va->invalids);
2409 }
2410
2411
2412 if (tmp->last > eaddr) {
2413 after->start = eaddr + 1;
2414 after->last = tmp->last;
2415 after->offset = tmp->offset;
2416 after->offset += after->start - tmp->start;
2417 after->flags = tmp->flags;
2418 after->bo_va = tmp->bo_va;
2419 list_add(&after->list, &tmp->bo_va->invalids);
2420 }
2421
2422 list_del(&tmp->list);
2423 list_add(&tmp->list, &removed);
2424
2425 tmp = amdgpu_vm_it_iter_next(tmp, saddr, eaddr);
2426 }
2427
2428
2429 list_for_each_entry_safe(tmp, next, &removed, list) {
2430 amdgpu_vm_it_remove(tmp, &vm->va);
2431 list_del(&tmp->list);
2432
2433 if (tmp->start < saddr)
2434 tmp->start = saddr;
2435 if (tmp->last > eaddr)
2436 tmp->last = eaddr;
2437
2438 tmp->bo_va = NULL;
2439 list_add(&tmp->list, &vm->freed);
2440 trace_amdgpu_vm_bo_unmap(NULL, tmp);
2441 }
2442
2443
2444 if (!list_empty(&before->list)) {
2445 amdgpu_vm_it_insert(before, &vm->va);
2446 if (before->flags & AMDGPU_PTE_PRT)
2447 amdgpu_vm_prt_get(adev);
2448 } else {
2449 kfree(before);
2450 }
2451
2452
2453 if (!list_empty(&after->list)) {
2454 amdgpu_vm_it_insert(after, &vm->va);
2455 if (after->flags & AMDGPU_PTE_PRT)
2456 amdgpu_vm_prt_get(adev);
2457 } else {
2458 kfree(after);
2459 }
2460
2461 return 0;
2462}
2463
2464
2465
2466
2467
2468
2469
2470
2471
2472
2473
2474
2475
2476struct amdgpu_bo_va_mapping *amdgpu_vm_bo_lookup_mapping(struct amdgpu_vm *vm,
2477 uint64_t addr)
2478{
2479 return amdgpu_vm_it_iter_first(&vm->va, addr, addr);
2480}
2481
2482
2483
2484
2485
2486
2487
2488
2489
2490void amdgpu_vm_bo_trace_cs(struct amdgpu_vm *vm, struct ww_acquire_ctx *ticket)
2491{
2492 struct amdgpu_bo_va_mapping *mapping;
2493
2494 if (!trace_amdgpu_vm_bo_cs_enabled())
2495 return;
2496
2497 for (mapping = amdgpu_vm_it_iter_first(&vm->va, 0, U64_MAX); mapping;
2498 mapping = amdgpu_vm_it_iter_next(mapping, 0, U64_MAX)) {
2499 if (mapping->bo_va && mapping->bo_va->base.bo) {
2500 struct amdgpu_bo *bo;
2501
2502 bo = mapping->bo_va->base.bo;
2503 if (dma_resv_locking_ctx(bo->tbo.base.resv) !=
2504 ticket)
2505 continue;
2506 }
2507
2508 trace_amdgpu_vm_bo_cs(mapping);
2509 }
2510}
2511
2512
2513
2514
2515
2516
2517
2518
2519
2520
2521
2522void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
2523 struct amdgpu_bo_va *bo_va)
2524{
2525 struct amdgpu_bo_va_mapping *mapping, *next;
2526 struct amdgpu_bo *bo = bo_va->base.bo;
2527 struct amdgpu_vm *vm = bo_va->base.vm;
2528 struct amdgpu_vm_bo_base **base;
2529
2530 if (bo) {
2531 if (bo->tbo.base.resv == vm->root.base.bo->tbo.base.resv)
2532 vm->bulk_moveable = false;
2533
2534 for (base = &bo_va->base.bo->vm_bo; *base;
2535 base = &(*base)->next) {
2536 if (*base != &bo_va->base)
2537 continue;
2538
2539 *base = bo_va->base.next;
2540 break;
2541 }
2542 }
2543
2544 spin_lock(&vm->invalidated_lock);
2545 list_del(&bo_va->base.vm_status);
2546 spin_unlock(&vm->invalidated_lock);
2547
2548 list_for_each_entry_safe(mapping, next, &bo_va->valids, list) {
2549 list_del(&mapping->list);
2550 amdgpu_vm_it_remove(mapping, &vm->va);
2551 mapping->bo_va = NULL;
2552 trace_amdgpu_vm_bo_unmap(bo_va, mapping);
2553 list_add(&mapping->list, &vm->freed);
2554 }
2555 list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) {
2556 list_del(&mapping->list);
2557 amdgpu_vm_it_remove(mapping, &vm->va);
2558 amdgpu_vm_free_mapping(adev, vm, mapping,
2559 bo_va->last_pt_update);
2560 }
2561
2562 dma_fence_put(bo_va->last_pt_update);
2563
2564 if (bo && bo_va->is_xgmi)
2565 amdgpu_xgmi_set_pstate(adev, AMDGPU_XGMI_PSTATE_MIN);
2566
2567 kfree(bo_va);
2568}
2569
2570
2571
2572
2573
2574
2575
2576
2577bool amdgpu_vm_evictable(struct amdgpu_bo *bo)
2578{
2579 struct amdgpu_vm_bo_base *bo_base = bo->vm_bo;
2580
2581
2582 if (!bo_base || !bo_base->vm)
2583 return true;
2584
2585
2586 if (!dma_resv_test_signaled_rcu(bo->tbo.base.resv, true))
2587 return false;
2588
2589
2590 if (!amdgpu_vm_eviction_trylock(bo_base->vm))
2591 return false;
2592
2593
2594 if (!dma_fence_is_signaled(bo_base->vm->last_unlocked)) {
2595 amdgpu_vm_eviction_unlock(bo_base->vm);
2596 return false;
2597 }
2598
2599 bo_base->vm->evicting = true;
2600 amdgpu_vm_eviction_unlock(bo_base->vm);
2601 return true;
2602}
2603
2604
2605
2606
2607
2608
2609
2610
2611
2612
2613void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
2614 struct amdgpu_bo *bo, bool evicted)
2615{
2616 struct amdgpu_vm_bo_base *bo_base;
2617
2618
2619 if (bo->parent && bo->parent->shadow == bo)
2620 bo = bo->parent;
2621
2622 for (bo_base = bo->vm_bo; bo_base; bo_base = bo_base->next) {
2623 struct amdgpu_vm *vm = bo_base->vm;
2624
2625 if (evicted && bo->tbo.base.resv == vm->root.base.bo->tbo.base.resv) {
2626 amdgpu_vm_bo_evicted(bo_base);
2627 continue;
2628 }
2629
2630 if (bo_base->moved)
2631 continue;
2632 bo_base->moved = true;
2633
2634 if (bo->tbo.type == ttm_bo_type_kernel)
2635 amdgpu_vm_bo_relocated(bo_base);
2636 else if (bo->tbo.base.resv == vm->root.base.bo->tbo.base.resv)
2637 amdgpu_vm_bo_moved(bo_base);
2638 else
2639 amdgpu_vm_bo_invalidated(bo_base);
2640 }
2641}
2642
2643
2644
2645
2646
2647
2648
2649
2650
2651static uint32_t amdgpu_vm_get_block_size(uint64_t vm_size)
2652{
2653
2654 unsigned bits = ilog2(vm_size) + 18;
2655
2656
2657
2658 if (vm_size <= 8)
2659 return (bits - 9);
2660 else
2661 return ((bits + 3) / 2);
2662}
2663
2664
2665
2666
2667
2668
2669
2670
2671
2672
2673
2674void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size,
2675 uint32_t fragment_size_default, unsigned max_level,
2676 unsigned max_bits)
2677{
2678 unsigned int max_size = 1 << (max_bits - 30);
2679 unsigned int vm_size;
2680 uint64_t tmp;
2681
2682
2683 if (amdgpu_vm_size != -1) {
2684 vm_size = amdgpu_vm_size;
2685 if (vm_size > max_size) {
2686 dev_warn(adev->dev, "VM size (%d) too large, max is %u GB\n",
2687 amdgpu_vm_size, max_size);
2688 vm_size = max_size;
2689 }
2690 } else {
2691 struct sysinfo si;
2692 unsigned int phys_ram_gb;
2693
2694
2695
2696
2697
2698
2699
2700
2701
2702
2703
2704
2705
2706
2707
2708
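		/* By default, size the VM address space to cover roughly
		 * three times the amount of physical RAM, rounded up to a
		 * power of two and clamped between min_vm_size and the
		 * maximum the hardware supports.
		 */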
2709 si_meminfo(&si);
2710 phys_ram_gb = ((uint64_t)si.totalram * si.mem_unit +
2711 (1 << 30) - 1) >> 30;
2712 vm_size = roundup_pow_of_two(
2713 min(max(phys_ram_gb * 3, min_vm_size), max_size));
2714 }
2715
2716 adev->vm_manager.max_pfn = (uint64_t)vm_size << 18;
2717
2718 tmp = roundup_pow_of_two(adev->vm_manager.max_pfn);
2719 if (amdgpu_vm_block_size != -1)
2720 tmp >>= amdgpu_vm_block_size - 9;
2721 tmp = DIV_ROUND_UP(fls64(tmp) - 1, 9) - 1;
2722 adev->vm_manager.num_level = min(max_level, (unsigned)tmp);
2723 switch (adev->vm_manager.num_level) {
2724 case 3:
2725 adev->vm_manager.root_level = AMDGPU_VM_PDB2;
2726 break;
2727 case 2:
2728 adev->vm_manager.root_level = AMDGPU_VM_PDB1;
2729 break;
2730 case 1:
2731 adev->vm_manager.root_level = AMDGPU_VM_PDB0;
2732 break;
2733 default:
2734 dev_err(adev->dev, "VMPT only supports 2~4+1 levels\n");
2735 }
2736
2737 if (amdgpu_vm_block_size != -1)
2738 adev->vm_manager.block_size =
2739 min((unsigned)amdgpu_vm_block_size, max_bits
2740 - AMDGPU_GPU_PAGE_SHIFT
2741 - 9 * adev->vm_manager.num_level);
2742 else if (adev->vm_manager.num_level > 1)
2743 adev->vm_manager.block_size = 9;
2744 else
2745 adev->vm_manager.block_size = amdgpu_vm_get_block_size(tmp);
2746
2747 if (amdgpu_vm_fragment_size == -1)
2748 adev->vm_manager.fragment_size = fragment_size_default;
2749 else
2750 adev->vm_manager.fragment_size = amdgpu_vm_fragment_size;
2751
2752 DRM_INFO("vm size is %u GB, %u levels, block size is %u-bit, fragment size is %u-bit\n",
2753 vm_size, adev->vm_manager.num_level + 1,
2754 adev->vm_manager.block_size,
2755 adev->vm_manager.fragment_size);
2756}
2757
2758
2759
2760
2761
2762
2763
2764long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout)
2765{
2766 timeout = dma_resv_wait_timeout_rcu(vm->root.base.bo->tbo.base.resv,
2767 true, true, timeout);
2768 if (timeout <= 0)
2769 return timeout;
2770
2771 return dma_fence_wait_timeout(vm->last_unlocked, true, timeout);
2772}
2773
2774
2775
2776
2777
2778
2779
2780
2781
2782
2783
2784
2785
2786
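/**
 * amdgpu_vm_init - initialize a vm instance
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 * @vm_context: indicates whether it is a GFX or Compute context
 * @pasid: process address space identifier
 *
 * Init @vm fields.
 *
 * Returns:
 * 0 for success, negative error code otherwise.
 */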
int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
		   int vm_context, unsigned int pasid)
{
	struct amdgpu_bo_param bp;
	struct amdgpu_bo *root;
	int r, i;

	vm->va = RB_ROOT_CACHED;
	for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
		vm->reserved_vmid[i] = NULL;
	INIT_LIST_HEAD(&vm->evicted);
	INIT_LIST_HEAD(&vm->relocated);
	INIT_LIST_HEAD(&vm->moved);
	INIT_LIST_HEAD(&vm->idle);
	INIT_LIST_HEAD(&vm->invalidated);
	spin_lock_init(&vm->invalidated_lock);
	INIT_LIST_HEAD(&vm->freed);

	/* create scheduler entities for page table updates */
	r = drm_sched_entity_init(&vm->immediate, DRM_SCHED_PRIORITY_NORMAL,
				  adev->vm_manager.vm_pte_scheds,
				  adev->vm_manager.vm_pte_num_scheds, NULL);
	if (r)
		return r;

	r = drm_sched_entity_init(&vm->delayed, DRM_SCHED_PRIORITY_NORMAL,
				  adev->vm_manager.vm_pte_scheds,
				  adev->vm_manager.vm_pte_num_scheds, NULL);
	if (r)
		goto error_free_immediate;

	vm->pte_support_ats = false;
	vm->is_compute_context = false;

	if (vm_context == AMDGPU_VM_CONTEXT_COMPUTE) {
		vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
					    AMDGPU_VM_USE_CPU_FOR_COMPUTE);

		if (adev->asic_type == CHIP_RAVEN)
			vm->pte_support_ats = true;
	} else {
		vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
					    AMDGPU_VM_USE_CPU_FOR_GFX);
	}
	DRM_DEBUG_DRIVER("VM update mode is %s\n",
			 vm->use_cpu_for_update ? "CPU" : "SDMA");
	WARN_ONCE((vm->use_cpu_for_update &&
		   !amdgpu_gmc_vram_full_visible(&adev->gmc)),
		  "CPU update of VM recommended only for large BAR system\n");

	if (vm->use_cpu_for_update)
		vm->update_funcs = &amdgpu_vm_cpu_funcs;
	else
		vm->update_funcs = &amdgpu_vm_sdma_funcs;
	vm->last_update = NULL;
	vm->last_unlocked = dma_fence_get_stub();

	mutex_init(&vm->eviction_lock);
	vm->evicting = false;

	amdgpu_vm_bo_param(adev, vm, adev->vm_manager.root_level, false, &bp);
	if (vm_context == AMDGPU_VM_CONTEXT_COMPUTE)
		bp.flags &= ~AMDGPU_GEM_CREATE_SHADOW;
	r = amdgpu_bo_create(adev, &bp, &root);
	if (r)
		goto error_free_delayed;

	r = amdgpu_bo_reserve(root, true);
	if (r)
		goto error_free_root;

	r = dma_resv_reserve_shared(root->tbo.base.resv, 1);
	if (r)
		goto error_unreserve;

	amdgpu_vm_bo_base_init(&vm->root.base, vm, root);

	r = amdgpu_vm_clear_bo(adev, vm, root, false);
	if (r)
		goto error_unreserve;

	amdgpu_bo_unreserve(vm->root.base.bo);

	if (pasid) {
		unsigned long flags;

		spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
		r = idr_alloc(&adev->vm_manager.pasid_idr, vm, pasid, pasid + 1,
			      GFP_ATOMIC);
		spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
		if (r < 0)
			goto error_free_root;

		vm->pasid = pasid;
	}

	INIT_KFIFO(vm->faults);

	return 0;

error_unreserve:
	amdgpu_bo_unreserve(vm->root.base.bo);

error_free_root:
	amdgpu_bo_unref(&vm->root.base.bo->shadow);
	amdgpu_bo_unref(&vm->root.base.bo);
	vm->root.base.bo = NULL;

error_free_delayed:
	dma_fence_put(vm->last_unlocked);
	drm_sched_entity_destroy(&vm->delayed);

error_free_immediate:
	drm_sched_entity_destroy(&vm->immediate);

	return r;
}

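/**
 * amdgpu_vm_check_clean_reserved - check if a VM is clean
 *
 * @adev: amdgpu_device pointer
 * @vm: the VM to check
 *
 * Check all entries of the root PD; if any subsequent PDs are allocated,
 * page tables have already been created and filled, so the VM is not clean.
 *
 * Returns:
 * 0 if this VM is clean, -EINVAL otherwise.
 */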
static int amdgpu_vm_check_clean_reserved(struct amdgpu_device *adev,
					  struct amdgpu_vm *vm)
{
	enum amdgpu_vm_level root = adev->vm_manager.root_level;
	unsigned int entries = amdgpu_vm_num_entries(adev, root);
	unsigned int i = 0;

	if (!(vm->root.entries))
		return 0;

	for (i = 0; i < entries; i++) {
		if (vm->root.entries[i].base.bo)
			return -EINVAL;
	}

	return 0;
}

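/**
 * amdgpu_vm_make_compute - Turn a GFX VM into a compute VM
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 * @pasid: pasid to use
 *
 * This only works on GFX VMs that don't have any BOs added and no
 * page tables allocated yet.
 *
 * Changes the following VM parameters:
 * - use_cpu_for_update
 * - pte_support_ats
 * - pasid (the old PASID is released, compute manages its own PASIDs)
 *
 * Reinitializes the page directory to reflect the changed ATS setting.
 *
 * Returns:
 * 0 for success, -errno for errors.
 */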
int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm,
			   unsigned int pasid)
{
	bool pte_support_ats = (adev->asic_type == CHIP_RAVEN);
	int r;

	r = amdgpu_bo_reserve(vm->root.base.bo, true);
	if (r)
		return r;

	/* Sanity check: converting only works on a clean VM without page tables */
	r = amdgpu_vm_check_clean_reserved(adev, vm);
	if (r)
		goto unreserve_bo;

	if (pasid) {
		unsigned long flags;

		spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
		r = idr_alloc(&adev->vm_manager.pasid_idr, vm, pasid, pasid + 1,
			      GFP_ATOMIC);
		spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);

		if (r == -ENOSPC)
			goto unreserve_bo;
		r = 0;
	}

	/* Check if the PD needs to be reinitialized and do it before
	 * changing any other state, in case it fails.
	 */
	if (pte_support_ats != vm->pte_support_ats) {
		vm->pte_support_ats = pte_support_ats;
		r = amdgpu_vm_clear_bo(adev, vm, vm->root.base.bo, false);
		if (r)
			goto free_idr;
	}

	/* Update VM state */
	vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
				    AMDGPU_VM_USE_CPU_FOR_COMPUTE);
	DRM_DEBUG_DRIVER("VM update mode is %s\n",
			 vm->use_cpu_for_update ? "CPU" : "SDMA");
	WARN_ONCE((vm->use_cpu_for_update &&
		   !amdgpu_gmc_vram_full_visible(&adev->gmc)),
		  "CPU update of VM recommended only for large BAR system\n");

	if (vm->use_cpu_for_update) {
		/* Sync with the last SDMA update/clear before switching to CPU */
		r = amdgpu_bo_sync_wait(vm->root.base.bo,
					AMDGPU_FENCE_OWNER_UNDEFINED, true);
		if (r)
			goto free_idr;

		vm->update_funcs = &amdgpu_vm_cpu_funcs;
	} else {
		vm->update_funcs = &amdgpu_vm_sdma_funcs;
	}
	dma_fence_put(vm->last_update);
	vm->last_update = NULL;
	vm->is_compute_context = true;

	if (vm->pasid) {
		unsigned long flags;

		spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
		idr_remove(&adev->vm_manager.pasid_idr, vm->pasid);
		spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);

		/* Free the pasid originally allocated for this VM; the
		 * caller's pasid (if any) is installed below.
		 */
		amdgpu_pasid_free(vm->pasid);
		vm->pasid = 0;
	}

	/* Free the shadow BO of the root PD, it is not used for compute VMs */
	amdgpu_bo_unref(&vm->root.base.bo->shadow);

	if (pasid)
		vm->pasid = pasid;

	goto unreserve_bo;

free_idr:
	if (pasid) {
		unsigned long flags;

		spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
		idr_remove(&adev->vm_manager.pasid_idr, pasid);
		spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
	}
unreserve_bo:
	amdgpu_bo_unreserve(vm->root.base.bo);
	return r;
}

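/**
 * amdgpu_vm_release_compute - release a compute vm
 * @adev: amdgpu_device pointer
 * @vm: a vm turned into a compute vm by calling amdgpu_vm_make_compute
 *
 * This is the counterpart of amdgpu_vm_make_compute. It decouples the
 * pasid from the vm. Compute should stop using the vm after this call.
 */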
void amdgpu_vm_release_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm)
{
	if (vm->pasid) {
		unsigned long flags;

		spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
		idr_remove(&adev->vm_manager.pasid_idr, vm->pasid);
		spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
	}
	vm->pasid = 0;
	vm->is_compute_context = false;
}

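/**
 * amdgpu_vm_fini - tear down a vm instance
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 *
 * Tear down @vm.
 * Unbind the VM and remove all bos from the vm bo list.
 */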
void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
{
	struct amdgpu_bo_va_mapping *mapping, *tmp;
	bool prt_fini_needed = !!adev->gmc.gmc_funcs->set_prt;
	struct amdgpu_bo *root;
	int i;

	amdgpu_amdkfd_gpuvm_destroy_cb(adev, vm);

	root = amdgpu_bo_ref(vm->root.base.bo);
	amdgpu_bo_reserve(root, true);
	if (vm->pasid) {
		unsigned long flags;

		spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
		idr_remove(&adev->vm_manager.pasid_idr, vm->pasid);
		spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
		vm->pasid = 0;
	}

	dma_fence_wait(vm->last_unlocked, false);
	dma_fence_put(vm->last_unlocked);

	list_for_each_entry_safe(mapping, tmp, &vm->freed, list) {
		if (mapping->flags & AMDGPU_PTE_PRT && prt_fini_needed) {
			amdgpu_vm_prt_fini(adev, vm);
			prt_fini_needed = false;
		}

		list_del(&mapping->list);
		amdgpu_vm_free_mapping(adev, vm, mapping, NULL);
	}

	amdgpu_vm_free_pts(adev, vm, NULL);
	amdgpu_bo_unreserve(root);
	amdgpu_bo_unref(&root);
	WARN_ON(vm->root.base.bo);

	drm_sched_entity_destroy(&vm->immediate);
	drm_sched_entity_destroy(&vm->delayed);

	if (!RB_EMPTY_ROOT(&vm->va.rb_root)) {
		dev_err(adev->dev, "still active bo inside vm\n");
	}
	rbtree_postorder_for_each_entry_safe(mapping, tmp,
					     &vm->va.rb_root, rb) {
		/* Don't remove the mapping here, we don't want to trigger a
		 * rebalance and the tree is about to be destroyed anyway.
		 */
		list_del(&mapping->list);
		kfree(mapping);
	}

	dma_fence_put(vm->last_update);
	for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
		amdgpu_vmid_free_reserved(adev, vm, i);
}

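/**
 * amdgpu_vm_manager_init - init the VM manager
 *
 * @adev: amdgpu_device pointer
 *
 * Initialize the VM manager structures.
 */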
void amdgpu_vm_manager_init(struct amdgpu_device *adev)
{
	unsigned i;

	amdgpu_vmid_mgr_init(adev);

	adev->vm_manager.fence_context =
		dma_fence_context_alloc(AMDGPU_MAX_RINGS);
	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
		adev->vm_manager.seqno[i] = 0;

	spin_lock_init(&adev->vm_manager.prt_lock);
	atomic_set(&adev->vm_manager.num_prt_users, 0);

	/* If not overridden by the user, compute VM tables are updated by
	 * the CPU only on large BAR systems where all of VRAM is CPU visible.
	 */
#ifdef CONFIG_X86_64
	if (amdgpu_vm_update_mode == -1) {
		if (amdgpu_gmc_vram_full_visible(&adev->gmc))
			adev->vm_manager.vm_update_mode =
				AMDGPU_VM_USE_CPU_FOR_COMPUTE;
		else
			adev->vm_manager.vm_update_mode = 0;
	} else
		adev->vm_manager.vm_update_mode = amdgpu_vm_update_mode;
#else
	adev->vm_manager.vm_update_mode = 0;
#endif

	idr_init(&adev->vm_manager.pasid_idr);
	spin_lock_init(&adev->vm_manager.pasid_lock);
}

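/**
 * amdgpu_vm_manager_fini - cleanup VM manager
 *
 * @adev: amdgpu_device pointer
 *
 * Cleanup the VM manager and free resources.
 */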
void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
{
	WARN_ON(!idr_is_empty(&adev->vm_manager.pasid_idr));
	idr_destroy(&adev->vm_manager.pasid_idr);

	amdgpu_vmid_mgr_fini(adev);
}

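/**
 * amdgpu_vm_ioctl - Manages VMID reservation for vm hubs.
 *
 * @dev: drm device pointer
 * @data: drm_amdgpu_vm
 * @filp: drm file pointer
 *
 * Returns:
 * 0 for success, -errno for errors.
 */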
int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
	union drm_amdgpu_vm *args = data;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv = filp->driver_priv;
	long timeout = msecs_to_jiffies(2000);
	int r;

	switch (args->in.op) {
	case AMDGPU_VM_OP_RESERVE_VMID:
		/* We only need to reserve a VMID from the GFX hub */
		r = amdgpu_vmid_alloc_reserved(adev, &fpriv->vm,
					       AMDGPU_GFXHUB_0);
		if (r)
			return r;
		break;
	case AMDGPU_VM_OP_UNRESERVE_VMID:
		if (amdgpu_sriov_runtime(adev))
			timeout = 8 * timeout;

		/* Wait for the VM to become idle so the reserved VMID is
		 * not referenced anymore before it is freed.
		 */
		r = amdgpu_bo_reserve(fpriv->vm.root.base.bo, true);
		if (r)
			return r;

		r = amdgpu_vm_wait_idle(&fpriv->vm, timeout);
		if (r < 0)
			return r;

		amdgpu_bo_unreserve(fpriv->vm.root.base.bo);
		amdgpu_vmid_free_reserved(adev, &fpriv->vm, AMDGPU_GFXHUB_0);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

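/**
 * amdgpu_vm_get_task_info - Extracts task info for a PASID.
 *
 * @adev: amdgpu device pointer
 * @pasid: PASID identifier for VM
 * @task_info: task_info to fill.
 */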
void amdgpu_vm_get_task_info(struct amdgpu_device *adev, unsigned int pasid,
			     struct amdgpu_task_info *task_info)
{
	struct amdgpu_vm *vm;
	unsigned long flags;

	spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);

	vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
	if (vm)
		*task_info = vm->task_info;

	spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
}

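/**
 * amdgpu_vm_set_task_info - Sets VMs task info.
 *
 * @vm: vm for which to set the info
 */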
void amdgpu_vm_set_task_info(struct amdgpu_vm *vm)
{
	if (vm->task_info.pid)
		return;

	vm->task_info.pid = current->pid;
	get_task_comm(vm->task_info.task_name, current);

	if (current->group_leader->mm != current->mm)
		return;

	vm->task_info.tgid = current->group_leader->pid;
	get_task_comm(vm->task_info.process_name, current->group_leader);
}

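/**
 * amdgpu_vm_handle_fault - graceful handling of VM faults.
 * @adev: amdgpu device pointer
 * @pasid: PASID of the VM
 * @addr: Address of the fault
 *
 * Try to gracefully handle a VM fault. Return true if the fault was handled
 * and shouldn't be reported any more.
 */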
bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, unsigned int pasid,
			    uint64_t addr)
{
	struct amdgpu_bo *root;
	uint64_t value, flags;
	struct amdgpu_vm *vm;
	long r;

	spin_lock(&adev->vm_manager.pasid_lock);
	vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
	if (vm)
		root = amdgpu_bo_ref(vm->root.base.bo);
	else
		root = NULL;
	spin_unlock(&adev->vm_manager.pasid_lock);

	if (!root)
		return false;

	r = amdgpu_bo_reserve(root, true);
	if (r)
		goto error_unref;

	/* Double check that the VM still exists */
	spin_lock(&adev->vm_manager.pasid_lock);
	vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
	if (vm && vm->root.base.bo != root)
		vm = NULL;
	spin_unlock(&adev->vm_manager.pasid_lock);
	if (!vm)
		goto error_unlock;

	addr /= AMDGPU_GPU_PAGE_SIZE;
	flags = AMDGPU_PTE_VALID | AMDGPU_PTE_SNOOPED |
		AMDGPU_PTE_SYSTEM;

	if (vm->is_compute_context) {
		/* Intentionally set an invalid PTE flag
		 * combination to force a no-retry-fault
		 */
		flags = AMDGPU_PTE_EXECUTABLE | AMDGPU_PDE_PTE |
			AMDGPU_PTE_TF;
		value = 0;

	} else if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_NEVER) {
		/* Redirect the access to the dummy page */
		value = adev->dummy_page_addr;
		flags |= AMDGPU_PTE_EXECUTABLE | AMDGPU_PTE_READABLE |
			AMDGPU_PTE_WRITEABLE;

	} else {
		/* Let the hw retry silently on the PTE */
		value = 0;
	}

	r = amdgpu_vm_bo_update_mapping(adev, vm, true, false, NULL, addr,
					addr + 1, flags, value, NULL, NULL);
	if (r)
		goto error_unlock;

	r = amdgpu_vm_update_pdes(adev, vm, true);

error_unlock:
	amdgpu_bo_unreserve(root);
	if (r < 0)
		DRM_ERROR("Can't handle page fault (%ld)\n", r);

error_unref:
	amdgpu_bo_unref(&root);

	return false;
}