// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 * Copyright 2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <drm/drm_drv.h>

#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_vm.h"

/*
 * amdgpu_vm_pt_cursor - state for for_each_amdgpu_vm_pt
 */
struct amdgpu_vm_pt_cursor {
	uint64_t pfn;
	struct amdgpu_vm_bo_base *parent;
	struct amdgpu_vm_bo_base *entry;
	unsigned int level;
};

/**
 * amdgpu_vm_pt_level_shift - return the addr shift for each level
 *
 * @adev: amdgpu_device pointer
 * @level: VMPT level
 *
 * Returns:
 * The number of bits the pfn needs to be right shifted for a level.
 */
static unsigned int amdgpu_vm_pt_level_shift(struct amdgpu_device *adev,
					     unsigned int level)
{
	switch (level) {
	case AMDGPU_VM_PDB2:
	case AMDGPU_VM_PDB1:
	case AMDGPU_VM_PDB0:
		return 9 * (AMDGPU_VM_PDB0 - level) +
			adev->vm_manager.block_size;
	case AMDGPU_VM_PTB:
		return 0;
	default:
		return ~0;
	}
}
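
/*
 * Illustrative example (not part of the driver logic): with the common
 * block_size of 9 the shifts work out to PDB0 = 9, PDB1 = 18, PDB2 = 27
 * and PTB = 0, i.e. one PDB0 entry covers 1 << 9 GPU pages (2 MiB with
 * 4 KiB pages).
 */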

/**
 * amdgpu_vm_pt_num_entries - return the number of entries in a PD/PT
 *
 * @adev: amdgpu_device pointer
 * @level: VMPT level
 *
 * Returns:
 * The number of entries in a page directory or page table.
 */
static unsigned int amdgpu_vm_pt_num_entries(struct amdgpu_device *adev,
					     unsigned int level)
{
	unsigned int shift;

	shift = amdgpu_vm_pt_level_shift(adev, adev->vm_manager.root_level);
	if (level == adev->vm_manager.root_level)
		/* For the root directory */
		return round_up(adev->vm_manager.max_pfn, 1ULL << shift)
			>> shift;
	else if (level != AMDGPU_VM_PTB)
		/* Everything in between */
		return 512;

	/* For the page tables on the leaves */
	return AMDGPU_VM_PTE_COUNT(adev);
}

/**
 * amdgpu_vm_pt_num_ats_entries - return the number of ATS entries in the root PD
 *
 * @adev: amdgpu_device pointer
 *
 * Returns:
 * The number of entries in the root page directory which needs the ATS setting.
 */
static unsigned int amdgpu_vm_pt_num_ats_entries(struct amdgpu_device *adev)
{
	unsigned int shift;

	shift = amdgpu_vm_pt_level_shift(adev, adev->vm_manager.root_level);
	return AMDGPU_GMC_HOLE_START >> (shift + AMDGPU_GPU_PAGE_SHIFT);
}

/**
 * amdgpu_vm_pt_entries_mask - the mask to get the entry number of a PD/PT
 *
 * @adev: amdgpu_device pointer
 * @level: VMPT level
 *
 * Returns:
 * The mask to extract the entry number of a PD/PT from an address.
 */
static uint32_t amdgpu_vm_pt_entries_mask(struct amdgpu_device *adev,
					  unsigned int level)
{
	if (level <= adev->vm_manager.root_level)
		return 0xffffffff;
	else if (level != AMDGPU_VM_PTB)
		return 0x1ff;
	else
		return AMDGPU_VM_PTE_COUNT(adev) - 1;
}
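
/*
 * Illustrative example (not part of the driver logic): an entry index is
 * extracted as (pfn >> shift) & mask, see amdgpu_vm_pt_descendant(). With
 * block_size = 9, pfn 0x12345 selects entry 0x145 in the PTB (shift 0,
 * mask 0x1ff) and entry 0x91 in the PDB0 directory above it (shift 9,
 * mask 0x1ff).
 */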

/**
 * amdgpu_vm_pt_size - returns the size of the page table in bytes
 *
 * @adev: amdgpu_device pointer
 * @level: VMPT level
 *
 * Returns:
 * The size of the BO for a page directory or page table in bytes.
 */
static unsigned int amdgpu_vm_pt_size(struct amdgpu_device *adev,
				      unsigned int level)
{
	return AMDGPU_GPU_PAGE_ALIGN(amdgpu_vm_pt_num_entries(adev, level) * 8);
}

/**
 * amdgpu_vm_pt_parent - get the parent page directory
 *
 * @pt: child page table
 *
 * Helper to get the parent entry for the child page table. NULL if we are at
 * the root page directory.
 */
static struct amdgpu_vm_bo_base *
amdgpu_vm_pt_parent(struct amdgpu_vm_bo_base *pt)
{
	struct amdgpu_bo *parent = pt->bo->parent;

	if (!parent)
		return NULL;

	return parent->vm_bo;
}

/**
 * amdgpu_vm_pt_start - start PD/PT walk
 *
 * @adev: amdgpu_device pointer
 * @vm: amdgpu_vm structure
 * @start: start address of the walk
 * @cursor: state to initialize
 *
 * Initialize a amdgpu_vm_pt_cursor to start a walk.
 */
static void amdgpu_vm_pt_start(struct amdgpu_device *adev,
			       struct amdgpu_vm *vm, uint64_t start,
			       struct amdgpu_vm_pt_cursor *cursor)
{
	cursor->pfn = start;
	cursor->parent = NULL;
	cursor->entry = &vm->root;
	cursor->level = adev->vm_manager.root_level;
}

/**
 * amdgpu_vm_pt_descendant - go to child node
 *
 * @adev: amdgpu_device pointer
 * @cursor: current state
 *
 * Walk to the child node of the current node.
 *
 * Returns:
 * True if the walk was possible, false otherwise.
 */
static bool amdgpu_vm_pt_descendant(struct amdgpu_device *adev,
				    struct amdgpu_vm_pt_cursor *cursor)
{
	unsigned int mask, shift, idx;

	if ((cursor->level == AMDGPU_VM_PTB) || !cursor->entry ||
	    !cursor->entry->bo)
		return false;

	mask = amdgpu_vm_pt_entries_mask(adev, cursor->level);
	shift = amdgpu_vm_pt_level_shift(adev, cursor->level);

	++cursor->level;
	idx = (cursor->pfn >> shift) & mask;
	cursor->parent = cursor->entry;
	cursor->entry = &to_amdgpu_bo_vm(cursor->entry->bo)->entries[idx];
	return true;
}

/**
 * amdgpu_vm_pt_sibling - go to sibling node
 *
 * @adev: amdgpu_device pointer
 * @cursor: current state
 *
 * Walk to the sibling node of the current node.
 *
 * Returns:
 * True if the walk was possible, false otherwise.
 */
static bool amdgpu_vm_pt_sibling(struct amdgpu_device *adev,
				 struct amdgpu_vm_pt_cursor *cursor)
{
	unsigned int shift, num_entries;
	struct amdgpu_bo_vm *parent;

	/* Root doesn't have a parent */
	if (!cursor->parent)
		return false;

	/* Go to our parents and see if we got a sibling */
	shift = amdgpu_vm_pt_level_shift(adev, cursor->level - 1);
	num_entries = amdgpu_vm_pt_num_entries(adev, cursor->level - 1);
	parent = to_amdgpu_bo_vm(cursor->parent->bo);

	if (cursor->entry == &parent->entries[num_entries - 1])
		return false;

	cursor->pfn += 1ULL << shift;
	cursor->pfn &= ~((1ULL << shift) - 1);
	++cursor->entry;
	return true;
}

/**
 * amdgpu_vm_pt_ancestor - go to parent node
 *
 * @cursor: current state
 *
 * Walk to the parent node of the current node.
 *
 * Returns:
 * True if the walk was possible, false otherwise.
 */
static bool amdgpu_vm_pt_ancestor(struct amdgpu_vm_pt_cursor *cursor)
{
	if (!cursor->parent)
		return false;

	--cursor->level;
	cursor->entry = cursor->parent;
	cursor->parent = amdgpu_vm_pt_parent(cursor->parent);
	return true;
}

/**
 * amdgpu_vm_pt_next - get next PD/PT in hierarchy
 *
 * @adev: amdgpu_device pointer
 * @cursor: current state
 *
 * Walk the hierarchy to the next node.
 */
static void amdgpu_vm_pt_next(struct amdgpu_device *adev,
			      struct amdgpu_vm_pt_cursor *cursor)
{
	/* First try a newborn child */
	if (amdgpu_vm_pt_descendant(adev, cursor))
		return;

	/* If that didn't work, try to find a sibling */
	while (!amdgpu_vm_pt_sibling(adev, cursor)) {
		/* No sibling, go to our parents and grandparents */
		if (!amdgpu_vm_pt_ancestor(cursor)) {
			cursor->pfn = ~0ll;
			return;
		}
	}
}

/**
 * amdgpu_vm_pt_first_dfs - start a deep first search
 *
 * @adev: amdgpu_device structure
 * @vm: amdgpu_vm structure
 * @start: optional cursor to start with
 * @cursor: state to initialize
 *
 * Starts a deep first traversal of the PD/PT tree.
 */
static void amdgpu_vm_pt_first_dfs(struct amdgpu_device *adev,
				   struct amdgpu_vm *vm,
				   struct amdgpu_vm_pt_cursor *start,
				   struct amdgpu_vm_pt_cursor *cursor)
{
	if (start)
		*cursor = *start;
	else
		amdgpu_vm_pt_start(adev, vm, 0, cursor);

	while (amdgpu_vm_pt_descendant(adev, cursor))
		;
}

/**
 * amdgpu_vm_pt_continue_dfs - check if the deep first search should continue
 *
 * @start: starting point for the search
 * @entry: current entry
 *
 * Returns:
 * True when the search should continue, false otherwise.
 */
static bool amdgpu_vm_pt_continue_dfs(struct amdgpu_vm_pt_cursor *start,
				      struct amdgpu_vm_bo_base *entry)
{
	return entry && (!start || entry != start->entry);
}

/**
 * amdgpu_vm_pt_next_dfs - get the next node for a deep first search
 *
 * @adev: amdgpu_device structure
 * @cursor: current state
 *
 * Walk the PD/PT tree to the next node.
 */
static void amdgpu_vm_pt_next_dfs(struct amdgpu_device *adev,
				  struct amdgpu_vm_pt_cursor *cursor)
{
	if (!cursor->entry)
		return;

	if (!cursor->parent)
		cursor->entry = NULL;
	else if (amdgpu_vm_pt_sibling(adev, cursor))
		while (amdgpu_vm_pt_descendant(adev, cursor))
			;
	else
		amdgpu_vm_pt_ancestor(cursor);
}

/*
 * for_each_amdgpu_vm_pt_dfs_safe - safe deep first search of all PDs/PTs
 */
#define for_each_amdgpu_vm_pt_dfs_safe(adev, vm, start, cursor, entry)	\
	for (amdgpu_vm_pt_first_dfs((adev), (vm), (start), &(cursor)),	\
	     (entry) = (cursor).entry, amdgpu_vm_pt_next_dfs((adev), &(cursor));\
	     amdgpu_vm_pt_continue_dfs((start), (entry));		\
	     (entry) = (cursor).entry, amdgpu_vm_pt_next_dfs((adev), &(cursor)))
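
/*
 * Usage sketch (mirrors amdgpu_vm_pt_free_dfs() below): the walk is "safe"
 * because the cursor is advanced before @entry is handed to the loop body,
 * so the entry may be freed from inside the loop:
 *
 *	struct amdgpu_vm_pt_cursor cursor;
 *	struct amdgpu_vm_bo_base *entry;
 *
 *	for_each_amdgpu_vm_pt_dfs_safe(adev, vm, start, cursor, entry)
 *		amdgpu_vm_pt_free(entry);
 */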

/**
 * amdgpu_vm_pt_clear - initially clear the PDs/PTs
 *
 * @adev: amdgpu_device pointer
 * @vm: VM to clear BO from
 * @vmbo: BO to clear
 * @immediate: use an immediate update
 *
 * Root PD needs to be reserved when calling this.
 *
 * Returns:
 * 0 on success, errno otherwise.
 */
int amdgpu_vm_pt_clear(struct amdgpu_device *adev, struct amdgpu_vm *vm,
		       struct amdgpu_bo_vm *vmbo, bool immediate)
{
	unsigned int level = adev->vm_manager.root_level;
	struct ttm_operation_ctx ctx = { true, false };
	struct amdgpu_vm_update_params params;
	struct amdgpu_bo *ancestor = &vmbo->bo;
	unsigned int entries, ats_entries;
	struct amdgpu_bo *bo = &vmbo->bo;
	uint64_t addr;
	int r, idx;

	/* Figure out our place in the hierarchy */
	if (ancestor->parent) {
		++level;
		while (ancestor->parent->parent) {
			++level;
			ancestor = ancestor->parent;
		}
	}

	entries = amdgpu_bo_size(bo) / 8;
	if (!vm->pte_support_ats) {
		ats_entries = 0;

	} else if (!bo->parent) {
		ats_entries = amdgpu_vm_pt_num_ats_entries(adev);
		ats_entries = min(ats_entries, entries);
		entries -= ats_entries;

	} else {
		struct amdgpu_vm_bo_base *pt;

		pt = ancestor->vm_bo;
		ats_entries = amdgpu_vm_pt_num_ats_entries(adev);
		if ((pt - to_amdgpu_bo_vm(vm->root.bo)->entries) >=
		    ats_entries) {
			ats_entries = 0;
		} else {
			ats_entries = entries;
			entries = 0;
		}
	}

	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (r)
		return r;

	if (vmbo->shadow) {
		struct amdgpu_bo *shadow = vmbo->shadow;

		r = ttm_bo_validate(&shadow->tbo, &shadow->placement, &ctx);
		if (r)
			return r;
	}

	if (!drm_dev_enter(adev_to_drm(adev), &idx))
		return -ENODEV;

	r = vm->update_funcs->map_table(vmbo);
	if (r)
		goto exit;

	memset(&params, 0, sizeof(params));
	params.adev = adev;
	params.vm = vm;
	params.immediate = immediate;

	r = vm->update_funcs->prepare(&params, NULL, AMDGPU_SYNC_EXPLICIT);
	if (r)
		goto exit;

	addr = 0;
	if (ats_entries) {
		uint64_t value = 0, flags;

		flags = AMDGPU_PTE_DEFAULT_ATC;
		if (level != AMDGPU_VM_PTB) {
			/* Handle leaf PDEs as PTEs */
			flags |= AMDGPU_PDE_PTE;
			amdgpu_gmc_get_vm_pde(adev, level, &value, &flags);
		}

		r = vm->update_funcs->update(&params, vmbo, addr, 0,
					     ats_entries, value, flags);
		if (r)
			goto exit;

		addr += ats_entries * 8;
	}

	if (entries) {
		uint64_t value = 0, flags = 0;

		if (adev->asic_type >= CHIP_VEGA10) {
			if (level != AMDGPU_VM_PTB) {
				/* Handle leaf PDEs as PTEs */
				flags |= AMDGPU_PDE_PTE;
				amdgpu_gmc_get_vm_pde(adev, level,
						      &value, &flags);
			} else {
				/* Workaround for fault priority problem on GMC9 */
				flags = AMDGPU_PTE_EXECUTABLE;
			}
		}

		r = vm->update_funcs->update(&params, vmbo, addr, 0, entries,
					     value, flags);
		if (r)
			goto exit;
	}

	r = vm->update_funcs->commit(&params, NULL);
exit:
	drm_dev_exit(idx);
	return r;
}

/**
 * amdgpu_vm_pt_create - create bo used for page table
 *
 * @adev: amdgpu_device pointer
 * @vm: requesting vm
 * @level: the page table level
 * @immediate: use an immediate update
 * @vmbo: pointer to the buffer object pointer
 */
int amdgpu_vm_pt_create(struct amdgpu_device *adev, struct amdgpu_vm *vm,
			int level, bool immediate, struct amdgpu_bo_vm **vmbo)
{
	struct amdgpu_bo_param bp;
	struct amdgpu_bo *bo;
	struct dma_resv *resv;
	unsigned int num_entries;
	int r;

	memset(&bp, 0, sizeof(bp));

	bp.size = amdgpu_vm_pt_size(adev, level);
	bp.byte_align = AMDGPU_GPU_PAGE_SIZE;
	bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
	bp.domain = amdgpu_bo_get_preferred_domain(adev, bp.domain);
	bp.flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
		AMDGPU_GEM_CREATE_CPU_GTT_USWC;

	if (level < AMDGPU_VM_PTB)
		num_entries = amdgpu_vm_pt_num_entries(adev, level);
	else
		num_entries = 0;

	bp.bo_ptr_size = struct_size((*vmbo), entries, num_entries);

	if (vm->use_cpu_for_update)
		bp.flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;

	bp.type = ttm_bo_type_kernel;
	bp.no_wait_gpu = immediate;
	if (vm->root.bo)
		bp.resv = vm->root.bo->tbo.base.resv;

	r = amdgpu_bo_create_vm(adev, &bp, vmbo);
	if (r)
		return r;

	bo = &(*vmbo)->bo;
	if (vm->is_compute_context || (adev->flags & AMD_IS_APU)) {
		(*vmbo)->shadow = NULL;
		return 0;
	}

	if (!bp.resv)
		WARN_ON(dma_resv_lock(bo->tbo.base.resv,
				      NULL));
	resv = bp.resv;
	memset(&bp, 0, sizeof(bp));
	bp.size = amdgpu_vm_pt_size(adev, level);
	bp.domain = AMDGPU_GEM_DOMAIN_GTT;
	bp.flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC;
	bp.type = ttm_bo_type_kernel;
	bp.resv = bo->tbo.base.resv;
	bp.bo_ptr_size = sizeof(struct amdgpu_bo);

	r = amdgpu_bo_create(adev, &bp, &(*vmbo)->shadow);

	if (!resv)
		dma_resv_unlock(bo->tbo.base.resv);

	if (r) {
		amdgpu_bo_unref(&bo);
		return r;
	}

	(*vmbo)->shadow->parent = amdgpu_bo_ref(bo);
	amdgpu_bo_add_to_shadow_list(*vmbo);

	return 0;
}

/**
 * amdgpu_vm_pt_alloc - Allocate a specific page table
 *
 * @adev: amdgpu_device pointer
 * @vm: VM to allocate page tables for
 * @cursor: Which page table to allocate
 * @immediate: use an immediate update
 *
 * Make sure a specific page table or directory is allocated.
 *
 * Returns:
 * 0 if the page table was already allocated or successfully created,
 * negative errno if an error occurred.
 */
static int amdgpu_vm_pt_alloc(struct amdgpu_device *adev,
			      struct amdgpu_vm *vm,
			      struct amdgpu_vm_pt_cursor *cursor,
			      bool immediate)
{
	struct amdgpu_vm_bo_base *entry = cursor->entry;
	struct amdgpu_bo *pt_bo;
	struct amdgpu_bo_vm *pt;
	int r;

	if (entry->bo)
		return 0;

	r = amdgpu_vm_pt_create(adev, vm, cursor->level, immediate, &pt);
	if (r)
		return r;

	/* Keep a reference to the root directory to avoid
	 * freeing them up in the wrong order.
	 */
	pt_bo = &pt->bo;
	pt_bo->parent = amdgpu_bo_ref(cursor->parent->bo);
	amdgpu_vm_bo_base_init(entry, vm, pt_bo);
	r = amdgpu_vm_pt_clear(adev, vm, pt, immediate);
	if (r)
		goto error_free_pt;

	return 0;

error_free_pt:
	amdgpu_bo_unref(&pt->shadow);
	amdgpu_bo_unref(&pt_bo);
	return r;
}

/**
 * amdgpu_vm_pt_free - free one PD/PT
 *
 * @entry: PDE to free
 */
static void amdgpu_vm_pt_free(struct amdgpu_vm_bo_base *entry)
{
	struct amdgpu_bo *shadow;

	if (!entry->bo)
		return;
	shadow = amdgpu_bo_shadowed(entry->bo);
	if (shadow) {
		ttm_bo_set_bulk_move(&shadow->tbo, NULL);
		amdgpu_bo_unref(&shadow);
	}
	ttm_bo_set_bulk_move(&entry->bo->tbo, NULL);
	entry->bo->vm_bo = NULL;
	list_del(&entry->vm_status);
	amdgpu_bo_unref(&entry->bo);
}

/**
 * amdgpu_vm_pt_free_dfs - free PD/PT levels
 *
 * @adev: amdgpu device structure
 * @vm: amdgpu vm structure
 * @start: optional cursor where to start freeing PDs/PTs
 *
 * Free the page directory or page table level and all sub levels.
 */
static void amdgpu_vm_pt_free_dfs(struct amdgpu_device *adev,
				  struct amdgpu_vm *vm,
				  struct amdgpu_vm_pt_cursor *start)
{
	struct amdgpu_vm_pt_cursor cursor;
	struct amdgpu_vm_bo_base *entry;

	for_each_amdgpu_vm_pt_dfs_safe(adev, vm, start, cursor, entry)
		amdgpu_vm_pt_free(entry);

	if (start)
		amdgpu_vm_pt_free(start->entry);
}

/**
 * amdgpu_vm_pt_free_root - free root PD
 * @adev: amdgpu device structure
 * @vm: amdgpu vm structure
 *
 * Free the root page directory and everything below it.
 */
void amdgpu_vm_pt_free_root(struct amdgpu_device *adev, struct amdgpu_vm *vm)
{
	amdgpu_vm_pt_free_dfs(adev, vm, NULL);
}

/**
 * amdgpu_vm_pt_is_root_clean - check if a root BO is clean
 *
 * @adev: amdgpu_device pointer
 * @vm: the VM to check
 *
 * Check all entries of the root PD. If any subsequent PDs are allocated,
 * page tables are being created and filled, so the VM is not clean.
 *
 * Returns:
 * True if this VM is clean.
 */
bool amdgpu_vm_pt_is_root_clean(struct amdgpu_device *adev,
				struct amdgpu_vm *vm)
{
	enum amdgpu_vm_level root = adev->vm_manager.root_level;
	unsigned int entries = amdgpu_vm_pt_num_entries(adev, root);
	unsigned int i = 0;

	for (i = 0; i < entries; i++) {
		if (to_amdgpu_bo_vm(vm->root.bo)->entries[i].bo)
			return false;
	}
	return true;
}

/**
 * amdgpu_vm_pde_update - update a single level in the hierarchy
 *
 * @params: parameters for the update
 * @entry: entry to update
 *
 * Makes sure the requested entry in parent is up to date.
 */
int amdgpu_vm_pde_update(struct amdgpu_vm_update_params *params,
			 struct amdgpu_vm_bo_base *entry)
{
	struct amdgpu_vm_bo_base *parent = amdgpu_vm_pt_parent(entry);
	struct amdgpu_bo *bo = parent->bo, *pbo;
	struct amdgpu_vm *vm = params->vm;
	uint64_t pde, pt, flags;
	unsigned int level;

	for (level = 0, pbo = bo->parent; pbo; ++level)
		pbo = pbo->parent;

	level += params->adev->vm_manager.root_level;
	amdgpu_gmc_get_pde_for_bo(entry->bo, level, &pt, &flags);
	pde = (entry - to_amdgpu_bo_vm(parent->bo)->entries) * 8;
	return vm->update_funcs->update(params, to_amdgpu_bo_vm(bo), pde, pt,
					1, 0, flags);
}

/*
 * amdgpu_vm_pte_update_flags - figure out flags for PTE updates
 *
 * Make sure to set the right flags for the PTEs at the desired level.
 */
static void amdgpu_vm_pte_update_flags(struct amdgpu_vm_update_params *params,
				       struct amdgpu_bo_vm *pt,
				       unsigned int level,
				       uint64_t pe, uint64_t addr,
				       unsigned int count, uint32_t incr,
				       uint64_t flags)
{
	if (level != AMDGPU_VM_PTB) {
		flags |= AMDGPU_PDE_PTE;
		amdgpu_gmc_get_vm_pde(params->adev, level, &addr, &flags);

	} else if (params->adev->asic_type >= CHIP_VEGA10 &&
		   !(flags & AMDGPU_PTE_VALID) &&
		   !(flags & AMDGPU_PTE_PRT)) {

		/* Workaround for fault priority problem on GMC9 */
		flags |= AMDGPU_PTE_EXECUTABLE;
	}

	params->vm->update_funcs->update(params, pt, pe, addr, count, incr,
					 flags);
}

/**
 * amdgpu_vm_pte_fragment - get fragment for PTEs
 *
 * @params: see amdgpu_vm_update_params definition
 * @start: first PTE to handle
 * @end: last PTE to handle
 * @flags: hw mapping flags
 * @frag: resulting fragment size
 * @frag_end: end of this fragment
 *
 * Returns the first possible fragment for the start and end address.
 */
static void amdgpu_vm_pte_fragment(struct amdgpu_vm_update_params *params,
				   uint64_t start, uint64_t end, uint64_t flags,
				   unsigned int *frag, uint64_t *frag_end)
{
	/**
	 * The MC L1 TLB supports variable sized pages, based on a fragment
	 * field in the PTE. When this field is set to a non-zero value, page
	 * granularity is increased from 4KB to (1 << (12 + frag)). The PTE
	 * flags are currently supported in all formats.
	 *
	 * Userspace can support this by aligning virtual base address and
	 * allocation size to the fragment size.
	 *
	 * Starting with Vega10 the fragment size only controls the L1. The L2
	 * is now directly fed with small/huge/giant pages from the walker.
	 */
	unsigned int max_frag;

	if (params->adev->asic_type < CHIP_VEGA10)
		max_frag = params->adev->vm_manager.fragment_size;
	else
		max_frag = 31;

	/* system pages are non continuously */
	if (params->pages_addr) {
		*frag = 0;
		*frag_end = end;
		return;
	}

	/* This intentionally wraps around if no bit is set */
	*frag = min_t(unsigned int, ffs(start) - 1, fls64(end - start) - 1);
	if (*frag >= max_frag) {
		*frag = max_frag;
		*frag_end = end & ~((1ULL << max_frag) - 1);
	} else {
		*frag_end = start + (1 << *frag);
	}
}
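
/*
 * Worked example (illustrative): for start = 0x400 and end = 0x800 on a
 * GMC v9 part (max_frag = 31), ffs(0x400) - 1 = 10 and
 * fls64(0x800 - 0x400) - 1 = 10, so *frag = 10 and *frag_end becomes
 * 0x400 + (1 << 10) = 0x800, i.e. the whole range fits in one fragment
 * of 1 << 10 GPU pages.
 */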

/**
 * amdgpu_vm_ptes_update - make sure that page tables are valid
 *
 * @params: see amdgpu_vm_update_params definition
 * @start: start of GPU address range
 * @end: end of GPU address range
 * @dst: destination address to map to, the next dst inside the function
 * @flags: mapping flags
 *
 * Update the page tables in the range @start - @end.
 *
 * Returns:
 * 0 for success, -EINVAL for failure.
 */
int amdgpu_vm_ptes_update(struct amdgpu_vm_update_params *params,
			  uint64_t start, uint64_t end,
			  uint64_t dst, uint64_t flags)
{
	struct amdgpu_device *adev = params->adev;
	struct amdgpu_vm_pt_cursor cursor;
	uint64_t frag_start = start, frag_end;
	unsigned int frag;
	int r;

	/* figure out the initial fragment */
	amdgpu_vm_pte_fragment(params, frag_start, end, flags, &frag,
			       &frag_end);

	/* walk over the address space and update the PTs */
	amdgpu_vm_pt_start(adev, params->vm, start, &cursor);
	while (cursor.pfn < end) {
		unsigned int shift, parent_shift, mask;
		uint64_t incr, entry_end, pe_start;
		struct amdgpu_bo *pt;

		if (!params->unlocked) {
			/* make sure that the page tables covering the
			 * address range are actually allocated
			 */
			r = amdgpu_vm_pt_alloc(params->adev, params->vm,
					       &cursor, params->immediate);
			if (r)
				return r;
		}

		shift = amdgpu_vm_pt_level_shift(adev, cursor.level);
		parent_shift = amdgpu_vm_pt_level_shift(adev, cursor.level - 1);
		if (params->unlocked) {
			/* Unlocked updates are only allowed on the leaves */
			if (amdgpu_vm_pt_descendant(adev, &cursor))
				continue;
		} else if (adev->asic_type < CHIP_VEGA10 &&
			   (flags & AMDGPU_PTE_VALID)) {
			/* No huge page support before GMC v9 */
			if (cursor.level != AMDGPU_VM_PTB) {
				if (!amdgpu_vm_pt_descendant(adev, &cursor))
					return -ENOENT;
				continue;
			}
		} else if (frag < shift) {
			/* We can't use this level when the fragment size is
			 * smaller than the address shift. Go to the next
			 * child entry and try again.
			 */
			if (amdgpu_vm_pt_descendant(adev, &cursor))
				continue;
		} else if (frag >= parent_shift) {
			/* If the fragment size is even larger than the parent
			 * shift we should go up one level and check it again.
			 */
			if (!amdgpu_vm_pt_ancestor(&cursor))
				return -EINVAL;
			continue;
		}

		pt = cursor.entry->bo;
		if (!pt) {
			/* We need all PDs and PTs for mapping something, */
			if (flags & AMDGPU_PTE_VALID)
				return -ENOENT;

			/* but unmapping something can happen at a higher
			 * level.
			 */
			if (!amdgpu_vm_pt_ancestor(&cursor))
				return -EINVAL;

			pt = cursor.entry->bo;
			shift = parent_shift;
			frag_end = max(frag_end, ALIGN(frag_start + 1,
						       1ULL << shift));
		}

		/* Looks good so far, calculate parameters for the update */
		incr = (uint64_t)AMDGPU_GPU_PAGE_SIZE << shift;
		mask = amdgpu_vm_pt_entries_mask(adev, cursor.level);
		pe_start = ((cursor.pfn >> shift) & mask) * 8;
		entry_end = ((uint64_t)mask + 1) << shift;
		entry_end += cursor.pfn & ~(entry_end - 1);
		entry_end = min(entry_end, end);

		do {
			struct amdgpu_vm *vm = params->vm;
			uint64_t upd_end = min(entry_end, frag_end);
			unsigned int nptes = (upd_end - frag_start) >> shift;
			uint64_t upd_flags = flags | AMDGPU_PTE_FRAG(frag);

			/* This can happen when we set higher level PDs to
			 * silent to stop fault floods.
			 */
			nptes = max(nptes, 1u);

			trace_amdgpu_vm_update_ptes(params, frag_start, upd_end,
						    min(nptes, 32u), dst, incr,
						    upd_flags,
						    vm->task_info.pid,
						    vm->immediate.fence_context);
			amdgpu_vm_pte_update_flags(params, to_amdgpu_bo_vm(pt),
						   cursor.level, pe_start, dst,
						   nptes, incr, upd_flags);

			pe_start += nptes * 8;
			dst += nptes * incr;

			frag_start = upd_end;
			if (frag_start >= frag_end) {
				/* figure out the next fragment */
				amdgpu_vm_pte_fragment(params, frag_start, end,
						       flags, &frag, &frag_end);
				if (frag < shift)
					break;
			}
		} while (frag_start < entry_end);

		if (amdgpu_vm_pt_descendant(adev, &cursor)) {
			/* Free all child entries. The range was just written
			 * at a higher level, so the page tables below it are
			 * no longer needed.
			 */
			while (cursor.pfn < frag_start) {
				/* Make sure previous mapping is freed */
				if (cursor.entry->bo) {
					params->table_freed = true;
					amdgpu_vm_pt_free_dfs(adev, params->vm,
							      &cursor);
				}
				amdgpu_vm_pt_next(adev, &cursor);
			}

		} else if (frag >= shift) {
			/* or just move on to the next on the same level. */
			amdgpu_vm_pt_next(adev, &cursor);
		}
	}

	return 0;
}