#include <linux/list.h>
#include <linux/slab.h>
#include <linux/dma-buf.h>

#include <drm/amdgpu_drm.h>
#include <drm/drm_cache.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_amdkfd.h"
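
/**
 * DOC: amdgpu_object
 *
 * Buffer object (BO) handling for amdgpu, built on top of TTM.  This file
 * implements BO creation and destruction, placement selection, pinning,
 * kernel CPU mappings, metadata and tiling flags, and the accounting of
 * pinned VRAM and GTT memory.
 */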

/* Undo the pin-size accounting for a BO that is pinned into VRAM or GTT. */
static void amdgpu_bo_subtract_pin_size(struct amdgpu_bo *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);

	if (bo->tbo.mem.mem_type == TTM_PL_VRAM) {
		atomic64_sub(amdgpu_bo_size(bo), &adev->vram_pin_size);
		atomic64_sub(amdgpu_vram_mgr_bo_visible_size(bo),
			     &adev->visible_pin_size);
	} else if (bo->tbo.mem.mem_type == TTM_PL_TT) {
		atomic64_sub(amdgpu_bo_size(bo), &adev->gart_pin_size);
	}
}

static void amdgpu_bo_destroy(struct ttm_buffer_object *tbo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
	struct amdgpu_bo *bo = ttm_to_amdgpu_bo(tbo);

	if (bo->tbo.pin_count > 0)
		amdgpu_bo_subtract_pin_size(bo);

	amdgpu_bo_kunmap(bo);

	if (bo->tbo.base.import_attach)
		drm_prime_gem_destroy(&bo->tbo.base, bo->tbo.sg);
	drm_gem_object_release(&bo->tbo.base);

	if (!list_empty(&bo->shadow_list)) {
		mutex_lock(&adev->shadow_list_lock);
		list_del_init(&bo->shadow_list);
		mutex_unlock(&adev->shadow_list_lock);
	}
	amdgpu_bo_unref(&bo->parent);

	kfree(bo->metadata);
	kfree(bo);
}
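
/**
 * amdgpu_bo_is_amdgpu_bo - check if the buffer object is an &amdgpu_bo
 * @bo: buffer object to be checked
 *
 * Uses the destroy function associated with the object to determine if this
 * is an &amdgpu_bo.
 *
 * Returns:
 * true if the object belongs to &amdgpu_bo, false if not.
 */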
bool amdgpu_bo_is_amdgpu_bo(struct ttm_buffer_object *bo)
{
	if (bo->destroy == &amdgpu_bo_destroy)
		return true;
	return false;
}
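
/**
 * amdgpu_bo_placement_from_domain - set buffer's placement
 * @abo: &amdgpu_bo buffer object whose placement is to be set
 * @domain: requested domain
 *
 * Sets buffer's placement according to the requested domain and the buffer's
 * flags.
 */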
void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
	struct ttm_placement *placement = &abo->placement;
	struct ttm_place *places = abo->placements;
	u64 flags = abo->flags;
	u32 c = 0;

	if (domain & AMDGPU_GEM_DOMAIN_VRAM) {
		unsigned visible_pfn = adev->gmc.visible_vram_size >> PAGE_SHIFT;

		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].mem_type = TTM_PL_VRAM;
		places[c].flags = 0;

		if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
			places[c].lpfn = visible_pfn;
		else
			places[c].flags |= TTM_PL_FLAG_TOPDOWN;

		if (flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)
			places[c].flags |= TTM_PL_FLAG_CONTIGUOUS;
		c++;
	}

	if (domain & AMDGPU_GEM_DOMAIN_GTT) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].mem_type = TTM_PL_TT;
		places[c].flags = 0;
		c++;
	}

	if (domain & AMDGPU_GEM_DOMAIN_CPU) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].mem_type = TTM_PL_SYSTEM;
		places[c].flags = 0;
		c++;
	}

	if (domain & AMDGPU_GEM_DOMAIN_GDS) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].mem_type = AMDGPU_PL_GDS;
		places[c].flags = 0;
		c++;
	}

	if (domain & AMDGPU_GEM_DOMAIN_GWS) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].mem_type = AMDGPU_PL_GWS;
		places[c].flags = 0;
		c++;
	}

	if (domain & AMDGPU_GEM_DOMAIN_OA) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].mem_type = AMDGPU_PL_OA;
		places[c].flags = 0;
		c++;
	}

	if (!c) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].mem_type = TTM_PL_SYSTEM;
		places[c].flags = 0;
		c++;
	}

	/* The placements array may legitimately be completely filled. */
	BUG_ON(c > AMDGPU_BO_MAX_PLACEMENTS);

	placement->num_placement = c;
	placement->placement = places;

	placement->num_busy_placement = c;
	placement->busy_placement = places;
}
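
/**
 * amdgpu_bo_create_reserved - create a reserved BO for kernel use
 * @adev: amdgpu device object
 * @size: size for the new BO
 * @align: alignment for the new BO
 * @domain: where to place it
 * @bo_ptr: used to initialize BOs in structures
 * @gpu_addr: GPU addr of the pinned BO
 * @cpu_addr: optional CPU address mapping
 *
 * Allocates and pins a BO for kernel internal use; the BO is left reserved
 * on success and has to be unreserved by the caller.
 *
 * Note: a new BO is only created if *@bo_ptr is NULL.
 *
 * Returns:
 * 0 on success, negative error code otherwise.
 */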
int amdgpu_bo_create_reserved(struct amdgpu_device *adev,
			      unsigned long size, int align,
			      u32 domain, struct amdgpu_bo **bo_ptr,
			      u64 *gpu_addr, void **cpu_addr)
{
	struct amdgpu_bo_param bp;
	bool free = false;
	int r;

	if (!size) {
		amdgpu_bo_unref(bo_ptr);
		return 0;
	}

	memset(&bp, 0, sizeof(bp));
	bp.size = size;
	bp.byte_align = align;
	bp.domain = domain;
	bp.flags = cpu_addr ? AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED
		: AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
	bp.flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
	bp.type = ttm_bo_type_kernel;
	bp.resv = NULL;

	if (!*bo_ptr) {
		r = amdgpu_bo_create(adev, &bp, bo_ptr);
		if (r) {
			dev_err(adev->dev, "(%d) failed to allocate kernel bo\n",
				r);
			return r;
		}
		free = true;
	}

	r = amdgpu_bo_reserve(*bo_ptr, false);
	if (r) {
		dev_err(adev->dev, "(%d) failed to reserve kernel bo\n", r);
		goto error_free;
	}

	r = amdgpu_bo_pin(*bo_ptr, domain);
	if (r) {
		dev_err(adev->dev, "(%d) kernel bo pin failed\n", r);
		goto error_unreserve;
	}

	r = amdgpu_ttm_alloc_gart(&(*bo_ptr)->tbo);
	if (r) {
		dev_err(adev->dev, "%p bind failed\n", *bo_ptr);
		goto error_unpin;
	}

	if (gpu_addr)
		*gpu_addr = amdgpu_bo_gpu_offset(*bo_ptr);

	if (cpu_addr) {
		r = amdgpu_bo_kmap(*bo_ptr, cpu_addr);
		if (r) {
			dev_err(adev->dev, "(%d) kernel bo map failed\n", r);
			goto error_unpin;
		}
	}

	return 0;

error_unpin:
	amdgpu_bo_unpin(*bo_ptr);
error_unreserve:
	amdgpu_bo_unreserve(*bo_ptr);

error_free:
	if (free)
		amdgpu_bo_unref(bo_ptr);

	return r;
}
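
/**
 * amdgpu_bo_create_kernel - create a BO for kernel use
 * @adev: amdgpu device object
 * @size: size for the new BO
 * @align: alignment for the new BO
 * @domain: where to place it
 * @bo_ptr: used to initialize BOs in structures
 * @gpu_addr: GPU addr of the pinned BO
 * @cpu_addr: optional CPU address mapping
 *
 * Allocates and pins a BO for kernel internal use.
 *
 * Note: a new BO is only created if *@bo_ptr is NULL.
 *
 * Returns:
 * 0 on success, negative error code otherwise.
 */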
int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
			    unsigned long size, int align,
			    u32 domain, struct amdgpu_bo **bo_ptr,
			    u64 *gpu_addr, void **cpu_addr)
{
	int r;

	r = amdgpu_bo_create_reserved(adev, size, align, domain, bo_ptr,
				      gpu_addr, cpu_addr);

	if (r)
		return r;

	if (*bo_ptr)
		amdgpu_bo_unreserve(*bo_ptr);

	return 0;
}
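
/**
 * amdgpu_bo_create_kernel_at - create BO for kernel use at a specific location
 * @adev: amdgpu device object
 * @offset: offset of the BO
 * @size: size of the BO
 * @domain: where to place it
 * @bo_ptr: used to initialize BOs in structures
 * @cpu_addr: optional CPU address mapping
 *
 * Creates a kernel BO at a specific offset in the domain.
 *
 * Returns:
 * 0 on success, negative error code otherwise.
 */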
int amdgpu_bo_create_kernel_at(struct amdgpu_device *adev,
			       uint64_t offset, uint64_t size, uint32_t domain,
			       struct amdgpu_bo **bo_ptr, void **cpu_addr)
{
	struct ttm_operation_ctx ctx = { false, false };
	unsigned int i;
	int r;

	offset &= PAGE_MASK;
	size = ALIGN(size, PAGE_SIZE);

	r = amdgpu_bo_create_reserved(adev, size, PAGE_SIZE, domain, bo_ptr,
				      NULL, cpu_addr);
	if (r)
		return r;

	if ((*bo_ptr) == NULL)
		return 0;

	/*
	 * Remove the original mem node and create a new one at the requested
	 * position.
	 */
	if (cpu_addr)
		amdgpu_bo_kunmap(*bo_ptr);

	ttm_resource_free(&(*bo_ptr)->tbo, &(*bo_ptr)->tbo.mem);

	for (i = 0; i < (*bo_ptr)->placement.num_placement; ++i) {
		(*bo_ptr)->placements[i].fpfn = offset >> PAGE_SHIFT;
		(*bo_ptr)->placements[i].lpfn = (offset + size) >> PAGE_SHIFT;
	}
	r = ttm_bo_mem_space(&(*bo_ptr)->tbo, &(*bo_ptr)->placement,
			     &(*bo_ptr)->tbo.mem, &ctx);
	if (r)
		goto error;

	if (cpu_addr) {
		r = amdgpu_bo_kmap(*bo_ptr, cpu_addr);
		if (r)
			goto error;
	}

	amdgpu_bo_unreserve(*bo_ptr);
	return 0;

error:
	amdgpu_bo_unreserve(*bo_ptr);
	amdgpu_bo_unref(bo_ptr);
	return r;
}
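
/**
 * amdgpu_bo_free_kernel - free BO for kernel use
 * @bo: amdgpu BO to free
 * @gpu_addr: pointer to where the BO's GPU memory space address was stored
 * @cpu_addr: pointer to where the BO's CPU memory space address was stored
 *
 * Unmaps, unpins and frees a BO that was created for kernel internal use.
 */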
void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr,
			   void **cpu_addr)
{
	if (*bo == NULL)
		return;

	if (likely(amdgpu_bo_reserve(*bo, true) == 0)) {
		if (cpu_addr)
			amdgpu_bo_kunmap(*bo);

		amdgpu_bo_unpin(*bo);
		amdgpu_bo_unreserve(*bo);
	}
	amdgpu_bo_unref(bo);

	if (gpu_addr)
		*gpu_addr = 0;

	if (cpu_addr)
		*cpu_addr = NULL;
}

/* Check that the requested BO size fits within the chosen domain(s). */
static bool amdgpu_bo_validate_size(struct amdgpu_device *adev,
				    unsigned long size, u32 domain)
{
	struct ttm_resource_manager *man = NULL;

	/*
	 * If GTT is part of the requested domains the check must succeed to
	 * allow falling back to GTT.
	 */
	if (domain & AMDGPU_GEM_DOMAIN_GTT) {
		man = ttm_manager_type(&adev->mman.bdev, TTM_PL_TT);

		if (size < (man->size << PAGE_SHIFT))
			return true;
		else
			goto fail;
	}

	if (domain & AMDGPU_GEM_DOMAIN_VRAM) {
		man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);

		if (size < (man->size << PAGE_SHIFT))
			return true;
		else
			goto fail;
	}

	/* TODO add more domain checks, such as AMDGPU_GEM_DOMAIN_CPU */
	return true;

fail:
	DRM_DEBUG("BO size %lu > total memory in domain: %llu\n", size,
		  man->size << PAGE_SHIFT);
	return false;
}

bool amdgpu_bo_support_uswc(u64 bo_flags)
{

#ifdef CONFIG_X86_32
	/*
	 * Write-combined CPU mappings of GTT have been problematic on
	 * 32-bit x86, so don't use USWC there.
	 */
	return false;
#elif defined(CONFIG_X86) && !defined(CONFIG_X86_PAT)
	/*
	 * Don't try to enable write-combining when it can't work (no PAT
	 * support); that would only slow things down.
	 */
#ifndef CONFIG_COMPILE_TEST
#warning Please enable CONFIG_MTRR and CONFIG_X86_PAT for better performance \
	 thanks to write-combining
#endif

	if (bo_flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
		DRM_INFO_ONCE("Please enable CONFIG_MTRR and CONFIG_X86_PAT for "
			      "better performance thanks to write-combining\n");
	return false;
#else
	/*
	 * For architectures that don't support write-combined memory, mask
	 * out the USWC flag.
	 */
	if (!drm_arch_can_wc_memory())
		return false;

	return true;
#endif
}

static int amdgpu_bo_do_create(struct amdgpu_device *adev,
			       struct amdgpu_bo_param *bp,
			       struct amdgpu_bo **bo_ptr)
{
	struct ttm_operation_ctx ctx = {
		.interruptible = (bp->type != ttm_bo_type_kernel),
		.no_wait_gpu = bp->no_wait_gpu,
		/* We opt to avoid OOM on system pages allocations */
		.gfp_retry_mayfail = true,
		.allow_res_evict = bp->type != ttm_bo_type_kernel,
		.resv = bp->resv
	};
	struct amdgpu_bo *bo;
	unsigned long page_align, size = bp->size;
	size_t acc_size;
	int r;

	if (bp->domain & (AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA)) {
		/* GWS and OA don't need any alignment. */
		page_align = bp->byte_align;
		size <<= PAGE_SHIFT;
	} else if (bp->domain & AMDGPU_GEM_DOMAIN_GDS) {
		/* Both size and alignment must be a multiple of 4. */
		page_align = ALIGN(bp->byte_align, 4);
		size = ALIGN(size, 4) << PAGE_SHIFT;
	} else {
		/* Memory should be aligned at least to a page size. */
		page_align = ALIGN(bp->byte_align, PAGE_SIZE) >> PAGE_SHIFT;
		size = ALIGN(size, PAGE_SIZE);
	}

	if (!amdgpu_bo_validate_size(adev, size, bp->domain))
		return -ENOMEM;

	*bo_ptr = NULL;

	acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size,
				       sizeof(struct amdgpu_bo));

	bo = kzalloc(sizeof(struct amdgpu_bo), GFP_KERNEL);
	if (bo == NULL)
		return -ENOMEM;
	drm_gem_private_object_init(adev_to_drm(adev), &bo->tbo.base, size);
	INIT_LIST_HEAD(&bo->shadow_list);
	bo->vm_bo = NULL;
	bo->preferred_domains = bp->preferred_domain ? bp->preferred_domain :
		bp->domain;
	bo->allowed_domains = bo->preferred_domains;
	if (bp->type != ttm_bo_type_kernel &&
	    bo->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
		bo->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;

	bo->flags = bp->flags;

	if (!amdgpu_bo_support_uswc(bo->flags))
		bo->flags &= ~AMDGPU_GEM_CREATE_CPU_GTT_USWC;

	bo->tbo.bdev = &adev->mman.bdev;
	if (bp->domain & (AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA |
			  AMDGPU_GEM_DOMAIN_GDS))
		amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
	else
		amdgpu_bo_placement_from_domain(bo, bp->domain);
	if (bp->type == ttm_bo_type_kernel)
		bo->tbo.priority = 1;

	r = ttm_bo_init_reserved(&adev->mman.bdev, &bo->tbo, size, bp->type,
				 &bo->placement, page_align, &ctx, acc_size,
				 NULL, bp->resv, &amdgpu_bo_destroy);
	if (unlikely(r != 0))
		return r;

	if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
	    bo->tbo.mem.mem_type == TTM_PL_VRAM &&
	    bo->tbo.mem.start < adev->gmc.visible_vram_size >> PAGE_SHIFT)
		amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved,
					     ctx.bytes_moved);
	else
		amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved, 0);

	if (bp->flags & AMDGPU_GEM_CREATE_VRAM_CLEARED &&
	    bo->tbo.mem.mem_type == TTM_PL_VRAM) {
		struct dma_fence *fence;

		r = amdgpu_fill_buffer(bo, 0, bo->tbo.base.resv, &fence);
		if (unlikely(r))
			goto fail_unreserve;

		amdgpu_bo_fence(bo, fence, false);
		dma_fence_put(bo->tbo.moving);
		bo->tbo.moving = dma_fence_get(fence);
		dma_fence_put(fence);
	}
	if (!bp->resv)
		amdgpu_bo_unreserve(bo);
	*bo_ptr = bo;

	trace_amdgpu_bo_create(bo);

	/* Treat CPU_ACCESS_REQUIRED only as a hint if given by UMD */
	if (bp->type == ttm_bo_type_device)
		bo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;

	return 0;

fail_unreserve:
	if (!bp->resv)
		dma_resv_unlock(bo->tbo.base.resv);
	amdgpu_bo_unref(&bo);
	return r;
}

static int amdgpu_bo_create_shadow(struct amdgpu_device *adev,
				   unsigned long size,
				   struct amdgpu_bo *bo)
{
	struct amdgpu_bo_param bp;
	int r;

	if (bo->shadow)
		return 0;

	memset(&bp, 0, sizeof(bp));
	bp.size = size;
	bp.domain = AMDGPU_GEM_DOMAIN_GTT;
	bp.flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC |
		AMDGPU_GEM_CREATE_SHADOW;
	bp.type = ttm_bo_type_kernel;
	bp.resv = bo->tbo.base.resv;

	r = amdgpu_bo_do_create(adev, &bp, &bo->shadow);
	if (!r) {
		bo->shadow->parent = amdgpu_bo_ref(bo);
		mutex_lock(&adev->shadow_list_lock);
		list_add_tail(&bo->shadow->shadow_list, &adev->shadow_list);
		mutex_unlock(&adev->shadow_list_lock);
	}

	return r;
}
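
/**
 * amdgpu_bo_create - create an &amdgpu_bo buffer object
 * @adev: amdgpu device object
 * @bp: parameters to be used for the buffer object
 * @bo_ptr: pointer to the buffer object pointer
 *
 * Creates an &amdgpu_bo buffer object and, if requested, also creates a
 * shadow object.
 *
 * Returns:
 * 0 on success, negative error code otherwise.
 */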
int amdgpu_bo_create(struct amdgpu_device *adev,
		     struct amdgpu_bo_param *bp,
		     struct amdgpu_bo **bo_ptr)
{
	u64 flags = bp->flags;
	int r;

	bp->flags = bp->flags & ~AMDGPU_GEM_CREATE_SHADOW;
	r = amdgpu_bo_do_create(adev, bp, bo_ptr);
	if (r)
		return r;

	if ((flags & AMDGPU_GEM_CREATE_SHADOW) && !(adev->flags & AMD_IS_APU)) {
		if (!bp->resv)
			WARN_ON(dma_resv_lock((*bo_ptr)->tbo.base.resv,
					      NULL));

		r = amdgpu_bo_create_shadow(adev, bp->size, *bo_ptr);

		if (!bp->resv)
			dma_resv_unlock((*bo_ptr)->tbo.base.resv);

		if (r)
			amdgpu_bo_unref(bo_ptr);
	}

	return r;
}
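
/**
 * amdgpu_bo_validate - validate an &amdgpu_bo buffer object
 * @bo: pointer to the buffer object
 *
 * Sets placement according to the preferred domain and calls
 * ttm_bo_validate() to make the buffer resident, falling back to the allowed
 * domains on -ENOMEM.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */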
int amdgpu_bo_validate(struct amdgpu_bo *bo)
{
	struct ttm_operation_ctx ctx = { false, false };
	uint32_t domain;
	int r;

	if (bo->tbo.pin_count)
		return 0;

	domain = bo->preferred_domains;

retry:
	amdgpu_bo_placement_from_domain(bo, domain);
	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) {
		domain = bo->allowed_domains;
		goto retry;
	}

	return r;
}
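
/**
 * amdgpu_bo_restore_shadow - restore an &amdgpu_bo shadow
 * @shadow: &amdgpu_bo shadow to be restored
 * @fence: dma_fence associated with the operation
 *
 * Copies the shadow BO's content back to its parent BO, e.g. to recover the
 * parent after its VRAM contents were lost.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */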
int amdgpu_bo_restore_shadow(struct amdgpu_bo *shadow, struct dma_fence **fence)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(shadow->tbo.bdev);
	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
	uint64_t shadow_addr, parent_addr;

	shadow_addr = amdgpu_bo_gpu_offset(shadow);
	parent_addr = amdgpu_bo_gpu_offset(shadow->parent);

	return amdgpu_copy_buffer(ring, shadow_addr, parent_addr,
				  amdgpu_bo_size(shadow), NULL, fence,
				  true, false, false);
}
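
/**
 * amdgpu_bo_kmap - map an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object to be mapped
 * @ptr: kernel virtual address to be returned
 *
 * Calls ttm_bo_kmap() to set up the kernel virtual mapping and
 * amdgpu_bo_kptr() to get the kernel virtual address.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */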
int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr)
{
	void *kptr;
	long r;

	if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
		return -EPERM;

	kptr = amdgpu_bo_kptr(bo);
	if (kptr) {
		if (ptr)
			*ptr = kptr;
		return 0;
	}

	r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv, false, false,
				      MAX_SCHEDULE_TIMEOUT);
	if (r < 0)
		return r;

	r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.mem.num_pages, &bo->kmap);
	if (r)
		return r;

	if (ptr)
		*ptr = amdgpu_bo_kptr(bo);

	return 0;
}

/* Return the current kernel virtual address of @bo, or NULL if not mapped. */
void *amdgpu_bo_kptr(struct amdgpu_bo *bo)
{
	bool is_iomem;

	return ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
}

/* Drop the kernel virtual mapping of @bo created by amdgpu_bo_kmap(). */
void amdgpu_bo_kunmap(struct amdgpu_bo *bo)
{
	if (bo->kmap.bo)
		ttm_bo_kunmap(&bo->kmap);
}

/* Acquire an additional reference to @bo; returns @bo for convenience. */
struct amdgpu_bo *amdgpu_bo_ref(struct amdgpu_bo *bo)
{
	if (bo == NULL)
		return NULL;

	ttm_bo_get(&bo->tbo);
	return bo;
}

/* Drop a reference to *@bo and clear the pointer. */
void amdgpu_bo_unref(struct amdgpu_bo **bo)
{
	struct ttm_buffer_object *tbo;

	if ((*bo) == NULL)
		return;

	tbo = &((*bo)->tbo);
	ttm_bo_put(tbo);
	*bo = NULL;
}
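
/**
 * amdgpu_bo_pin_restricted - pin an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object to be pinned
 * @domain: domain to be pinned to
 * @min_offset: the start of the requested address range
 * @max_offset: the end of the requested address range
 *
 * Pins the buffer object according to the requested domain and address
 * range, and adjusts pin_count and pin_size accordingly.
 *
 * Pinning means locking pages in memory along with keeping them at a fixed
 * offset.  It is required when a buffer cannot be moved, for example when a
 * display buffer is being scanned out.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */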
int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
			     u64 min_offset, u64 max_offset)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct ttm_operation_ctx ctx = { false, false };
	int r, i;

	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm))
		return -EPERM;

	if (WARN_ON_ONCE(min_offset > max_offset))
		return -EINVAL;

	/* A shared bo cannot be migrated to VRAM */
	if (bo->prime_shared_count || bo->tbo.base.import_attach) {
		if (domain & AMDGPU_GEM_DOMAIN_GTT)
			domain = AMDGPU_GEM_DOMAIN_GTT;
		else
			return -EINVAL;
	}

	/* This assumes only APU display buffers are pinned with (VRAM|GTT) */
	domain = amdgpu_bo_get_preferred_pin_domain(adev, domain);

	if (bo->tbo.pin_count) {
		uint32_t mem_type = bo->tbo.mem.mem_type;
		uint32_t mem_flags = bo->tbo.mem.placement;

		if (!(domain & amdgpu_mem_type_to_domain(mem_type)))
			return -EINVAL;

		if ((mem_type == TTM_PL_VRAM) &&
		    (bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS) &&
		    !(mem_flags & TTM_PL_FLAG_CONTIGUOUS))
			return -EINVAL;

		ttm_bo_pin(&bo->tbo);

		if (max_offset != 0) {
			u64 domain_start = amdgpu_ttm_domain_start(adev,
								   mem_type);
			WARN_ON_ONCE(max_offset <
				     (amdgpu_bo_gpu_offset(bo) - domain_start));
		}

		return 0;
	}

	if (bo->tbo.base.import_attach)
		dma_buf_pin(bo->tbo.base.import_attach);

	/* force to pin into visible video ram */
	if (!(bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS))
		bo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
	amdgpu_bo_placement_from_domain(bo, domain);
	for (i = 0; i < bo->placement.num_placement; i++) {
		unsigned fpfn, lpfn;

		fpfn = min_offset >> PAGE_SHIFT;
		lpfn = max_offset >> PAGE_SHIFT;

		if (fpfn > bo->placements[i].fpfn)
			bo->placements[i].fpfn = fpfn;
		if (!bo->placements[i].lpfn ||
		    (lpfn && lpfn < bo->placements[i].lpfn))
			bo->placements[i].lpfn = lpfn;
	}

	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (unlikely(r)) {
		dev_err(adev->dev, "%p pin failed\n", bo);
		goto error;
	}

	ttm_bo_pin(&bo->tbo);

	domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
	if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
		atomic64_add(amdgpu_bo_size(bo), &adev->vram_pin_size);
		atomic64_add(amdgpu_vram_mgr_bo_visible_size(bo),
			     &adev->visible_pin_size);
	} else if (domain == AMDGPU_GEM_DOMAIN_GTT) {
		atomic64_add(amdgpu_bo_size(bo), &adev->gart_pin_size);
	}

error:
	return r;
}
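
/**
 * amdgpu_bo_pin - pin an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object to be pinned
 * @domain: domain to be pinned to
 *
 * A simple wrapper around amdgpu_bo_pin_restricted() for buffers that do not
 * have any strict restrictions on where they must be located.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */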
int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain)
{
	bo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
	return amdgpu_bo_pin_restricted(bo, domain, 0, 0);
}
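
/**
 * amdgpu_bo_unpin - unpin an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object to be unpinned
 *
 * Decreases the pin count and, once it reaches zero, removes the BO from the
 * pinned accounting and unpins any imported dma-buf attachment.
 */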
void amdgpu_bo_unpin(struct amdgpu_bo *bo)
{
	ttm_bo_unpin(&bo->tbo);
	if (bo->tbo.pin_count)
		return;

	amdgpu_bo_subtract_pin_size(bo);

	if (bo->tbo.base.import_attach)
		dma_buf_unpin(bo->tbo.base.import_attach);
}
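
/**
 * amdgpu_bo_evict_vram - evict VRAM buffers
 * @adev: amdgpu device object
 *
 * Evicts all VRAM buffers on the LRU list of the memory type.
 * Mainly used for evicting VRAM at suspend time.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */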
int amdgpu_bo_evict_vram(struct amdgpu_device *adev)
{
	struct ttm_resource_manager *man;

	if (adev->in_s3 && (adev->flags & AMD_IS_APU)) {
		/* No need to evict VRAM on APUs for suspend to RAM */
		return 0;
	}

	man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);
	return ttm_resource_manager_evict_all(&adev->mman.bdev, man);
}

static const char *amdgpu_vram_names[] = {
	"UNKNOWN",
	"GDDR1",
	"DDR2",
	"GDDR3",
	"GDDR4",
	"GDDR5",
	"HBM",
	"DDR3",
	"DDR4",
	"GDDR6",
	"DDR5"
};
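
/**
 * amdgpu_bo_init - initialize memory manager
 * @adev: amdgpu device object
 *
 * Sets up write-combined access to the VRAM aperture and calls
 * amdgpu_ttm_init() to initialize the memory manager.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */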
int amdgpu_bo_init(struct amdgpu_device *adev)
{
	/* reserve PAT memory space to WC for VRAM */
	arch_io_reserve_memtype_wc(adev->gmc.aper_base,
				   adev->gmc.aper_size);

	/* Add an MTRR for the VRAM */
	adev->gmc.vram_mtrr = arch_phys_wc_add(adev->gmc.aper_base,
					       adev->gmc.aper_size);
	DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n",
		 adev->gmc.mc_vram_size >> 20,
		 (unsigned long long)adev->gmc.aper_size >> 20);
	DRM_INFO("RAM width %dbits %s\n",
		 adev->gmc.vram_width, amdgpu_vram_names[adev->gmc.vram_type]);
	return amdgpu_ttm_init(adev);
}

/* Tear down the memory manager and release the VRAM MTRR/WC reservation. */
void amdgpu_bo_fini(struct amdgpu_device *adev)
{
	amdgpu_ttm_fini(adev);
	arch_phys_wc_del(adev->gmc.vram_mtrr);
	arch_io_free_memtype_wc(adev->gmc.aper_base, adev->gmc.aper_size);
}

int amdgpu_bo_fbdev_mmap(struct amdgpu_bo *bo,
			 struct vm_area_struct *vma)
{
	if (vma->vm_pgoff != 0)
		return -EACCES;

	return ttm_bo_mmap_obj(vma, &bo->tbo);
}

/*
 * Set the tiling flags on @bo.  Families up to Carrizo only accept
 * TILE_SPLIT values of 6 or less.
 */
int amdgpu_bo_set_tiling_flags(struct amdgpu_bo *bo, u64 tiling_flags)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);

	if (adev->family <= AMDGPU_FAMILY_CZ &&
	    AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT) > 6)
		return -EINVAL;

	bo->tiling_flags = tiling_flags;
	return 0;
}

/* Read back the tiling flags of @bo; the caller must hold the reservation. */
void amdgpu_bo_get_tiling_flags(struct amdgpu_bo *bo, u64 *tiling_flags)
{
	dma_resv_assert_held(bo->tbo.base.resv);

	if (tiling_flags)
		*tiling_flags = bo->tiling_flags;
}
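
/**
 * amdgpu_bo_set_metadata - set metadata
 * @bo: &amdgpu_bo buffer object
 * @metadata: new metadata
 * @metadata_size: size of the new metadata
 * @flags: flags of the new metadata
 *
 * Sets the buffer object's metadata, its size and flags.  A zero
 * @metadata_size frees any previously attached metadata.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */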
int amdgpu_bo_set_metadata(struct amdgpu_bo *bo, void *metadata,
			   uint32_t metadata_size, uint64_t flags)
{
	void *buffer;

	if (!metadata_size) {
		if (bo->metadata_size) {
			kfree(bo->metadata);
			bo->metadata = NULL;
			bo->metadata_size = 0;
		}
		return 0;
	}

	if (metadata == NULL)
		return -EINVAL;

	buffer = kmemdup(metadata, metadata_size, GFP_KERNEL);
	if (buffer == NULL)
		return -ENOMEM;

	kfree(bo->metadata);
	bo->metadata_flags = flags;
	bo->metadata = buffer;
	bo->metadata_size = metadata_size;

	return 0;
}
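
/**
 * amdgpu_bo_get_metadata - get metadata
 * @bo: &amdgpu_bo buffer object
 * @buffer: returned metadata
 * @buffer_size: size of the buffer
 * @metadata_size: size of the returned metadata
 * @flags: flags of the returned metadata
 *
 * Gets the buffer object's metadata, its size and flags.  @buffer_size must
 * not be less than the stored metadata size.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */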
int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
			   size_t buffer_size, uint32_t *metadata_size,
			   uint64_t *flags)
{
	if (!buffer && !metadata_size)
		return -EINVAL;

	if (buffer) {
		if (buffer_size < bo->metadata_size)
			return -EINVAL;

		if (bo->metadata_size)
			memcpy(buffer, bo->metadata, bo->metadata_size);
	}

	if (metadata_size)
		*metadata_size = bo->metadata_size;
	if (flags)
		*flags = bo->metadata_flags;

	return 0;
}
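
/**
 * amdgpu_bo_move_notify - notification about a memory move
 * @bo: pointer to a buffer object
 * @evict: if this move is evicting the buffer from the graphics address space
 * @new_mem: new information of the buffer object
 *
 * TTM driver callback which is called when TTM moves a buffer.  Invalidates
 * the corresponding VM mappings and updates move/eviction statistics.
 */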
void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
			   bool evict,
			   struct ttm_resource *new_mem)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
	struct amdgpu_bo *abo;
	struct ttm_resource *old_mem = &bo->mem;

	if (!amdgpu_bo_is_amdgpu_bo(bo))
		return;

	abo = ttm_to_amdgpu_bo(bo);
	amdgpu_vm_bo_invalidate(adev, abo, evict);

	amdgpu_bo_kunmap(abo);

	if (abo->tbo.base.dma_buf && !abo->tbo.base.import_attach &&
	    bo->mem.mem_type != TTM_PL_SYSTEM)
		dma_buf_move_notify(abo->tbo.base.dma_buf);

	/* remember the eviction */
	if (evict)
		atomic64_inc(&adev->num_evictions);

	/* update statistics only when a new placement exists */
	if (!new_mem)
		return;

	/* move_notify is called before the move happens */
	trace_amdgpu_bo_move(abo, new_mem->mem_type, old_mem->mem_type);
}
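
/**
 * amdgpu_bo_release_notify - notification about a BO being released
 * @bo: pointer to a buffer object
 *
 * TTM driver callback invoked when a BO is released.  Wipes VRAM buffers
 * whose contents should not be leaked before the memory is released.
 */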
void amdgpu_bo_release_notify(struct ttm_buffer_object *bo)
{
	struct dma_fence *fence = NULL;
	struct amdgpu_bo *abo;
	int r;

	if (!amdgpu_bo_is_amdgpu_bo(bo))
		return;

	abo = ttm_to_amdgpu_bo(bo);

	if (abo->kfd_bo)
		amdgpu_amdkfd_unreserve_memory_limit(abo);

	/* We only remove the fence if the resv has individualized. */
	WARN_ON_ONCE(bo->type == ttm_bo_type_kernel
			&& bo->base.resv != &bo->base._resv);
	if (bo->base.resv == &bo->base._resv)
		amdgpu_amdkfd_remove_fence_on_pt_pd_bos(abo);

	if (bo->mem.mem_type != TTM_PL_VRAM || !bo->mem.mm_node ||
	    !(abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE))
		return;

	dma_resv_lock(bo->base.resv, NULL);

	r = amdgpu_fill_buffer(abo, AMDGPU_POISON, bo->base.resv, &fence);
	if (!WARN_ON(r)) {
		amdgpu_bo_fence(abo, fence, false);
		dma_fence_put(fence);
	}

	dma_resv_unlock(bo->base.resv);
}
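
/**
 * amdgpu_bo_fault_reserve_notify - notification about a memory fault
 * @bo: pointer to a buffer object
 *
 * TTM driver callback for dealing with VM faults.  Notifies the driver that
 * we are taking a fault on this BO and have reserved it, and moves the BO
 * into CPU-visible VRAM (or GTT) if necessary.
 *
 * Returns:
 * 0 on success, or a VM fault code (VM_FAULT_NOPAGE/VM_FAULT_SIGBUS)
 * otherwise.
 */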
vm_fault_t amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
	struct ttm_operation_ctx ctx = { false, false };
	struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
	unsigned long offset, size;
	int r;

	/* Remember that this BO was accessed by the CPU */
	abo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;

	if (bo->mem.mem_type != TTM_PL_VRAM)
		return 0;

	size = bo->mem.num_pages << PAGE_SHIFT;
	offset = bo->mem.start << PAGE_SHIFT;
	if ((offset + size) <= adev->gmc.visible_vram_size)
		return 0;

	/* Can't move a pinned BO to visible VRAM */
	if (abo->tbo.pin_count > 0)
		return VM_FAULT_SIGBUS;

	/* The memory is not CPU visible, move it to visible VRAM or GTT */
	atomic64_inc(&adev->num_vram_cpu_page_faults);
	amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM |
					AMDGPU_GEM_DOMAIN_GTT);

	/* Avoid costly evictions; only set GTT as a busy placement */
	abo->placement.num_busy_placement = 1;
	abo->placement.busy_placement = &abo->placements[1];

	r = ttm_bo_validate(bo, &abo->placement, &ctx);
	if (unlikely(r == -EBUSY || r == -ERESTARTSYS))
		return VM_FAULT_NOPAGE;
	else if (unlikely(r))
		return VM_FAULT_SIGBUS;

	offset = bo->mem.start << PAGE_SHIFT;
	/* this should never happen */
	if (bo->mem.mem_type == TTM_PL_VRAM &&
	    (offset + size) > adev->gmc.visible_vram_size)
		return VM_FAULT_SIGBUS;

	ttm_bo_move_to_lru_tail_unlocked(bo);
	return 0;
}
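
/**
 * amdgpu_bo_fence - add fence to buffer object
 * @bo: buffer object in question
 * @fence: fence to add
 * @shared: true if fence should be added shared
 */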
void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
		     bool shared)
{
	struct dma_resv *resv = bo->tbo.base.resv;

	if (shared)
		dma_resv_add_shared_fence(resv, fence);
	else
		dma_resv_add_excl_fence(resv, fence);
}
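
/**
 * amdgpu_bo_sync_wait_resv - wait for reservation fences
 * @adev: amdgpu device pointer
 * @resv: reservation object to sync to
 * @sync_mode: synchronization mode
 * @owner: fence owner
 * @intr: whether the wait is interruptible
 *
 * Extracts the fences from the reservation object and waits for them to
 * finish.
 *
 * Returns:
 * 0 on success, errno otherwise.
 */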
int amdgpu_bo_sync_wait_resv(struct amdgpu_device *adev, struct dma_resv *resv,
			     enum amdgpu_sync_mode sync_mode, void *owner,
			     bool intr)
{
	struct amdgpu_sync sync;
	int r;

	amdgpu_sync_create(&sync);
	amdgpu_sync_resv(adev, &sync, resv, sync_mode, owner);
	r = amdgpu_sync_wait(&sync, intr);
	amdgpu_sync_free(&sync);
	return r;
}

/* Wait for the fences on @bo's reservation whose owner differs from @owner. */
int amdgpu_bo_sync_wait(struct amdgpu_bo *bo, void *owner, bool intr)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);

	return amdgpu_bo_sync_wait_resv(adev, bo->tbo.base.resv,
					AMDGPU_SYNC_NE_OWNER, owner, intr);
}
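
/**
 * amdgpu_bo_gpu_offset - return GPU offset of bo
 * @bo: amdgpu object for which we query the offset
 *
 * Note: the object should either be pinned or reserved when calling this
 * function; the WARN_ON_ONCE checks below catch the most common misuses.
 *
 * Returns:
 * current GPU offset of the object.
 */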
u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo)
{
	WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_SYSTEM);
	WARN_ON_ONCE(!dma_resv_is_locked(bo->tbo.base.resv) &&
		     !bo->tbo.pin_count && bo->tbo.type != ttm_bo_type_kernel);
	WARN_ON_ONCE(bo->tbo.mem.start == AMDGPU_BO_INVALID_OFFSET);
	WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_VRAM &&
		     !(bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS));

	return amdgpu_bo_gpu_offset_no_check(bo);
}

/* Like amdgpu_bo_gpu_offset(), but without the sanity checks above. */
u64 amdgpu_bo_gpu_offset_no_check(struct amdgpu_bo *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	uint64_t offset;

	offset = (bo->tbo.mem.start << PAGE_SHIFT) +
		 amdgpu_ttm_domain_start(adev, bo->tbo.mem.mem_type);

	return amdgpu_gmc_sign_extend(offset);
}
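
/**
 * amdgpu_bo_get_preferred_pin_domain - get preferred domain for pinning
 * @adev: amdgpu device object
 * @domain: allowed memory domains
 *
 * Returns:
 * Which of the allowed domains is preferred for pinning the BO.
 */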
uint32_t amdgpu_bo_get_preferred_pin_domain(struct amdgpu_device *adev,
					    uint32_t domain)
{
	if (domain == (AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT)) {
		domain = AMDGPU_GEM_DOMAIN_VRAM;
		if (adev->gmc.real_vram_size <= AMDGPU_SG_THRESHOLD)
			domain = AMDGPU_GEM_DOMAIN_GTT;
	}
	return domain;
}

#if defined(CONFIG_DEBUG_FS)
#define amdgpu_bo_print_flag(m, bo, flag)			\
	do {							\
		if (bo->flags & (AMDGPU_GEM_CREATE_ ## flag)) {	\
			seq_printf((m), " " #flag);		\
		}						\
	} while (0)
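
/**
 * amdgpu_bo_print_info - print BO info in debugfs file
 * @id: index of the BO
 * @bo: the BO to print
 * @m: the debugfs seq_file
 *
 * Prints BO information in a debugfs file.
 *
 * Returns:
 * Size of the BO in bytes.
 */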
u64 amdgpu_bo_print_info(int id, struct amdgpu_bo *bo, struct seq_file *m)
{
	struct dma_buf_attachment *attachment;
	struct dma_buf *dma_buf;
	unsigned int domain;
	const char *placement;
	unsigned int pin_count;
	u64 size;

	domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
	switch (domain) {
	case AMDGPU_GEM_DOMAIN_VRAM:
		placement = "VRAM";
		break;
	case AMDGPU_GEM_DOMAIN_GTT:
		placement = " GTT";
		break;
	case AMDGPU_GEM_DOMAIN_CPU:
	default:
		placement = " CPU";
		break;
	}

	size = amdgpu_bo_size(bo);
	seq_printf(m, "\t\t0x%08x: %12lld byte %s",
		   id, size, placement);

	pin_count = READ_ONCE(bo->tbo.pin_count);
	if (pin_count)
		seq_printf(m, " pin count %d", pin_count);

	dma_buf = READ_ONCE(bo->tbo.base.dma_buf);
	attachment = READ_ONCE(bo->tbo.base.import_attach);

	if (attachment)
		seq_printf(m, " imported from %p", dma_buf);
	else if (dma_buf)
		seq_printf(m, " exported as %p", dma_buf);

	amdgpu_bo_print_flag(m, bo, CPU_ACCESS_REQUIRED);
	amdgpu_bo_print_flag(m, bo, NO_CPU_ACCESS);
	amdgpu_bo_print_flag(m, bo, CPU_GTT_USWC);
	amdgpu_bo_print_flag(m, bo, VRAM_CLEARED);
	amdgpu_bo_print_flag(m, bo, SHADOW);
	amdgpu_bo_print_flag(m, bo, VRAM_CONTIGUOUS);
	amdgpu_bo_print_flag(m, bo, VM_ALWAYS_VALID);
	amdgpu_bo_print_flag(m, bo, EXPLICIT_SYNC);

	seq_puts(m, "\n");

	return size;
}
#endif