#include <linux/list.h>
#include <linux/slab.h>

#include <drm/amdgpu_drm.h>
#include <drm/drm_cache.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_amdkfd.h"

/**
 * DOC: amdgpu_object
 *
 * This defines the interfaces to operate on an &amdgpu_bo buffer object which
 * represents memory used by the driver (VRAM, GTT, system memory, etc.).
 * The DRM/GEM APIs and kernel clients use these interfaces to create, destroy
 * and manipulate buffer objects, which are then managed by the TTM memory
 * manager.
 */

static void amdgpu_bo_subtract_pin_size(struct amdgpu_bo *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);

	if (bo->tbo.mem.mem_type == TTM_PL_VRAM) {
		atomic64_sub(amdgpu_bo_size(bo), &adev->vram_pin_size);
		atomic64_sub(amdgpu_vram_mgr_bo_visible_size(bo),
			     &adev->visible_pin_size);
	} else if (bo->tbo.mem.mem_type == TTM_PL_TT) {
		atomic64_sub(amdgpu_bo_size(bo), &adev->gart_pin_size);
	}
}

static void amdgpu_bo_destroy(struct ttm_buffer_object *tbo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
	struct amdgpu_bo *bo = ttm_to_amdgpu_bo(tbo);

	if (bo->pin_count > 0)
		amdgpu_bo_subtract_pin_size(bo);

	amdgpu_bo_kunmap(bo);

	if (bo->tbo.base.import_attach)
		drm_prime_gem_destroy(&bo->tbo.base, bo->tbo.sg);
	drm_gem_object_release(&bo->tbo.base);
	/* remove the BO from the shadow list if it is still on it */
	if (!list_empty(&bo->shadow_list)) {
		mutex_lock(&adev->shadow_list_lock);
		list_del_init(&bo->shadow_list);
		mutex_unlock(&adev->shadow_list_lock);
	}
	amdgpu_bo_unref(&bo->parent);

	kfree(bo->metadata);
	kfree(bo);
}

/**
 * amdgpu_bo_is_amdgpu_bo - check if the buffer object is an &amdgpu_bo
 * @bo: buffer object to be checked
 *
 * Uses the destroy function associated with the object to determine if this
 * is an &amdgpu_bo.
 *
 * Returns:
 * true if the object belongs to &amdgpu_bo, false if not.
 */
bool amdgpu_bo_is_amdgpu_bo(struct ttm_buffer_object *bo)
{
	if (bo->destroy == &amdgpu_bo_destroy)
		return true;
	return false;
}

/**
 * amdgpu_bo_placement_from_domain - set buffer's placement
 * @abo: &amdgpu_bo buffer object whose placement is to be set
 * @domain: requested domain
 *
 * Sets buffer's placement according to the requested domain and the buffer's
 * flags.
 */
void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
	struct ttm_placement *placement = &abo->placement;
	struct ttm_place *places = abo->placements;
	u64 flags = abo->flags;
	u32 c = 0;

	if (domain & AMDGPU_GEM_DOMAIN_VRAM) {
		unsigned visible_pfn = adev->gmc.visible_vram_size >> PAGE_SHIFT;

		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
			TTM_PL_FLAG_VRAM;

		if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
			places[c].lpfn = visible_pfn;
		else
			places[c].flags |= TTM_PL_FLAG_TOPDOWN;

		if (flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)
			places[c].flags |= TTM_PL_FLAG_CONTIGUOUS;
		c++;
	}

	if (domain & AMDGPU_GEM_DOMAIN_GTT) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].flags = TTM_PL_FLAG_TT;
		if (flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
			places[c].flags |= TTM_PL_FLAG_WC |
				TTM_PL_FLAG_UNCACHED;
		else
			places[c].flags |= TTM_PL_FLAG_CACHED;
		c++;
	}

	if (domain & AMDGPU_GEM_DOMAIN_CPU) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].flags = TTM_PL_FLAG_SYSTEM;
		if (flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
			places[c].flags |= TTM_PL_FLAG_WC |
				TTM_PL_FLAG_UNCACHED;
		else
			places[c].flags |= TTM_PL_FLAG_CACHED;
		c++;
	}

	if (domain & AMDGPU_GEM_DOMAIN_GDS) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].flags = TTM_PL_FLAG_UNCACHED | AMDGPU_PL_FLAG_GDS;
		c++;
	}

	if (domain & AMDGPU_GEM_DOMAIN_GWS) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].flags = TTM_PL_FLAG_UNCACHED | AMDGPU_PL_FLAG_GWS;
		c++;
	}

	if (domain & AMDGPU_GEM_DOMAIN_OA) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].flags = TTM_PL_FLAG_UNCACHED | AMDGPU_PL_FLAG_OA;
		c++;
	}

	if (!c) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
		c++;
	}

	BUG_ON(c >= AMDGPU_BO_MAX_PLACEMENTS);

	placement->num_placement = c;
	placement->placement = places;

	placement->num_busy_placement = c;
	placement->busy_placement = places;
}
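/*
 * Illustrative sketch (not part of the original file): a typical in-driver
 * caller re-computes a BO's placement and then asks TTM to (re)validate it,
 * which is the same pattern amdgpu_bo_validate() below uses. The helper name
 * and the operation context values are assumptions for illustration only.
 *
 *	static int example_move_to_gtt(struct amdgpu_bo *abo)
 *	{
 *		struct ttm_operation_ctx ctx = { true, false };
 *
 *		amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT);
 *		return ttm_bo_validate(&abo->tbo, &abo->placement, &ctx);
 *	}
 */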

/**
 * amdgpu_bo_create_reserved - create reserved BO for kernel use
 *
 * @adev: amdgpu device object
 * @size: size for the new BO
 * @align: alignment for the new BO
 * @domain: where to place it
 * @bo_ptr: used to initialize BOs in structures
 * @gpu_addr: GPU addr of the pinned BO
 * @cpu_addr: optional CPU address mapping
 *
 * Allocates and pins a BO for kernel internal use, and returns it still
 * reserved.
 *
 * Note: For bo_ptr, a new BO is only created if bo_ptr points to NULL.
 *
 * Returns:
 * 0 on success, negative error code otherwise.
 */
int amdgpu_bo_create_reserved(struct amdgpu_device *adev,
			      unsigned long size, int align,
			      u32 domain, struct amdgpu_bo **bo_ptr,
			      u64 *gpu_addr, void **cpu_addr)
{
	struct amdgpu_bo_param bp;
	bool free = false;
	int r;

	if (!size) {
		amdgpu_bo_unref(bo_ptr);
		return 0;
	}

	memset(&bp, 0, sizeof(bp));
	bp.size = size;
	bp.byte_align = align;
	bp.domain = domain;
	bp.flags = cpu_addr ? AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED
		: AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
	bp.flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
	bp.type = ttm_bo_type_kernel;
	bp.resv = NULL;

	if (!*bo_ptr) {
		r = amdgpu_bo_create(adev, &bp, bo_ptr);
		if (r) {
			dev_err(adev->dev, "(%d) failed to allocate kernel bo\n",
				r);
			return r;
		}
		free = true;
	}

	r = amdgpu_bo_reserve(*bo_ptr, false);
	if (r) {
		dev_err(adev->dev, "(%d) failed to reserve kernel bo\n", r);
		goto error_free;
	}

	r = amdgpu_bo_pin(*bo_ptr, domain);
	if (r) {
		dev_err(adev->dev, "(%d) kernel bo pin failed\n", r);
		goto error_unreserve;
	}

	r = amdgpu_ttm_alloc_gart(&(*bo_ptr)->tbo);
	if (r) {
		dev_err(adev->dev, "%p bind failed\n", *bo_ptr);
		goto error_unpin;
	}

	if (gpu_addr)
		*gpu_addr = amdgpu_bo_gpu_offset(*bo_ptr);

	if (cpu_addr) {
		r = amdgpu_bo_kmap(*bo_ptr, cpu_addr);
		if (r) {
			dev_err(adev->dev, "(%d) kernel bo map failed\n", r);
			goto error_unpin;
		}
	}

	return 0;

error_unpin:
	amdgpu_bo_unpin(*bo_ptr);
error_unreserve:
	amdgpu_bo_unreserve(*bo_ptr);

error_free:
	if (free)
		amdgpu_bo_unref(bo_ptr);

	return r;
}

/**
 * amdgpu_bo_create_kernel - create BO for kernel use
 *
 * @adev: amdgpu device object
 * @size: size for the new BO
 * @align: alignment for the new BO
 * @domain: where to place it
 * @bo_ptr: used to initialize BOs in structures
 * @gpu_addr: GPU addr of the pinned BO
 * @cpu_addr: optional CPU address mapping
 *
 * Allocates and pins a BO for kernel internal use.
 *
 * Note: For bo_ptr, a new BO is only created if bo_ptr points to NULL.
 *
 * Returns:
 * 0 on success, negative error code otherwise.
 */
int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
			    unsigned long size, int align,
			    u32 domain, struct amdgpu_bo **bo_ptr,
			    u64 *gpu_addr, void **cpu_addr)
{
	int r;

	r = amdgpu_bo_create_reserved(adev, size, align, domain, bo_ptr,
				      gpu_addr, cpu_addr);

	if (r)
		return r;

	if (*bo_ptr)
		amdgpu_bo_unreserve(*bo_ptr);

	return 0;
}
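/*
 * Illustrative sketch (not part of the original file): a typical kernel
 * client allocates a small pinned, CPU-mapped BO in GTT and later tears it
 * down with amdgpu_bo_free_kernel(). The local variable names are
 * hypothetical.
 *
 *	struct amdgpu_bo *bo = NULL;
 *	u64 gpu_addr;
 *	void *cpu_ptr;
 *	int r;
 *
 *	r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
 *				    AMDGPU_GEM_DOMAIN_GTT, &bo,
 *				    &gpu_addr, &cpu_ptr);
 *	if (r)
 *		return r;
 *
 *	... use cpu_ptr and gpu_addr ...
 *
 *	amdgpu_bo_free_kernel(&bo, &gpu_addr, &cpu_ptr);
 */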

/**
 * amdgpu_bo_create_kernel_at - create BO for kernel use at specific location
 *
 * @adev: amdgpu device object
 * @offset: offset of the BO
 * @size: size of the BO
 * @domain: where to place it
 * @bo_ptr: used to initialize BOs in structures
 * @cpu_addr: optional CPU address mapping
 *
 * Creates a kernel BO at a specific offset in the address space of the
 * domain.
 *
 * Returns:
 * 0 on success, negative error code otherwise.
 */
int amdgpu_bo_create_kernel_at(struct amdgpu_device *adev,
			       uint64_t offset, uint64_t size, uint32_t domain,
			       struct amdgpu_bo **bo_ptr, void **cpu_addr)
{
	struct ttm_operation_ctx ctx = { false, false };
	unsigned int i;
	int r;

	offset &= PAGE_MASK;
	size = ALIGN(size, PAGE_SIZE);

	r = amdgpu_bo_create_reserved(adev, size, PAGE_SIZE, domain, bo_ptr,
				      NULL, cpu_addr);
	if (r)
		return r;

	/*
	 * Remove the original mem node and create a new one at the requested
	 * position.
	 */
	if (cpu_addr)
		amdgpu_bo_kunmap(*bo_ptr);

	ttm_bo_mem_put(&(*bo_ptr)->tbo, &(*bo_ptr)->tbo.mem);

	for (i = 0; i < (*bo_ptr)->placement.num_placement; ++i) {
		(*bo_ptr)->placements[i].fpfn = offset >> PAGE_SHIFT;
		(*bo_ptr)->placements[i].lpfn = (offset + size) >> PAGE_SHIFT;
	}
	r = ttm_bo_mem_space(&(*bo_ptr)->tbo, &(*bo_ptr)->placement,
			     &(*bo_ptr)->tbo.mem, &ctx);
	if (r)
		goto error;

	if (cpu_addr) {
		r = amdgpu_bo_kmap(*bo_ptr, cpu_addr);
		if (r)
			goto error;
	}

	amdgpu_bo_unreserve(*bo_ptr);
	return 0;

error:
	amdgpu_bo_unreserve(*bo_ptr);
	amdgpu_bo_unref(bo_ptr);
	return r;
}

/**
 * amdgpu_bo_free_kernel - free BO for kernel use
 *
 * @bo: amdgpu BO to free
 * @gpu_addr: pointer to where the BO's GPU memory space address was stored
 * @cpu_addr: pointer to where the BO's CPU memory space address was stored
 *
 * Unmaps and unpins the BO, then drops the kernel reference to it.
 */
void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr,
			   void **cpu_addr)
{
	if (*bo == NULL)
		return;

	if (likely(amdgpu_bo_reserve(*bo, true) == 0)) {
		if (cpu_addr)
			amdgpu_bo_kunmap(*bo);

		amdgpu_bo_unpin(*bo);
		amdgpu_bo_unreserve(*bo);
	}
	amdgpu_bo_unref(bo);

	if (gpu_addr)
		*gpu_addr = 0;

	if (cpu_addr)
		*cpu_addr = NULL;
}

/* Validate that the requested BO size fits within the given domain */
static bool amdgpu_bo_validate_size(struct amdgpu_device *adev,
				    unsigned long size, u32 domain)
{
	struct ttm_mem_type_manager *man = NULL;

	/*
	 * If GTT is part of the requested domains the check must succeed to
	 * allow falling back to GTT.
	 */
	if (domain & AMDGPU_GEM_DOMAIN_GTT) {
		man = &adev->mman.bdev.man[TTM_PL_TT];

		if (size < (man->size << PAGE_SHIFT))
			return true;
		else
			goto fail;
	}

	if (domain & AMDGPU_GEM_DOMAIN_VRAM) {
		man = &adev->mman.bdev.man[TTM_PL_VRAM];

		if (size < (man->size << PAGE_SHIFT))
			return true;
		else
			goto fail;
	}

	/* TODO add more domain checks, such as AMDGPU_GEM_DOMAIN_CPU */
	return true;

fail:
	DRM_DEBUG("BO size %lu > total memory in domain: %llu\n", size,
		  man->size << PAGE_SHIFT);
	return false;
}

bool amdgpu_bo_support_uswc(u64 bo_flags)
{

#ifdef CONFIG_X86_32
	/* Write-combined CPU mappings of GTT are known to be problematic on
	 * 32-bit x86, so never allow USWC there.
	 */
	return false;
#elif defined(CONFIG_X86) && !defined(CONFIG_X86_PAT)
	/* Don't try to enable write-combining when it can't work (no PAT),
	 * or things may be slow.
	 */
#ifndef CONFIG_COMPILE_TEST
#warning Please enable CONFIG_MTRR and CONFIG_X86_PAT for better performance \
	 thanks to write-combining
#endif

	if (bo_flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
		DRM_INFO_ONCE("Please enable CONFIG_MTRR and CONFIG_X86_PAT for "
			      "better performance thanks to write-combining\n");
	return false;
#else
	/* For architectures that don't support WC memory,
	 * mask out the WC flag from the BO.
	 */
	if (!drm_arch_can_wc_memory())
		return false;

	return true;
#endif
}

static int amdgpu_bo_do_create(struct amdgpu_device *adev,
			       struct amdgpu_bo_param *bp,
			       struct amdgpu_bo **bo_ptr)
{
	struct ttm_operation_ctx ctx = {
		.interruptible = (bp->type != ttm_bo_type_kernel),
		.no_wait_gpu = bp->no_wait_gpu,
		.resv = bp->resv,
		.flags = bp->type != ttm_bo_type_kernel ?
			TTM_OPT_FLAG_ALLOW_RES_EVICT : 0
	};
	struct amdgpu_bo *bo;
	unsigned long page_align, size = bp->size;
	size_t acc_size;
	int r;

	/* Note that GDS/GWS/OA allocates 1 page per byte/resource. */
	if (bp->domain & (AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA)) {
		/* GWS and OA don't need any alignment. */
		page_align = bp->byte_align;
		size <<= PAGE_SHIFT;
	} else if (bp->domain & AMDGPU_GEM_DOMAIN_GDS) {
		/* Both size and alignment must be a multiple of 4. */
		page_align = ALIGN(bp->byte_align, 4);
		size = ALIGN(size, 4) << PAGE_SHIFT;
	} else {
		/* Memory should be aligned at least to a page size. */
		page_align = ALIGN(bp->byte_align, PAGE_SIZE) >> PAGE_SHIFT;
		size = ALIGN(size, PAGE_SIZE);
	}

	if (!amdgpu_bo_validate_size(adev, size, bp->domain))
		return -ENOMEM;

	*bo_ptr = NULL;

	acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size,
				       sizeof(struct amdgpu_bo));

	bo = kzalloc(sizeof(struct amdgpu_bo), GFP_KERNEL);
	if (bo == NULL)
		return -ENOMEM;
	drm_gem_private_object_init(adev->ddev, &bo->tbo.base, size);
	INIT_LIST_HEAD(&bo->shadow_list);
	bo->vm_bo = NULL;
	bo->preferred_domains = bp->preferred_domain ? bp->preferred_domain :
		bp->domain;
	bo->allowed_domains = bo->preferred_domains;
	if (bp->type != ttm_bo_type_kernel &&
	    bo->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
		bo->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;

	bo->flags = bp->flags;

	if (!amdgpu_bo_support_uswc(bo->flags))
		bo->flags &= ~AMDGPU_GEM_CREATE_CPU_GTT_USWC;

	bo->tbo.bdev = &adev->mman.bdev;
	if (bp->domain & (AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA |
			  AMDGPU_GEM_DOMAIN_GDS))
		amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
	else
		amdgpu_bo_placement_from_domain(bo, bp->domain);
	if (bp->type == ttm_bo_type_kernel)
		bo->tbo.priority = 1;

	r = ttm_bo_init_reserved(&adev->mman.bdev, &bo->tbo, size, bp->type,
				 &bo->placement, page_align, &ctx, acc_size,
				 NULL, bp->resv, &amdgpu_bo_destroy);
	if (unlikely(r != 0))
		return r;

	if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
	    bo->tbo.mem.mem_type == TTM_PL_VRAM &&
	    bo->tbo.mem.start < adev->gmc.visible_vram_size >> PAGE_SHIFT)
		amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved,
					     ctx.bytes_moved);
	else
		amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved, 0);

	if (bp->flags & AMDGPU_GEM_CREATE_VRAM_CLEARED &&
	    bo->tbo.mem.placement & TTM_PL_FLAG_VRAM) {
		struct dma_fence *fence;

		r = amdgpu_fill_buffer(bo, 0, bo->tbo.base.resv, &fence);
		if (unlikely(r))
			goto fail_unreserve;

		amdgpu_bo_fence(bo, fence, false);
		dma_fence_put(bo->tbo.moving);
		bo->tbo.moving = dma_fence_get(fence);
		dma_fence_put(fence);
	}
	if (!bp->resv)
		amdgpu_bo_unreserve(bo);
	*bo_ptr = bo;

	trace_amdgpu_bo_create(bo);

	/* Treat CPU_ACCESS_REQUIRED only as a hint if given by UMD */
	if (bp->type == ttm_bo_type_device)
		bo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;

	return 0;

fail_unreserve:
	if (!bp->resv)
		dma_resv_unlock(bo->tbo.base.resv);
	amdgpu_bo_unref(&bo);
	return r;
}

static int amdgpu_bo_create_shadow(struct amdgpu_device *adev,
				   unsigned long size,
				   struct amdgpu_bo *bo)
{
	struct amdgpu_bo_param bp;
	int r;

	if (bo->shadow)
		return 0;

	memset(&bp, 0, sizeof(bp));
	bp.size = size;
	bp.domain = AMDGPU_GEM_DOMAIN_GTT;
	bp.flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC |
		AMDGPU_GEM_CREATE_SHADOW;
	bp.type = ttm_bo_type_kernel;
	bp.resv = bo->tbo.base.resv;

	r = amdgpu_bo_do_create(adev, &bp, &bo->shadow);
	if (!r) {
		bo->shadow->parent = amdgpu_bo_ref(bo);
		mutex_lock(&adev->shadow_list_lock);
		list_add_tail(&bo->shadow->shadow_list, &adev->shadow_list);
		mutex_unlock(&adev->shadow_list_lock);
	}

	return r;
}

/**
 * amdgpu_bo_create - create an &amdgpu_bo buffer object
 * @adev: amdgpu device object
 * @bp: parameters to be used for the buffer object
 * @bo_ptr: pointer to the buffer object pointer
 *
 * Creates an &amdgpu_bo buffer object; and if requested, also creates a
 * shadow object. The shadow object is used to back up the original buffer
 * object and always lives in GTT.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_create(struct amdgpu_device *adev,
		     struct amdgpu_bo_param *bp,
		     struct amdgpu_bo **bo_ptr)
{
	u64 flags = bp->flags;
	int r;

	bp->flags = bp->flags & ~AMDGPU_GEM_CREATE_SHADOW;
	r = amdgpu_bo_do_create(adev, bp, bo_ptr);
	if (r)
		return r;

	if ((flags & AMDGPU_GEM_CREATE_SHADOW) && !(adev->flags & AMD_IS_APU)) {
		if (!bp->resv)
			WARN_ON(dma_resv_lock((*bo_ptr)->tbo.base.resv,
					      NULL));

		r = amdgpu_bo_create_shadow(adev, bp->size, *bo_ptr);

		if (!bp->resv)
			dma_resv_unlock((*bo_ptr)->tbo.base.resv);

		if (r)
			amdgpu_bo_unref(bo_ptr);
	}

	return r;
}
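/*
 * Illustrative sketch (not part of the original file): filling in an
 * &amdgpu_bo_param the same way amdgpu_bo_create_reserved() above does, here
 * for a hypothetical 1 MiB VRAM allocation. Only the parameter fields used
 * elsewhere in this file are shown.
 *
 *	struct amdgpu_bo_param bp;
 *	struct amdgpu_bo *bo = NULL;
 *	int r;
 *
 *	memset(&bp, 0, sizeof(bp));
 *	bp.size = 1 << 20;
 *	bp.byte_align = PAGE_SIZE;
 *	bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
 *	bp.flags = AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
 *	bp.type = ttm_bo_type_kernel;
 *	bp.resv = NULL;
 *
 *	r = amdgpu_bo_create(adev, &bp, &bo);
 */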

/**
 * amdgpu_bo_validate - validate an &amdgpu_bo buffer object
 * @bo: pointer to the buffer object
 *
 * Sets the placement according to the preferred domain and validates the
 * buffer with TTM, falling back to the allowed domains if the preferred
 * placement runs out of memory.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_validate(struct amdgpu_bo *bo)
{
	struct ttm_operation_ctx ctx = { false, false };
	uint32_t domain;
	int r;

	if (bo->pin_count)
		return 0;

	domain = bo->preferred_domains;

retry:
	amdgpu_bo_placement_from_domain(bo, domain);
	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) {
		domain = bo->allowed_domains;
		goto retry;
	}

	return r;
}

/**
 * amdgpu_bo_restore_shadow - restore an &amdgpu_bo shadow
 *
 * @shadow: &amdgpu_bo shadow to be restored
 * @fence: dma_fence associated with the operation
 *
 * Copies a buffer object's shadow content back to the object. This is used
 * to recover a buffer from its shadow after a GPU reset where VRAM contents
 * may have been lost.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_restore_shadow(struct amdgpu_bo *shadow, struct dma_fence **fence)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(shadow->tbo.bdev);
	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
	uint64_t shadow_addr, parent_addr;

	shadow_addr = amdgpu_bo_gpu_offset(shadow);
	parent_addr = amdgpu_bo_gpu_offset(shadow->parent);

	return amdgpu_copy_buffer(ring, shadow_addr, parent_addr,
				  amdgpu_bo_size(shadow), NULL, fence,
				  true, false);
}

/**
 * amdgpu_bo_kmap - map an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object to be mapped
 * @ptr: kernel virtual address to be returned
 *
 * Calls ttm_bo_kmap() to set up the kernel virtual mapping and
 * amdgpu_bo_kptr() to get the kernel virtual address.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr)
{
	void *kptr;
	long r;

	if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
		return -EPERM;

	kptr = amdgpu_bo_kptr(bo);
	if (kptr) {
		if (ptr)
			*ptr = kptr;
		return 0;
	}

	r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv, false, false,
				      MAX_SCHEDULE_TIMEOUT);
	if (r < 0)
		return r;

	r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
	if (r)
		return r;

	if (ptr)
		*ptr = amdgpu_bo_kptr(bo);

	return 0;
}
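/*
 * Illustrative sketch (not part of the original file): CPU access to a BO's
 * contents through the kmap helpers. It assumes "bo" was created with CPU
 * access allowed and is reserved or pinned as the caller's context requires.
 *
 *	void *ptr;
 *	int r;
 *
 *	r = amdgpu_bo_kmap(bo, &ptr);
 *	if (r)
 *		return r;
 *
 *	memset(ptr, 0, amdgpu_bo_size(bo));
 *	amdgpu_bo_kunmap(bo);
 */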

/**
 * amdgpu_bo_kptr - return the kernel virtual address of the buffer object
 * @bo: &amdgpu_bo buffer object
 *
 * Calls ttm_kmap_obj_virtual() to get the kernel virtual address.
 *
 * Returns:
 * the virtual address of a buffer object area.
 */
void *amdgpu_bo_kptr(struct amdgpu_bo *bo)
{
	bool is_iomem;

	return ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
}

/**
 * amdgpu_bo_kunmap - unmap an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object to be unmapped
 *
 * Unmaps a kernel map set up by amdgpu_bo_kmap().
 */
void amdgpu_bo_kunmap(struct amdgpu_bo *bo)
{
	if (bo->kmap.bo)
		ttm_bo_kunmap(&bo->kmap);
}

/**
 * amdgpu_bo_ref - reference an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object
 *
 * References the contained &ttm_buffer_object.
 *
 * Returns:
 * a refcounted pointer to the &amdgpu_bo buffer object.
 */
struct amdgpu_bo *amdgpu_bo_ref(struct amdgpu_bo *bo)
{
	if (bo == NULL)
		return NULL;

	ttm_bo_get(&bo->tbo);
	return bo;
}

/**
 * amdgpu_bo_unref - unreference an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object
 *
 * Unreferences the contained &ttm_buffer_object and clears the pointer.
 */
void amdgpu_bo_unref(struct amdgpu_bo **bo)
{
	struct ttm_buffer_object *tbo;

	if ((*bo) == NULL)
		return;

	tbo = &((*bo)->tbo);
	ttm_bo_put(tbo);
	*bo = NULL;
}

/**
 * amdgpu_bo_pin_restricted - pin an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object to be pinned
 * @domain: domain to be pinned to
 * @min_offset: the start of the requested address range
 * @max_offset: the end of the requested address range
 *
 * Pins the buffer object according to the requested domain and address range,
 * and adjusts pin_count and pin_size accordingly.
 *
 * Pinning means locking pages in memory along with keeping them at a fixed
 * offset. It is required when a buffer cannot be moved, for example when a
 * display buffer is being scanned out.
 *
 * Compared with amdgpu_bo_pin(), this function gives more flexibility on
 * where to pin a buffer if there are specific restrictions on where a buffer
 * must be located.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
			     u64 min_offset, u64 max_offset)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct ttm_operation_ctx ctx = { false, false };
	int r, i;

	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm))
		return -EPERM;

	if (WARN_ON_ONCE(min_offset > max_offset))
		return -EINVAL;

	/* A shared bo cannot be migrated to VRAM */
	if (bo->prime_shared_count) {
		if (domain & AMDGPU_GEM_DOMAIN_GTT)
			domain = AMDGPU_GEM_DOMAIN_GTT;
		else
			return -EINVAL;
	}

	/* This assumes only APU display buffers are pinned with (VRAM|GTT).
	 * See function amdgpu_display_supported_domains()
	 */
	domain = amdgpu_bo_get_preferred_pin_domain(adev, domain);

	if (bo->pin_count) {
		uint32_t mem_type = bo->tbo.mem.mem_type;

		if (!(domain & amdgpu_mem_type_to_domain(mem_type)))
			return -EINVAL;

		bo->pin_count++;

		if (max_offset != 0) {
			u64 domain_start = bo->tbo.bdev->man[mem_type].gpu_offset;
			WARN_ON_ONCE(max_offset <
				     (amdgpu_bo_gpu_offset(bo) - domain_start));
		}

		return 0;
	}

	bo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
	/* force to pin into visible video ram */
	if (!(bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS))
		bo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
	amdgpu_bo_placement_from_domain(bo, domain);
	for (i = 0; i < bo->placement.num_placement; i++) {
		unsigned fpfn, lpfn;

		fpfn = min_offset >> PAGE_SHIFT;
		lpfn = max_offset >> PAGE_SHIFT;

		if (fpfn > bo->placements[i].fpfn)
			bo->placements[i].fpfn = fpfn;
		if (!bo->placements[i].lpfn ||
		    (lpfn && lpfn < bo->placements[i].lpfn))
			bo->placements[i].lpfn = lpfn;
		bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
	}

	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (unlikely(r)) {
		dev_err(adev->dev, "%p pin failed\n", bo);
		goto error;
	}

	bo->pin_count = 1;

	domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
	if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
		atomic64_add(amdgpu_bo_size(bo), &adev->vram_pin_size);
		atomic64_add(amdgpu_vram_mgr_bo_visible_size(bo),
			     &adev->visible_pin_size);
	} else if (domain == AMDGPU_GEM_DOMAIN_GTT) {
		atomic64_add(amdgpu_bo_size(bo), &adev->gart_pin_size);
	}

error:
	return r;
}

/**
 * amdgpu_bo_pin - pin an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object to be pinned
 * @domain: domain to be pinned to
 *
 * A simple wrapper around amdgpu_bo_pin_restricted().
 * Provides a simpler API for buffers that do not have any strict restrictions
 * on where they must be located.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain)
{
	return amdgpu_bo_pin_restricted(bo, domain, 0, 0);
}
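/*
 * Illustrative sketch (not part of the original file): pinning requires the
 * BO to be reserved first, mirroring the reserve/pin sequence used by
 * amdgpu_bo_create_reserved() above. Error handling is trimmed for brevity.
 *
 *	r = amdgpu_bo_reserve(bo, false);
 *	if (r)
 *		return r;
 *
 *	r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_VRAM);
 *	...
 *	amdgpu_bo_unpin(bo);
 *	amdgpu_bo_unreserve(bo);
 */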

/**
 * amdgpu_bo_unpin - unpin an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object to be unpinned
 *
 * Decreases the pin_count, and clears the flags if pin_count reaches 0.
 * Changes the placement and pin size accordingly.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_unpin(struct amdgpu_bo *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct ttm_operation_ctx ctx = { false, false };
	int r, i;

	if (WARN_ON_ONCE(!bo->pin_count)) {
		dev_warn(adev->dev, "%p unpin not necessary\n", bo);
		return 0;
	}
	bo->pin_count--;
	if (bo->pin_count)
		return 0;

	amdgpu_bo_subtract_pin_size(bo);

	for (i = 0; i < bo->placement.num_placement; i++) {
		bo->placements[i].lpfn = 0;
		bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
	}
	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (unlikely(r))
		dev_err(adev->dev, "%p validate failed for unpin\n", bo);

	return r;
}

/**
 * amdgpu_bo_evict_vram - evict VRAM buffers
 * @adev: amdgpu device object
 *
 * Evicts all VRAM buffers on the LRU list of the memory type.
 * Mainly used for evicting VRAM at suspend time.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_evict_vram(struct amdgpu_device *adev)
{
	/* On APUs, VRAM is carved out of system memory, so there is nothing
	 * to evict unless hibernation support is enabled.
	 */
#ifndef CONFIG_HIBERNATION
	if (adev->flags & AMD_IS_APU) {
		/* Useless to evict on IGP chips */
		return 0;
	}
#endif
	return ttm_bo_evict_mm(&adev->mman.bdev, TTM_PL_VRAM);
}

static const char *amdgpu_vram_names[] = {
	"UNKNOWN",
	"GDDR1",
	"DDR2",
	"GDDR3",
	"GDDR4",
	"GDDR5",
	"HBM",
	"DDR3",
	"DDR4",
	"GDDR6",
};

/**
 * amdgpu_bo_init - initialize memory manager
 * @adev: amdgpu device object
 *
 * Calls amdgpu_ttm_init() to initialize the memory manager.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_init(struct amdgpu_device *adev)
{
	/* reserve PAT memory space to WC for VRAM */
	arch_io_reserve_memtype_wc(adev->gmc.aper_base,
				   adev->gmc.aper_size);

	/* Add an MTRR for the VRAM */
	adev->gmc.vram_mtrr = arch_phys_wc_add(adev->gmc.aper_base,
					       adev->gmc.aper_size);
	DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n",
		 adev->gmc.mc_vram_size >> 20,
		 (unsigned long long)adev->gmc.aper_size >> 20);
	DRM_INFO("RAM width %dbits %s\n",
		 adev->gmc.vram_width, amdgpu_vram_names[adev->gmc.vram_type]);
	return amdgpu_ttm_init(adev);
}

/**
 * amdgpu_bo_late_init - late init
 * @adev: amdgpu device object
 *
 * Calls amdgpu_ttm_late_init() to free resources used earlier during
 * initialization.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_late_init(struct amdgpu_device *adev)
{
	amdgpu_ttm_late_init(adev);

	return 0;
}

/**
 * amdgpu_bo_fini - tear down memory manager
 * @adev: amdgpu device object
 *
 * Reverses amdgpu_bo_init() to tear down the memory manager.
 */
void amdgpu_bo_fini(struct amdgpu_device *adev)
{
	amdgpu_ttm_fini(adev);
	arch_phys_wc_del(adev->gmc.vram_mtrr);
	arch_io_free_memtype_wc(adev->gmc.aper_base, adev->gmc.aper_size);
}

/**
 * amdgpu_bo_fbdev_mmap - mmap fbdev memory
 * @bo: &amdgpu_bo buffer object
 * @vma: vma as input from the fbdev mmap method
 *
 * Calls ttm_bo_mmap_obj() to mmap fbdev memory.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_fbdev_mmap(struct amdgpu_bo *bo,
			 struct vm_area_struct *vma)
{
	if (vma->vm_pgoff != 0)
		return -EACCES;

	return ttm_bo_mmap_obj(vma, &bo->tbo);
}

/**
 * amdgpu_bo_set_tiling_flags - set tiling flags
 * @bo: &amdgpu_bo buffer object
 * @tiling_flags: new flags
 *
 * Sets the buffer object's tiling flags. Used by the GEM ioctl or the kernel
 * driver to set the tiling flags on a buffer.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_set_tiling_flags(struct amdgpu_bo *bo, u64 tiling_flags)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);

	if (adev->family <= AMDGPU_FAMILY_CZ &&
	    AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT) > 6)
		return -EINVAL;

	bo->tiling_flags = tiling_flags;
	return 0;
}

/**
 * amdgpu_bo_get_tiling_flags - get tiling flags
 * @bo: &amdgpu_bo buffer object
 * @tiling_flags: returned flags
 *
 * Gets the buffer object's tiling flags. Used by the GEM ioctl or the kernel
 * driver to query the tiling flags on a buffer.
 */
void amdgpu_bo_get_tiling_flags(struct amdgpu_bo *bo, u64 *tiling_flags)
{
	dma_resv_assert_held(bo->tbo.base.resv);

	if (tiling_flags)
		*tiling_flags = bo->tiling_flags;
}

/**
 * amdgpu_bo_set_metadata - set metadata
 * @bo: &amdgpu_bo buffer object
 * @metadata: new metadata
 * @metadata_size: size of the new metadata
 * @flags: flags of the new metadata
 *
 * Sets the buffer object's metadata, its size and flags.
 * Used via the GEM ioctl.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_set_metadata(struct amdgpu_bo *bo, void *metadata,
			   uint32_t metadata_size, uint64_t flags)
{
	void *buffer;

	if (!metadata_size) {
		if (bo->metadata_size) {
			kfree(bo->metadata);
			bo->metadata = NULL;
			bo->metadata_size = 0;
		}
		return 0;
	}

	if (metadata == NULL)
		return -EINVAL;

	buffer = kmemdup(metadata, metadata_size, GFP_KERNEL);
	if (buffer == NULL)
		return -ENOMEM;

	kfree(bo->metadata);
	bo->metadata_flags = flags;
	bo->metadata = buffer;
	bo->metadata_size = metadata_size;

	return 0;
}

/**
 * amdgpu_bo_get_metadata - get metadata
 * @bo: &amdgpu_bo buffer object
 * @buffer: returned metadata
 * @buffer_size: size of the buffer
 * @metadata_size: size of the returned metadata
 * @flags: flags of the returned metadata
 *
 * Gets the buffer object's metadata, its size and flags. buffer_size shall
 * not be less than metadata_size.
 * Used via the GEM ioctl.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
			   size_t buffer_size, uint32_t *metadata_size,
			   uint64_t *flags)
{
	if (!buffer && !metadata_size)
		return -EINVAL;

	if (buffer) {
		if (buffer_size < bo->metadata_size)
			return -EINVAL;

		if (bo->metadata_size)
			memcpy(buffer, bo->metadata, bo->metadata_size);
	}

	if (metadata_size)
		*metadata_size = bo->metadata_size;
	if (flags)
		*flags = bo->metadata_flags;

	return 0;
}
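/*
 * Illustrative sketch (not part of the original file): a two-step metadata
 * query as a caller of the GEM metadata path might perform it. First ask for
 * the size with a NULL buffer, then fetch the data. The local variable names
 * are hypothetical.
 *
 *	uint32_t size;
 *	uint64_t mflags;
 *	void *data;
 *
 *	r = amdgpu_bo_get_metadata(bo, NULL, 0, &size, &mflags);
 *	if (r || !size)
 *		return r;
 *
 *	data = kmalloc(size, GFP_KERNEL);
 *	if (!data)
 *		return -ENOMEM;
 *	r = amdgpu_bo_get_metadata(bo, data, size, &size, &mflags);
 */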

/**
 * amdgpu_bo_move_notify - notification about a memory move
 * @bo: pointer to a buffer object
 * @evict: if this move is evicting the buffer from the graphics address space
 * @new_mem: new information of the buffer object
 *
 * Marks the corresponding &amdgpu_bo buffer object as invalid and performs
 * bookkeeping.
 * TTM driver callback which is called when TTM moves a buffer.
 */
void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
			   bool evict,
			   struct ttm_mem_reg *new_mem)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
	struct amdgpu_bo *abo;
	struct ttm_mem_reg *old_mem = &bo->mem;

	if (!amdgpu_bo_is_amdgpu_bo(bo))
		return;

	abo = ttm_to_amdgpu_bo(bo);
	amdgpu_vm_bo_invalidate(adev, abo, evict);

	amdgpu_bo_kunmap(abo);

	/* remember the eviction */
	if (evict)
		atomic64_inc(&adev->num_evictions);

	/* update statistics */
	if (!new_mem)
		return;

	/* move_notify is called before the move happens */
	trace_amdgpu_bo_move(abo, new_mem->mem_type, old_mem->mem_type);
}

/**
 * amdgpu_bo_release_notify - notification about a BO being released
 * @bo: pointer to a buffer object
 *
 * Releases the KFD memory accounting and wipes VRAM buffers whose contents
 * should not leak to other processes.
 * TTM driver callback which is called when the underlying object is about to
 * be released.
 */
void amdgpu_bo_release_notify(struct ttm_buffer_object *bo)
{
	struct dma_fence *fence = NULL;
	struct amdgpu_bo *abo;
	int r;

	if (!amdgpu_bo_is_amdgpu_bo(bo))
		return;

	abo = ttm_to_amdgpu_bo(bo);

	if (abo->kfd_bo)
		amdgpu_amdkfd_unreserve_memory_limit(abo);

	if (bo->mem.mem_type != TTM_PL_VRAM || !bo->mem.mm_node ||
	    !(abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE))
		return;

	dma_resv_lock(bo->base.resv, NULL);

	r = amdgpu_fill_buffer(abo, AMDGPU_POISON, bo->base.resv, &fence);
	if (!WARN_ON(r)) {
		amdgpu_bo_fence(abo, fence, false);
		dma_fence_put(fence);
	}

	dma_resv_unlock(bo->base.resv);
}

/**
 * amdgpu_bo_fault_reserve_notify - notification about a memory fault
 * @bo: pointer to a buffer object
 *
 * Notifies the driver that we are taking a fault on this BO and have reserved
 * it, and performs bookkeeping.
 * TTM driver callback for dealing with VM faults.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
	struct ttm_operation_ctx ctx = { false, false };
	struct amdgpu_bo *abo;
	unsigned long offset, size;
	int r;

	if (!amdgpu_bo_is_amdgpu_bo(bo))
		return 0;

	abo = ttm_to_amdgpu_bo(bo);

	/* Remember that this BO was accessed by the CPU */
	abo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;

	if (bo->mem.mem_type != TTM_PL_VRAM)
		return 0;

	size = bo->mem.num_pages << PAGE_SHIFT;
	offset = bo->mem.start << PAGE_SHIFT;
	if ((offset + size) <= adev->gmc.visible_vram_size)
		return 0;

	/* Can't move a pinned BO to visible VRAM */
	if (abo->pin_count > 0)
		return -EINVAL;

	/* the BO lies outside CPU-visible VRAM, try to move it */
	atomic64_inc(&adev->num_vram_cpu_page_faults);
	amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM |
					AMDGPU_GEM_DOMAIN_GTT);

	/* Only use GTT as busy placement to avoid costly VRAM evictions */
	abo->placement.num_busy_placement = 1;
	abo->placement.busy_placement = &abo->placements[1];

	r = ttm_bo_validate(bo, &abo->placement, &ctx);
	if (unlikely(r != 0))
		return r;

	offset = bo->mem.start << PAGE_SHIFT;
	/* this should never happen */
	if (bo->mem.mem_type == TTM_PL_VRAM &&
	    (offset + size) > adev->gmc.visible_vram_size)
		return -EINVAL;

	return 0;
}

/**
 * amdgpu_bo_fence - add fence to buffer object
 *
 * @bo: buffer object in question
 * @fence: fence to add
 * @shared: true if fence should be added shared
 */
void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
		     bool shared)
{
	struct dma_resv *resv = bo->tbo.base.resv;

	if (shared)
		dma_resv_add_shared_fence(resv, fence);
	else
		dma_resv_add_excl_fence(resv, fence);
}

/**
 * amdgpu_bo_sync_wait - wait for fences on the BO's reservation object
 * @bo: buffer object to wait on
 * @owner: fence owner
 * @intr: whether the wait is interruptible
 *
 * Waits for the fences attached to the BO's reservation object using the
 * amdgpu_sync helpers.
 *
 * Returns:
 * 0 on success, errno otherwise.
 */
int amdgpu_bo_sync_wait(struct amdgpu_bo *bo, void *owner, bool intr)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct amdgpu_sync sync;
	int r;

	amdgpu_sync_create(&sync);
	amdgpu_sync_resv(adev, &sync, bo->tbo.base.resv, owner, false);
	r = amdgpu_sync_wait(&sync, intr);
	amdgpu_sync_free(&sync);

	return r;
}

/**
 * amdgpu_bo_gpu_offset - return the GPU offset of the BO
 * @bo: amdgpu object for which we query the offset
 *
 * Note: the object should either be pinned or reserved when calling this
 * function.
 *
 * Returns:
 * current GPU offset of the object.
 */
u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo)
{
	WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_SYSTEM);
	WARN_ON_ONCE(!dma_resv_is_locked(bo->tbo.base.resv) &&
		     !bo->pin_count && bo->tbo.type != ttm_bo_type_kernel);
	WARN_ON_ONCE(bo->tbo.mem.start == AMDGPU_BO_INVALID_OFFSET);
	WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_VRAM &&
		     !(bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS));

	return amdgpu_gmc_sign_extend(bo->tbo.offset);
}

/**
 * amdgpu_bo_get_preferred_pin_domain - get preferred domain for scanout
 * @adev: amdgpu device object
 * @domain: allowed memory domains
 *
 * Returns:
 * Which of the allowed domains is preferred for pinning the BO for scanout.
 */
uint32_t amdgpu_bo_get_preferred_pin_domain(struct amdgpu_device *adev,
					    uint32_t domain)
{
	if (domain == (AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT)) {
		domain = AMDGPU_GEM_DOMAIN_VRAM;
		if (adev->gmc.real_vram_size <= AMDGPU_SG_THRESHOLD)
			domain = AMDGPU_GEM_DOMAIN_GTT;
	}
	return domain;
}