#include <linux/list.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include <drm/drm_cache.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_amdkfd.h"
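
/**
 * DOC: amdgpu_object
 *
 * This defines the interfaces to operate on an &amdgpu_bo buffer object which
 * represents memory used by the driver (VRAM, system memory, etc.). The driver
 * provides DRM/GEM APIs to userspace. DRM/GEM APIs then use these interfaces
 * to create/destroy/set buffer objects which are then managed by the kernel
 * TTM memory manager.
 * The interfaces are also used internally by kernel clients, including gfx,
 * uvd, etc. for kernel managed allocations used by the GPU.
 */

/**
 * amdgpu_bo_subtract_pin_size - Remove BO from pin_size accounting
 * @bo: &amdgpu_bo buffer object
 *
 * This function is called when a BO stops being pinned, and updates the
 * &amdgpu_device pin_size values accordingly.
 */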
static void amdgpu_bo_subtract_pin_size(struct amdgpu_bo *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);

	if (bo->tbo.mem.mem_type == TTM_PL_VRAM) {
		atomic64_sub(amdgpu_bo_size(bo), &adev->vram_pin_size);
		atomic64_sub(amdgpu_vram_mgr_bo_visible_size(bo),
			     &adev->visible_pin_size);
	} else if (bo->tbo.mem.mem_type == TTM_PL_TT) {
		atomic64_sub(amdgpu_bo_size(bo), &adev->gart_pin_size);
	}
}

static void amdgpu_bo_destroy(struct ttm_buffer_object *tbo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
	struct amdgpu_bo *bo = ttm_to_amdgpu_bo(tbo);

	if (bo->pin_count > 0)
		amdgpu_bo_subtract_pin_size(bo);

	if (bo->kfd_bo)
		amdgpu_amdkfd_unreserve_memory_limit(bo);

	amdgpu_bo_kunmap(bo);

	if (bo->gem_base.import_attach)
		drm_prime_gem_destroy(&bo->gem_base, bo->tbo.sg);
	drm_gem_object_release(&bo->gem_base);

	if (!list_empty(&bo->shadow_list)) {
		mutex_lock(&adev->shadow_list_lock);
		list_del_init(&bo->shadow_list);
		mutex_unlock(&adev->shadow_list_lock);
	}
	amdgpu_bo_unref(&bo->parent);

	kfree(bo->metadata);
	kfree(bo);
}
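
/**
 * amdgpu_bo_is_amdgpu_bo - check if the buffer object is an &amdgpu_bo
 * @bo: buffer object to be checked
 *
 * Uses the destroy function associated with the object to determine if this
 * is an &amdgpu_bo.
 *
 * Returns:
 * true if the object belongs to &amdgpu_bo, false if not.
 */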
bool amdgpu_bo_is_amdgpu_bo(struct ttm_buffer_object *bo)
{
	if (bo->destroy == &amdgpu_bo_destroy)
		return true;
	return false;
}
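
/**
 * amdgpu_bo_placement_from_domain - set buffer's placement
 * @abo: &amdgpu_bo buffer object whose placement is to be set
 * @domain: requested domain
 *
 * Sets buffer's placement according to the requested domain and the buffer's
 * flags.
 */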
void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
	struct ttm_placement *placement = &abo->placement;
	struct ttm_place *places = abo->placements;
	u64 flags = abo->flags;
	u32 c = 0;

	if (domain & AMDGPU_GEM_DOMAIN_VRAM) {
		unsigned visible_pfn = adev->gmc.visible_vram_size >> PAGE_SHIFT;

		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
			TTM_PL_FLAG_VRAM;

		if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
			places[c].lpfn = visible_pfn;
		else
			places[c].flags |= TTM_PL_FLAG_TOPDOWN;

		if (flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)
			places[c].flags |= TTM_PL_FLAG_CONTIGUOUS;
		c++;
	}

	if (domain & AMDGPU_GEM_DOMAIN_GTT) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].flags = TTM_PL_FLAG_TT;
		if (flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
			places[c].flags |= TTM_PL_FLAG_WC |
				TTM_PL_FLAG_UNCACHED;
		else
			places[c].flags |= TTM_PL_FLAG_CACHED;
		c++;
	}

	if (domain & AMDGPU_GEM_DOMAIN_CPU) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].flags = TTM_PL_FLAG_SYSTEM;
		if (flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
			places[c].flags |= TTM_PL_FLAG_WC |
				TTM_PL_FLAG_UNCACHED;
		else
			places[c].flags |= TTM_PL_FLAG_CACHED;
		c++;
	}

	if (domain & AMDGPU_GEM_DOMAIN_GDS) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].flags = TTM_PL_FLAG_UNCACHED | AMDGPU_PL_FLAG_GDS;
		c++;
	}

	if (domain & AMDGPU_GEM_DOMAIN_GWS) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].flags = TTM_PL_FLAG_UNCACHED | AMDGPU_PL_FLAG_GWS;
		c++;
	}

	if (domain & AMDGPU_GEM_DOMAIN_OA) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].flags = TTM_PL_FLAG_UNCACHED | AMDGPU_PL_FLAG_OA;
		c++;
	}

	if (!c) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
		c++;
	}

	BUG_ON(c >= AMDGPU_BO_MAX_PLACEMENTS);

	placement->num_placement = c;
	placement->placement = places;

	placement->num_busy_placement = c;
	placement->busy_placement = places;
}
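
/**
 * amdgpu_bo_create_reserved - create reserved BO for kernel use
 *
 * @adev: amdgpu device object
 * @size: size for the new BO
 * @align: alignment for the new BO
 * @domain: where to place it
 * @bo_ptr: used to initialize BOs in structures
 * @gpu_addr: GPU addr of the pinned BO
 * @cpu_addr: optional CPU address mapping
 *
 * Allocates and pins a BO for kernel internal use, and returns it still
 * reserved.
 *
 * Note: For bo_ptr new BO is only created if bo_ptr points to NULL.
 *
 * Returns:
 * 0 on success, negative error code otherwise.
 */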
int amdgpu_bo_create_reserved(struct amdgpu_device *adev,
			      unsigned long size, int align,
			      u32 domain, struct amdgpu_bo **bo_ptr,
			      u64 *gpu_addr, void **cpu_addr)
{
	struct amdgpu_bo_param bp;
	bool free = false;
	int r;

	if (!size) {
		amdgpu_bo_unref(bo_ptr);
		return 0;
	}

	memset(&bp, 0, sizeof(bp));
	bp.size = size;
	bp.byte_align = align;
	bp.domain = domain;
	bp.flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
		AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
	bp.type = ttm_bo_type_kernel;
	bp.resv = NULL;

	if (!*bo_ptr) {
		r = amdgpu_bo_create(adev, &bp, bo_ptr);
		if (r) {
			dev_err(adev->dev, "(%d) failed to allocate kernel bo\n",
				r);
			return r;
		}
		free = true;
	}

	r = amdgpu_bo_reserve(*bo_ptr, false);
	if (r) {
		dev_err(adev->dev, "(%d) failed to reserve kernel bo\n", r);
		goto error_free;
	}

	r = amdgpu_bo_pin(*bo_ptr, domain);
	if (r) {
		dev_err(adev->dev, "(%d) kernel bo pin failed\n", r);
		goto error_unreserve;
	}

	r = amdgpu_ttm_alloc_gart(&(*bo_ptr)->tbo);
	if (r) {
		dev_err(adev->dev, "%p bind failed\n", *bo_ptr);
		goto error_unpin;
	}

	if (gpu_addr)
		*gpu_addr = amdgpu_bo_gpu_offset(*bo_ptr);

	if (cpu_addr) {
		r = amdgpu_bo_kmap(*bo_ptr, cpu_addr);
		if (r) {
			dev_err(adev->dev, "(%d) kernel bo map failed\n", r);
			goto error_unpin;
		}
	}

	return 0;

error_unpin:
	amdgpu_bo_unpin(*bo_ptr);
error_unreserve:
	amdgpu_bo_unreserve(*bo_ptr);

error_free:
	if (free)
		amdgpu_bo_unref(bo_ptr);

	return r;
}
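
/**
 * amdgpu_bo_create_kernel - create BO for kernel use
 *
 * @adev: amdgpu device object
 * @size: size for the new BO
 * @align: alignment for the new BO
 * @domain: where to place it
 * @bo_ptr: used to initialize BOs in structures
 * @gpu_addr: GPU addr of the pinned BO
 * @cpu_addr: optional CPU address mapping
 *
 * Allocates and pins a BO for kernel internal use.
 *
 * Note: For bo_ptr new BO is only created if bo_ptr points to NULL.
 *
 * Returns:
 * 0 on success, negative error code otherwise.
 */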
int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
			    unsigned long size, int align,
			    u32 domain, struct amdgpu_bo **bo_ptr,
			    u64 *gpu_addr, void **cpu_addr)
{
	int r;

	r = amdgpu_bo_create_reserved(adev, size, align, domain, bo_ptr,
				      gpu_addr, cpu_addr);

	if (r)
		return r;

	if (*bo_ptr)
		amdgpu_bo_unreserve(*bo_ptr);

	return 0;
}
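
/**
 * amdgpu_bo_free_kernel - free BO for kernel use
 *
 * @bo: amdgpu BO to free
 * @gpu_addr: pointer to where the BO's GPU memory space address was stored
 * @cpu_addr: pointer to where the BO's CPU memory space address was stored
 *
 * Unmaps and unpins a BO for kernel internal use.
 */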
void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr,
			   void **cpu_addr)
{
	if (*bo == NULL)
		return;

	if (likely(amdgpu_bo_reserve(*bo, true) == 0)) {
		if (cpu_addr)
			amdgpu_bo_kunmap(*bo);

		amdgpu_bo_unpin(*bo);
		amdgpu_bo_unreserve(*bo);
	}
	amdgpu_bo_unref(bo);

	if (gpu_addr)
		*gpu_addr = 0;

	if (cpu_addr)
		*cpu_addr = NULL;
}

/* Validate that the requested BO size fits within the given domain */
static bool amdgpu_bo_validate_size(struct amdgpu_device *adev,
				    unsigned long size, u32 domain)
{
	struct ttm_mem_type_manager *man = NULL;

	/*
	 * If GTT is part of the requested domains the check must succeed to
	 * allow falling back to GTT.
	 */
	if (domain & AMDGPU_GEM_DOMAIN_GTT) {
		man = &adev->mman.bdev.man[TTM_PL_TT];

		if (size < (man->size << PAGE_SHIFT))
			return true;
		else
			goto fail;
	}

	if (domain & AMDGPU_GEM_DOMAIN_VRAM) {
		man = &adev->mman.bdev.man[TTM_PL_VRAM];

		if (size < (man->size << PAGE_SHIFT))
			return true;
		else
			goto fail;
	}

	/* TODO: add checks for other domains */
	return true;

fail:
	DRM_DEBUG("BO size %lu > total memory in domain: %llu\n", size,
		  man->size << PAGE_SHIFT);
	return false;
}

static int amdgpu_bo_do_create(struct amdgpu_device *adev,
			       struct amdgpu_bo_param *bp,
			       struct amdgpu_bo **bo_ptr)
{
	struct ttm_operation_ctx ctx = {
		.interruptible = (bp->type != ttm_bo_type_kernel),
		.no_wait_gpu = false,
		.resv = bp->resv,
		.flags = TTM_OPT_FLAG_ALLOW_RES_EVICT
	};
	struct amdgpu_bo *bo;
	unsigned long page_align, size = bp->size;
	size_t acc_size;
	int r;

	/* Note that GDS/GWS/OA allocates 1 page per byte/resource. */
	if (bp->domain & (AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA)) {
		/* GWS and OA don't need any alignment. */
		page_align = bp->byte_align;
		size <<= PAGE_SHIFT;
	} else if (bp->domain & AMDGPU_GEM_DOMAIN_GDS) {
		/* Both size and alignment must be a multiple of 4. */
		page_align = ALIGN(bp->byte_align, 4);
		size = ALIGN(size, 4) << PAGE_SHIFT;
	} else {
		/* Memory should be aligned at least to a page size. */
		page_align = ALIGN(bp->byte_align, PAGE_SIZE) >> PAGE_SHIFT;
		size = ALIGN(size, PAGE_SIZE);
	}

	if (!amdgpu_bo_validate_size(adev, size, bp->domain))
		return -ENOMEM;

	*bo_ptr = NULL;

	acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size,
				       sizeof(struct amdgpu_bo));

	bo = kzalloc(sizeof(struct amdgpu_bo), GFP_KERNEL);
	if (bo == NULL)
		return -ENOMEM;
	drm_gem_private_object_init(adev->ddev, &bo->gem_base, size);
	INIT_LIST_HEAD(&bo->shadow_list);
	bo->vm_bo = NULL;
	bo->preferred_domains = bp->preferred_domain ? bp->preferred_domain :
		bp->domain;
	bo->allowed_domains = bo->preferred_domains;
	if (bp->type != ttm_bo_type_kernel &&
	    bo->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
		bo->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;

	bo->flags = bp->flags;

#ifdef CONFIG_X86_32
	/* XXX: Write-combined CPU mappings of GTT seem broken on 32-bit */
	bo->flags &= ~AMDGPU_GEM_CREATE_CPU_GTT_USWC;
#elif defined(CONFIG_X86) && !defined(CONFIG_X86_PAT)
	/* Don't try to enable write-combining when it can't work, or things
	 * may be slow.
	 */
#ifndef CONFIG_COMPILE_TEST
#warning Please enable CONFIG_MTRR and CONFIG_X86_PAT for better performance \
	 thanks to write-combining
#endif

	if (bo->flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
		DRM_INFO_ONCE("Please enable CONFIG_MTRR and CONFIG_X86_PAT for "
			      "better performance thanks to write-combining\n");
	bo->flags &= ~AMDGPU_GEM_CREATE_CPU_GTT_USWC;
#else
	/* For architectures that don't support WC memory,
	 * mask out the WC flag from the BO.
	 */
	if (!drm_arch_can_wc_memory())
		bo->flags &= ~AMDGPU_GEM_CREATE_CPU_GTT_USWC;
#endif

	bo->tbo.bdev = &adev->mman.bdev;
	amdgpu_bo_placement_from_domain(bo, bp->domain);
	if (bp->type == ttm_bo_type_kernel)
		bo->tbo.priority = 1;

	r = ttm_bo_init_reserved(&adev->mman.bdev, &bo->tbo, size, bp->type,
				 &bo->placement, page_align, &ctx, acc_size,
				 NULL, bp->resv, &amdgpu_bo_destroy);
	if (unlikely(r != 0))
		return r;

	if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
	    bo->tbo.mem.mem_type == TTM_PL_VRAM &&
	    bo->tbo.mem.start < adev->gmc.visible_vram_size >> PAGE_SHIFT)
		amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved,
					     ctx.bytes_moved);
	else
		amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved, 0);

	if (bp->flags & AMDGPU_GEM_CREATE_VRAM_CLEARED &&
	    bo->tbo.mem.placement & TTM_PL_FLAG_VRAM) {
		struct dma_fence *fence;

		r = amdgpu_fill_buffer(bo, 0, bo->tbo.resv, &fence);
		if (unlikely(r))
			goto fail_unreserve;

		amdgpu_bo_fence(bo, fence, false);
		dma_fence_put(bo->tbo.moving);
		bo->tbo.moving = dma_fence_get(fence);
		dma_fence_put(fence);
	}
	if (!bp->resv)
		amdgpu_bo_unreserve(bo);
	*bo_ptr = bo;

	trace_amdgpu_bo_create(bo);

	/* Treat CPU_ACCESS_REQUIRED only as a hint if given by UMD */
	if (bp->type == ttm_bo_type_device)
		bo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;

	return 0;

fail_unreserve:
	if (!bp->resv)
		ww_mutex_unlock(&bo->tbo.resv->lock);
	amdgpu_bo_unref(&bo);
	return r;
}

static int amdgpu_bo_create_shadow(struct amdgpu_device *adev,
				   unsigned long size,
				   struct amdgpu_bo *bo)
{
	struct amdgpu_bo_param bp;
	int r;

	if (bo->shadow)
		return 0;

	memset(&bp, 0, sizeof(bp));
	bp.size = size;
	bp.domain = AMDGPU_GEM_DOMAIN_GTT;
	bp.flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC |
		AMDGPU_GEM_CREATE_SHADOW;
	bp.type = ttm_bo_type_kernel;
	bp.resv = bo->tbo.resv;

	r = amdgpu_bo_do_create(adev, &bp, &bo->shadow);
	if (!r) {
		bo->shadow->parent = amdgpu_bo_ref(bo);
		mutex_lock(&adev->shadow_list_lock);
		list_add_tail(&bo->shadow->shadow_list, &adev->shadow_list);
		mutex_unlock(&adev->shadow_list_lock);
	}

	return r;
}
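
/**
 * amdgpu_bo_create - create an &amdgpu_bo buffer object
 * @adev: amdgpu device object
 * @bp: parameters to be used for the buffer object
 * @bo_ptr: pointer to the buffer object pointer
 *
 * Creates an &amdgpu_bo buffer object; and if requested, also creates a
 * shadow object.
 * The shadow object is used to back up the original buffer object, and is
 * always placed in GTT.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */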
int amdgpu_bo_create(struct amdgpu_device *adev,
		     struct amdgpu_bo_param *bp,
		     struct amdgpu_bo **bo_ptr)
{
	u64 flags = bp->flags;
	int r;

	bp->flags = bp->flags & ~AMDGPU_GEM_CREATE_SHADOW;
	r = amdgpu_bo_do_create(adev, bp, bo_ptr);
	if (r)
		return r;

	if ((flags & AMDGPU_GEM_CREATE_SHADOW) && !(adev->flags & AMD_IS_APU)) {
		if (!bp->resv)
			WARN_ON(reservation_object_lock((*bo_ptr)->tbo.resv,
							NULL));

		r = amdgpu_bo_create_shadow(adev, bp->size, *bo_ptr);

		if (!bp->resv)
			reservation_object_unlock((*bo_ptr)->tbo.resv);

		if (r)
			amdgpu_bo_unref(bo_ptr);
	}

	return r;
}
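
/**
 * amdgpu_bo_validate - validate an &amdgpu_bo buffer object
 * @bo: pointer to the buffer object
 *
 * Sets placement according to domain; and changes placement and caching
 * policy of the buffer object according to the placement.
 * This is used for validating shadow BOs.  It calls ttm_bo_validate() to
 * make sure the buffer is resident where it needs to be.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */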
int amdgpu_bo_validate(struct amdgpu_bo *bo)
{
	struct ttm_operation_ctx ctx = { false, false };
	uint32_t domain;
	int r;

	if (bo->pin_count)
		return 0;

	domain = bo->preferred_domains;

retry:
	amdgpu_bo_placement_from_domain(bo, domain);
	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) {
		domain = bo->allowed_domains;
		goto retry;
	}

	return r;
}
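
/**
 * amdgpu_bo_restore_shadow - restore an &amdgpu_bo shadow
 *
 * @shadow: &amdgpu_bo shadow to be restored
 * @fence: dma_fence associated with the operation
 *
 * Copies a buffer object's shadow content back to the object.
 * This is used for recovering a buffer from its shadow in case of a gpu
 * reset where vram context may be lost.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */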
int amdgpu_bo_restore_shadow(struct amdgpu_bo *shadow, struct dma_fence **fence)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(shadow->tbo.bdev);
	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
	uint64_t shadow_addr, parent_addr;

	shadow_addr = amdgpu_bo_gpu_offset(shadow);
	parent_addr = amdgpu_bo_gpu_offset(shadow->parent);

	return amdgpu_copy_buffer(ring, shadow_addr, parent_addr,
				  amdgpu_bo_size(shadow), NULL, fence,
				  true, false);
}
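
/**
 * amdgpu_bo_kmap - map an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object to be mapped
 * @ptr: kernel virtual address to be returned
 *
 * Calls ttm_bo_kmap() to set up the kernel virtual mapping; calls
 * amdgpu_bo_kptr() to get the kernel virtual address.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */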
int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr)
{
	void *kptr;
	long r;

	if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
		return -EPERM;

	kptr = amdgpu_bo_kptr(bo);
	if (kptr) {
		if (ptr)
			*ptr = kptr;
		return 0;
	}

	r = reservation_object_wait_timeout_rcu(bo->tbo.resv, false, false,
						MAX_SCHEDULE_TIMEOUT);
	if (r < 0)
		return r;

	r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
	if (r)
		return r;

	if (ptr)
		*ptr = amdgpu_bo_kptr(bo);

	return 0;
}
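
/**
 * amdgpu_bo_kptr - returns a kernel virtual address of the buffer object
 * @bo: &amdgpu_bo buffer object
 *
 * Calls ttm_kmap_obj_virtual() to get the kernel virtual address.
 *
 * Returns:
 * the virtual address of a buffer object area.
 */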
void *amdgpu_bo_kptr(struct amdgpu_bo *bo)
{
	bool is_iomem;

	return ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
}
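
/**
 * amdgpu_bo_kunmap - unmap an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object to be unmapped
 *
 * Unmaps a kernel map set up by amdgpu_bo_kmap().
 */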
void amdgpu_bo_kunmap(struct amdgpu_bo *bo)
{
	if (bo->kmap.bo)
		ttm_bo_kunmap(&bo->kmap);
}
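
/**
 * amdgpu_bo_ref - reference an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object
 *
 * References the contained &ttm_buffer_object.
 *
 * Returns:
 * a refcounted pointer to the &amdgpu_bo buffer object.
 */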
struct amdgpu_bo *amdgpu_bo_ref(struct amdgpu_bo *bo)
{
	if (bo == NULL)
		return NULL;

	ttm_bo_get(&bo->tbo);
	return bo;
}
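
/**
 * amdgpu_bo_unref - unreference an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object
 *
 * Unreferences the contained &ttm_buffer_object and clears the pointer.
 */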
void amdgpu_bo_unref(struct amdgpu_bo **bo)
{
	struct ttm_buffer_object *tbo;

	if ((*bo) == NULL)
		return;

	tbo = &((*bo)->tbo);
	ttm_bo_put(tbo);
	*bo = NULL;
}
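
/**
 * amdgpu_bo_pin_restricted - pin an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object to be pinned
 * @domain: domain to be pinned to
 * @min_offset: the start of requested address range
 * @max_offset: the end of requested address range
 *
 * Pins the buffer object according to requested domain and address range. If
 * the memory is unbound gart memory, binds the pages into gart table. Adjusts
 * pin_count and pin_size accordingly.
 *
 * Pinning means to lock pages in memory along with keeping them at a fixed
 * offset. It is required when a buffer can not be moved, for example, when
 * a display buffer is being scanned out.
 *
 * Compared with amdgpu_bo_pin(), this function gives more flexibility on
 * where to pin a buffer if there are specific restrictions on where a buffer
 * must be located.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */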
int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
			     u64 min_offset, u64 max_offset)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct ttm_operation_ctx ctx = { false, false };
	int r, i;

	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm))
		return -EPERM;

	if (WARN_ON_ONCE(min_offset > max_offset))
		return -EINVAL;

	/* A shared bo cannot be migrated to VRAM */
	if (bo->prime_shared_count) {
		if (domain & AMDGPU_GEM_DOMAIN_GTT)
			domain = AMDGPU_GEM_DOMAIN_GTT;
		else
			return -EINVAL;
	}

	/* This assumes only APU display buffers are pinned with (VRAM|GTT).
	 * See function amdgpu_display_supported_domains()
	 */
	domain = amdgpu_bo_get_preferred_pin_domain(adev, domain);

	if (bo->pin_count) {
		uint32_t mem_type = bo->tbo.mem.mem_type;

		if (!(domain & amdgpu_mem_type_to_domain(mem_type)))
			return -EINVAL;

		bo->pin_count++;

		if (max_offset != 0) {
			u64 domain_start = bo->tbo.bdev->man[mem_type].gpu_offset;

			WARN_ON_ONCE(max_offset <
				     (amdgpu_bo_gpu_offset(bo) - domain_start));
		}

		return 0;
	}

	bo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
	/* force to pin into visible video ram */
	if (!(bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS))
		bo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
	amdgpu_bo_placement_from_domain(bo, domain);
	for (i = 0; i < bo->placement.num_placement; i++) {
		unsigned fpfn, lpfn;

		fpfn = min_offset >> PAGE_SHIFT;
		lpfn = max_offset >> PAGE_SHIFT;

		if (fpfn > bo->placements[i].fpfn)
			bo->placements[i].fpfn = fpfn;
		if (!bo->placements[i].lpfn ||
		    (lpfn && lpfn < bo->placements[i].lpfn))
			bo->placements[i].lpfn = lpfn;
		bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
	}

	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (unlikely(r)) {
		dev_err(adev->dev, "%p pin failed\n", bo);
		goto error;
	}

	bo->pin_count = 1;

	domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
	if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
		atomic64_add(amdgpu_bo_size(bo), &adev->vram_pin_size);
		atomic64_add(amdgpu_vram_mgr_bo_visible_size(bo),
			     &adev->visible_pin_size);
	} else if (domain == AMDGPU_GEM_DOMAIN_GTT) {
		atomic64_add(amdgpu_bo_size(bo), &adev->gart_pin_size);
	}

error:
	return r;
}
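
/**
 * amdgpu_bo_pin - pin an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object to be pinned
 * @domain: domain to be pinned to
 *
 * A simple wrapper to amdgpu_bo_pin_restricted().
 * Provides a simpler API for buffers that do not have any strict restrictions
 * on where a buffer must be located.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */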
int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain)
{
	return amdgpu_bo_pin_restricted(bo, domain, 0, 0);
}
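
/**
 * amdgpu_bo_unpin - unpin an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object to be unpinned
 *
 * Decreases the pin_count, and clears the flags if pin_count reaches 0.
 * Changes placement and pin size accordingly.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */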
int amdgpu_bo_unpin(struct amdgpu_bo *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct ttm_operation_ctx ctx = { false, false };
	int r, i;

	if (WARN_ON_ONCE(!bo->pin_count)) {
		dev_warn(adev->dev, "%p unpin not necessary\n", bo);
		return 0;
	}
	bo->pin_count--;
	if (bo->pin_count)
		return 0;

	amdgpu_bo_subtract_pin_size(bo);

	for (i = 0; i < bo->placement.num_placement; i++) {
		bo->placements[i].lpfn = 0;
		bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
	}
	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (unlikely(r))
		dev_err(adev->dev, "%p validate failed for unpin\n", bo);

	return r;
}
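
/**
 * amdgpu_bo_evict_vram - evict VRAM buffers
 * @adev: amdgpu device object
 *
 * Evicts all VRAM buffers on the lru list of the memory type.
 * Mainly used for evicting vram at suspend time.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */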
int amdgpu_bo_evict_vram(struct amdgpu_device *adev)
{
	/* No need to evict VRAM on APUs unless we are hibernating */
#ifndef CONFIG_HIBERNATION
	if (adev->flags & AMD_IS_APU) {
		/* Useless to evict on IGP chips */
		return 0;
	}
#endif
	return ttm_bo_evict_mm(&adev->mman.bdev, TTM_PL_VRAM);
}

static const char *amdgpu_vram_names[] = {
	"UNKNOWN",
	"GDDR1",
	"DDR2",
	"GDDR3",
	"GDDR4",
	"GDDR5",
	"HBM",
	"DDR3",
	"DDR4",
};
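
/**
 * amdgpu_bo_init - initialize memory manager
 * @adev: amdgpu device object
 *
 * Calls amdgpu_ttm_init() to initialize amdgpu memory manager.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */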
int amdgpu_bo_init(struct amdgpu_device *adev)
{
	/* reserve PAT memory space to WC for VRAM */
	arch_io_reserve_memtype_wc(adev->gmc.aper_base,
				   adev->gmc.aper_size);

	/* Add an MTRR for the VRAM */
	adev->gmc.vram_mtrr = arch_phys_wc_add(adev->gmc.aper_base,
					       adev->gmc.aper_size);
	DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n",
		 adev->gmc.mc_vram_size >> 20,
		 (unsigned long long)adev->gmc.aper_size >> 20);
	DRM_INFO("RAM width %dbits %s\n",
		 adev->gmc.vram_width, amdgpu_vram_names[adev->gmc.vram_type]);
	return amdgpu_ttm_init(adev);
}
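
/**
 * amdgpu_bo_late_init - late init
 * @adev: amdgpu device object
 *
 * Calls amdgpu_ttm_late_init() to free resources used earlier during
 * initialization.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */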
int amdgpu_bo_late_init(struct amdgpu_device *adev)
{
	amdgpu_ttm_late_init(adev);

	return 0;
}
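
/**
 * amdgpu_bo_fini - tear down memory manager
 * @adev: amdgpu device object
 *
 * Reverses amdgpu_bo_init() to tear down memory manager.
 */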
void amdgpu_bo_fini(struct amdgpu_device *adev)
{
	amdgpu_ttm_fini(adev);
	arch_phys_wc_del(adev->gmc.vram_mtrr);
	arch_io_free_memtype_wc(adev->gmc.aper_base, adev->gmc.aper_size);
}
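
/**
 * amdgpu_bo_fbdev_mmap - mmap fbdev memory
 * @bo: &amdgpu_bo buffer object
 * @vma: vma as input from the fbdev mmap method
 *
 * Calls ttm_fbdev_mmap() to mmap fbdev memory if it is backed by a bo.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */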
int amdgpu_bo_fbdev_mmap(struct amdgpu_bo *bo,
			 struct vm_area_struct *vma)
{
	return ttm_fbdev_mmap(vma, &bo->tbo);
}
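
/**
 * amdgpu_bo_set_tiling_flags - set tiling flags
 * @bo: &amdgpu_bo buffer object
 * @tiling_flags: new flags
 *
 * Sets buffer object's tiling flags with the new one. Used by GEM ioctl or
 * kernel driver to set the tiling flags on a buffer.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */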
int amdgpu_bo_set_tiling_flags(struct amdgpu_bo *bo, u64 tiling_flags)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);

	if (adev->family <= AMDGPU_FAMILY_CZ &&
	    AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT) > 6)
		return -EINVAL;

	bo->tiling_flags = tiling_flags;
	return 0;
}
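
/**
 * amdgpu_bo_get_tiling_flags - get tiling flags
 * @bo: &amdgpu_bo buffer object
 * @tiling_flags: returned flags
 *
 * Gets buffer object's tiling flags. Used by GEM ioctl or kernel driver to
 * get the tiling flags on a buffer.
 */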
void amdgpu_bo_get_tiling_flags(struct amdgpu_bo *bo, u64 *tiling_flags)
{
	lockdep_assert_held(&bo->tbo.resv->lock.base);

	if (tiling_flags)
		*tiling_flags = bo->tiling_flags;
}
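
/**
 * amdgpu_bo_set_metadata - set metadata
 * @bo: &amdgpu_bo buffer object
 * @metadata: new metadata
 * @metadata_size: size of the new metadata
 * @flags: flags of the new metadata
 *
 * Sets buffer object's metadata, its size and flags.
 * Used via GEM ioctl.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */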
int amdgpu_bo_set_metadata(struct amdgpu_bo *bo, void *metadata,
			   uint32_t metadata_size, uint64_t flags)
{
	void *buffer;

	if (!metadata_size) {
		if (bo->metadata_size) {
			kfree(bo->metadata);
			bo->metadata = NULL;
			bo->metadata_size = 0;
		}
		return 0;
	}

	if (metadata == NULL)
		return -EINVAL;

	buffer = kmemdup(metadata, metadata_size, GFP_KERNEL);
	if (buffer == NULL)
		return -ENOMEM;

	kfree(bo->metadata);
	bo->metadata_flags = flags;
	bo->metadata = buffer;
	bo->metadata_size = metadata_size;

	return 0;
}
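
/**
 * amdgpu_bo_get_metadata - get metadata
 * @bo: &amdgpu_bo buffer object
 * @buffer: returned metadata
 * @buffer_size: size of the buffer
 * @metadata_size: size of the returned metadata
 * @flags: flags of the returned metadata
 *
 * Gets buffer object's metadata, its size and flags. buffer_size shall not be
 * less than metadata_size.
 * Used via GEM ioctl.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */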
int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
			   size_t buffer_size, uint32_t *metadata_size,
			   uint64_t *flags)
{
	if (!buffer && !metadata_size)
		return -EINVAL;

	if (buffer) {
		if (buffer_size < bo->metadata_size)
			return -EINVAL;

		if (bo->metadata_size)
			memcpy(buffer, bo->metadata, bo->metadata_size);
	}

	if (metadata_size)
		*metadata_size = bo->metadata_size;
	if (flags)
		*flags = bo->metadata_flags;

	return 0;
}
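
/**
 * amdgpu_bo_move_notify - notification about a memory move
 * @bo: pointer to a buffer object
 * @evict: if this move is evicting the buffer from the graphics address space
 * @new_mem: new information of the buffer object
 *
 * Marks the corresponding &amdgpu_bo buffer object as invalid, also performs
 * bookkeeping.
 * TTM driver callback which is called when ttm moves a buffer.
 */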
void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
			   bool evict,
			   struct ttm_mem_reg *new_mem)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
	struct amdgpu_bo *abo;
	struct ttm_mem_reg *old_mem = &bo->mem;

	if (!amdgpu_bo_is_amdgpu_bo(bo))
		return;

	abo = ttm_to_amdgpu_bo(bo);
	amdgpu_vm_bo_invalidate(adev, abo, evict);

	amdgpu_bo_kunmap(abo);

	/* remember the eviction */
	if (evict)
		atomic64_inc(&adev->num_evictions);

	/* update statistics */
	if (!new_mem)
		return;

	/* move_notify is called before move happens */
	trace_amdgpu_bo_move(abo, new_mem->mem_type, old_mem->mem_type);
}
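
/**
 * amdgpu_bo_fault_reserve_notify - notification about a memory fault
 * @bo: pointer to a buffer object
 *
 * Notifies the driver we are taking a fault on this BO and have reserved it,
 * also performs bookkeeping.
 * TTM driver callback for dealing with vm faults.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */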
int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
	struct ttm_operation_ctx ctx = { false, false };
	struct amdgpu_bo *abo;
	unsigned long offset, size;
	int r;

	if (!amdgpu_bo_is_amdgpu_bo(bo))
		return 0;

	abo = ttm_to_amdgpu_bo(bo);

	/* Remember that this BO was accessed by the CPU */
	abo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;

	if (bo->mem.mem_type != TTM_PL_VRAM)
		return 0;

	size = bo->mem.num_pages << PAGE_SHIFT;
	offset = bo->mem.start << PAGE_SHIFT;
	if ((offset + size) <= adev->gmc.visible_vram_size)
		return 0;

	/* Can't move a pinned BO to visible VRAM */
	if (abo->pin_count > 0)
		return -EINVAL;

	/* hurrah the memory is not visible ! */
	atomic64_inc(&adev->num_vram_cpu_page_faults);
	amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM |
					AMDGPU_GEM_DOMAIN_GTT);

	/* Avoid costly evictions; only set GTT as a busy placement */
	abo->placement.num_busy_placement = 1;
	abo->placement.busy_placement = &abo->placements[1];

	r = ttm_bo_validate(bo, &abo->placement, &ctx);
	if (unlikely(r != 0))
		return r;

	offset = bo->mem.start << PAGE_SHIFT;
	/* this should never happen */
	if (bo->mem.mem_type == TTM_PL_VRAM &&
	    (offset + size) > adev->gmc.visible_vram_size)
		return -EINVAL;

	return 0;
}
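
/**
 * amdgpu_bo_fence - add fence to buffer object
 *
 * @bo: buffer object in question
 * @fence: fence to add
 * @shared: true if fence should be added shared
 */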
void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
		     bool shared)
{
	struct reservation_object *resv = bo->tbo.resv;

	if (shared)
		reservation_object_add_shared_fence(resv, fence);
	else
		reservation_object_add_excl_fence(resv, fence);
}
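
/**
 * amdgpu_bo_sync_wait - Wrapper for amdgpu_sync_wait
 * @bo: buffer object to wait for
 * @owner: fence owner
 * @intr: Whether the wait is interruptible
 *
 * Returns:
 * 0 on success, errno otherwise.
 */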
int amdgpu_bo_sync_wait(struct amdgpu_bo *bo, void *owner, bool intr)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct amdgpu_sync sync;
	int r;

	amdgpu_sync_create(&sync);
	amdgpu_sync_resv(adev, &sync, bo->tbo.resv, owner, false);
	r = amdgpu_sync_wait(&sync, intr);
	amdgpu_sync_free(&sync);

	return r;
}
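
/**
 * amdgpu_bo_gpu_offset - return GPU offset of bo
 * @bo: amdgpu object for which we query the offset
 *
 * Note: object should either be pinned or reserved when calling this
 * function, it might be useful to add a check for this for debugging.
 *
 * Returns:
 * current GPU offset of the object.
 */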
u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo)
{
	WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_SYSTEM);
	WARN_ON_ONCE(!ww_mutex_is_locked(&bo->tbo.resv->lock) &&
		     !bo->pin_count && bo->tbo.type != ttm_bo_type_kernel);
	WARN_ON_ONCE(bo->tbo.mem.start == AMDGPU_BO_INVALID_OFFSET);
	WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_VRAM &&
		     !(bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS));

	return amdgpu_gmc_sign_extend(bo->tbo.offset);
}
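
/**
 * amdgpu_bo_get_preferred_pin_domain - get preferred domain for scanout
 * @adev: amdgpu device object
 * @domain: allowed memory domains
 *
 * Returns:
 * Which of the allowed domains is preferred for pinning the BO for scanout.
 */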
uint32_t amdgpu_bo_get_preferred_pin_domain(struct amdgpu_device *adev,
					    uint32_t domain)
{
	if (domain == (AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT)) {
		domain = AMDGPU_GEM_DOMAIN_VRAM;
		if (adev->gmc.real_vram_size <= AMDGPU_SG_THRESHOLD)
			domain = AMDGPU_GEM_DOMAIN_GTT;
	}
	return domain;
}