#include <linux/io-64-nonatomic-lo-hi.h>

#include "amdgpu.h"
#include "amdgpu_gmc.h"
#include "amdgpu_ras.h"
#include "amdgpu_xgmi.h"

#include <drm/drm_drv.h>
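
/**
 * amdgpu_gmc_pdb0_alloc - allocate vram for pdb0
 *
 * @adev: amdgpu_device pointer
 *
 * Allocate video memory for pdb0 and map it for CPU access.
 *
 * Returns:
 * 0 on success, negative error code on failure.
 */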
int amdgpu_gmc_pdb0_alloc(struct amdgpu_device *adev)
{
	int r;
	struct amdgpu_bo_param bp;
	u64 vram_size = adev->gmc.xgmi.node_segment_size * adev->gmc.xgmi.num_physical_nodes;
	uint32_t pde0_page_shift = adev->gmc.vmid0_page_table_block_size + 21;
	uint32_t npdes = (vram_size + (1ULL << pde0_page_shift) - 1) >> pde0_page_shift;

	memset(&bp, 0, sizeof(bp));
	bp.size = PAGE_ALIGN((npdes + 1) * 8);
	bp.byte_align = PAGE_SIZE;
	bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
	bp.flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
		AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
	bp.type = ttm_bo_type_kernel;
	bp.resv = NULL;
	bp.bo_ptr_size = sizeof(struct amdgpu_bo);

	r = amdgpu_bo_create(adev, &bp, &adev->gmc.pdb0_bo);
	if (r)
		return r;

	r = amdgpu_bo_reserve(adev->gmc.pdb0_bo, false);
	if (unlikely(r != 0))
		goto bo_reserve_failure;

	r = amdgpu_bo_pin(adev->gmc.pdb0_bo, AMDGPU_GEM_DOMAIN_VRAM);
	if (r)
		goto bo_pin_failure;
	r = amdgpu_bo_kmap(adev->gmc.pdb0_bo, &adev->gmc.ptr_pdb0);
	if (r)
		goto bo_kmap_failure;

	amdgpu_bo_unreserve(adev->gmc.pdb0_bo);
	return 0;

bo_kmap_failure:
	amdgpu_bo_unpin(adev->gmc.pdb0_bo);
bo_pin_failure:
	amdgpu_bo_unreserve(adev->gmc.pdb0_bo);
bo_reserve_failure:
	amdgpu_bo_unref(&adev->gmc.pdb0_bo);
	return r;
}
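
/**
 * amdgpu_gmc_get_pde_for_bo - get the PDE for a BO
 *
 * @bo: the BO to get the PDE for
 * @level: the level in the PD hierarchy
 * @addr: resulting addr
 * @flags: resulting flags
 *
 * Get the address and flags to be used for a PDE (page directory entry).
 */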
void amdgpu_gmc_get_pde_for_bo(struct amdgpu_bo *bo, int level,
			       uint64_t *addr, uint64_t *flags)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);

	switch (bo->tbo.resource->mem_type) {
	case TTM_PL_TT:
		*addr = bo->tbo.ttm->dma_address[0];
		break;
	case TTM_PL_VRAM:
		*addr = amdgpu_bo_gpu_offset(bo);
		break;
	default:
		*addr = 0;
		break;
	}
	*flags = amdgpu_ttm_tt_pde_flags(bo->tbo.ttm, bo->tbo.resource);
	amdgpu_gmc_get_vm_pde(adev, level, addr, flags);
}
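
/*
 * amdgpu_gmc_pd_addr - return the address of the root directory
 */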
uint64_t amdgpu_gmc_pd_addr(struct amdgpu_bo *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	uint64_t pd_addr;

	/* TODO: move that into ASIC specific code */
	if (adev->asic_type >= CHIP_VEGA10) {
		uint64_t flags = AMDGPU_PTE_VALID;

		amdgpu_gmc_get_pde_for_bo(bo, -1, &pd_addr, &flags);
		pd_addr |= flags;
	} else {
		pd_addr = amdgpu_bo_gpu_offset(bo);
	}
	return pd_addr;
}
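
/**
 * amdgpu_gmc_set_pte_pde - update the page tables using CPU
 *
 * @adev: amdgpu_device pointer
 * @cpu_pt_addr: cpu address of the page table
 * @gpu_page_idx: entry in the page table to update
 * @addr: dst addr to write into pte/pde
 * @flags: access flags
 *
 * Update the page tables using CPU.
 */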
int amdgpu_gmc_set_pte_pde(struct amdgpu_device *adev, void *cpu_pt_addr,
			   uint32_t gpu_page_idx, uint64_t addr,
			   uint64_t flags)
{
	void __iomem *ptr = (void *)cpu_pt_addr;
	uint64_t value;

	/*
	 * The following is for PTE only. GART does not have PDEs.
	 */
	value = addr & 0x0000FFFFFFFFF000ULL;
	value |= flags;
	writeq(value, ptr + (gpu_page_idx * 8));

	return 0;
}
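
/**
 * amdgpu_gmc_agp_addr - return the address in the AGP address space
 *
 * @bo: TTM BO which needs the address, must be in GTT domain
 *
 * Tries to figure out how to access the BO through the AGP aperture. Returns
 * the AGP address if possible, AMDGPU_BO_INVALID_OFFSET otherwise.
 */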
uint64_t amdgpu_gmc_agp_addr(struct ttm_buffer_object *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);

	if (bo->ttm->num_pages != 1 || bo->ttm->caching == ttm_cached)
		return AMDGPU_BO_INVALID_OFFSET;

	if (bo->ttm->dma_address[0] + PAGE_SIZE >= adev->gmc.agp_size)
		return AMDGPU_BO_INVALID_OFFSET;

	return adev->gmc.agp_start + bo->ttm->dma_address[0];
}
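
/**
 * amdgpu_gmc_vram_location - try to find VRAM location
 *
 * @adev: amdgpu device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 * @base: base address at which to put VRAM
 *
 * Function will try to place VRAM at the base address provided
 * as parameter.
 */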
void amdgpu_gmc_vram_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc,
			      u64 base)
{
	uint64_t limit = (uint64_t)amdgpu_vram_limit << 20;

	mc->vram_start = base;
	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
	if (limit && limit < mc->real_vram_size)
		mc->real_vram_size = limit;

	if (mc->xgmi.num_physical_nodes == 0) {
		mc->fb_start = mc->vram_start;
		mc->fb_end = mc->vram_end;
	}
	dev_info(adev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
		 mc->mc_vram_size >> 20, mc->vram_start,
		 mc->vram_end, mc->real_vram_size >> 20);
}
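
/**
 * amdgpu_gmc_sysvm_location - place VRAM and GART in the system VM aperture
 *
 * @adev: amdgpu device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 *
 * This function is only used when GART is used for FB translation. In such
 * a case the system VM aperture (the VMID0 page tables) covers both VRAM
 * and GART (aka system memory) access.
 *
 * For simplicity the system VM aperture is placed at address 0: VRAM of the
 * whole hive starts at address 0 and GART follows right after it.
 */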
void amdgpu_gmc_sysvm_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc)
{
	u64 hive_vram_start = 0;
	u64 hive_vram_end = mc->xgmi.node_segment_size * mc->xgmi.num_physical_nodes - 1;

	mc->vram_start = mc->xgmi.node_segment_size * mc->xgmi.physical_node_id;
	mc->vram_end = mc->vram_start + mc->xgmi.node_segment_size - 1;
	mc->gart_start = hive_vram_end + 1;
	mc->gart_end = mc->gart_start + mc->gart_size - 1;
	mc->fb_start = hive_vram_start;
	mc->fb_end = hive_vram_end;
	dev_info(adev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
		 mc->mc_vram_size >> 20, mc->vram_start,
		 mc->vram_end, mc->real_vram_size >> 20);
	dev_info(adev->dev, "GART: %lluM 0x%016llX - 0x%016llX\n",
		 mc->gart_size >> 20, mc->gart_start, mc->gart_end);
}
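
/**
 * amdgpu_gmc_gart_location - try to find GART location
 *
 * @adev: amdgpu device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 *
 * Function will place the GART before or after VRAM.
 * If the GART size is bigger than the space left, the GART size is
 * adjusted, so this function never fails.
 */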
void amdgpu_gmc_gart_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc)
{
	const uint64_t four_gb = 0x100000000ULL;
	u64 size_af, size_bf;
	/* To avoid the hole, limit the max mc address to AMDGPU_GMC_HOLE_START */
	u64 max_mc_address = min(adev->gmc.mc_mask, AMDGPU_GMC_HOLE_START - 1);

	/* VCE doesn't like it when BOs cross a 4GB segment, so align
	 * the GART base on a 4GB boundary as well.
	 */
	size_bf = mc->fb_start;
	size_af = max_mc_address + 1 - ALIGN(mc->fb_end + 1, four_gb);

	if (mc->gart_size > max(size_bf, size_af)) {
		dev_warn(adev->dev, "limiting GART\n");
		mc->gart_size = max(size_bf, size_af);
	}

	if ((size_bf >= mc->gart_size && size_bf < size_af) ||
	    (size_af < mc->gart_size))
		mc->gart_start = 0;
	else
		mc->gart_start = max_mc_address - mc->gart_size + 1;

	mc->gart_start &= ~(four_gb - 1);
	mc->gart_end = mc->gart_start + mc->gart_size - 1;
	dev_info(adev->dev, "GART: %lluM 0x%016llX - 0x%016llX\n",
		 mc->gart_size >> 20, mc->gart_start, mc->gart_end);
}
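
/**
 * amdgpu_gmc_agp_location - try to find AGP location
 *
 * @adev: amdgpu device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 *
 * Function will place the AGP BAR in the largest available hole in the
 * MC address space. Should be called after the VRAM and GART locations
 * are set up.
 */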
void amdgpu_gmc_agp_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc)
{
	const uint64_t sixteen_gb = 1ULL << 34;
	const uint64_t sixteen_gb_mask = ~(sixteen_gb - 1);
	u64 size_af, size_bf;

	if (amdgpu_sriov_vf(adev)) {
		mc->agp_start = 0xffffffffffff;
		mc->agp_end = 0x0;
		mc->agp_size = 0;

		return;
	}

	if (mc->fb_start > mc->gart_start) {
		size_bf = (mc->fb_start & sixteen_gb_mask) -
			ALIGN(mc->gart_end + 1, sixteen_gb);
		size_af = mc->mc_mask + 1 - ALIGN(mc->fb_end + 1, sixteen_gb);
	} else {
		size_bf = mc->fb_start & sixteen_gb_mask;
		size_af = (mc->gart_start & sixteen_gb_mask) -
			ALIGN(mc->fb_end + 1, sixteen_gb);
	}

	if (size_bf > size_af) {
		mc->agp_start = (mc->fb_start - size_bf) & sixteen_gb_mask;
		mc->agp_size = size_bf;
	} else {
		mc->agp_start = ALIGN(mc->fb_end + 1, sixteen_gb);
		mc->agp_size = size_af;
	}

	mc->agp_end = mc->agp_start + mc->agp_size - 1;
	dev_info(adev->dev, "AGP: %lluM 0x%016llX - 0x%016llX\n",
		 mc->agp_size >> 20, mc->agp_start, mc->agp_end);
}
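
/**
 * amdgpu_gmc_fault_key - get the hash key from a VM fault address and pasid
 *
 * @addr: 48 bit physical address, page aligned (36 significant bits)
 * @pasid: 16 bit process address space identifier
 */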
static inline uint64_t amdgpu_gmc_fault_key(uint64_t addr, uint16_t pasid)
{
	return addr << 4 | pasid;
}
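
/**
 * amdgpu_gmc_filter_faults - filter VM faults
 *
 * @adev: amdgpu device structure
 * @ih: interrupt ring the fault was received on
 * @addr: address of the VM fault
 * @pasid: PASID of the process causing the fault
 * @timestamp: timestamp of the fault
 *
 * Returns:
 * True if the fault was filtered and should not be processed further.
 * False if the fault is a new one and needs to be handled.
 */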
bool amdgpu_gmc_filter_faults(struct amdgpu_device *adev,
			      struct amdgpu_ih_ring *ih, uint64_t addr,
			      uint16_t pasid, uint64_t timestamp)
{
	struct amdgpu_gmc *gmc = &adev->gmc;
	uint64_t stamp, key = amdgpu_gmc_fault_key(addr, pasid);
	struct amdgpu_gmc_fault *fault;
	uint32_t hash;

	/* Stale retry fault if timestamp goes backward */
	if (amdgpu_ih_ts_after(timestamp, ih->processed_timestamp))
		return true;

	/* If we don't have space left in the ring buffer return immediately */
	stamp = max(timestamp, AMDGPU_GMC_FAULT_TIMEOUT + 1) -
		AMDGPU_GMC_FAULT_TIMEOUT;
	if (gmc->fault_ring[gmc->last_fault].timestamp >= stamp)
		return true;

	/* Try to find the fault in the hash */
	hash = hash_64(key, AMDGPU_GMC_FAULT_HASH_ORDER);
	fault = &gmc->fault_ring[gmc->fault_hash[hash].idx];
	while (fault->timestamp >= stamp) {
		uint64_t tmp;

		if (atomic64_read(&fault->key) == key)
			return true;

		tmp = fault->timestamp;
		fault = &gmc->fault_ring[fault->next];

		/* Check if the entry was reused */
		if (fault->timestamp >= tmp)
			break;
	}

	/* Add the fault to the ring */
	fault = &gmc->fault_ring[gmc->last_fault];
	atomic64_set(&fault->key, key);
	fault->timestamp = timestamp;

	/* And update the hash */
	fault->next = gmc->fault_hash[hash].idx;
	gmc->fault_hash[hash].idx = gmc->last_fault++;
	return false;
}
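
/**
 * amdgpu_gmc_filter_faults_remove - remove address from VM faults filter
 *
 * @adev: amdgpu device structure
 * @addr: address of the VM fault
 * @pasid: PASID of the process causing the fault
 *
 * Remove the address from the fault filter so that future VM faults on this
 * address are passed to the retry fault handler again.
 */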
void amdgpu_gmc_filter_faults_remove(struct amdgpu_device *adev, uint64_t addr,
				     uint16_t pasid)
{
	struct amdgpu_gmc *gmc = &adev->gmc;
	uint64_t key = amdgpu_gmc_fault_key(addr, pasid);
	struct amdgpu_gmc_fault *fault;
	uint32_t hash;
	uint64_t tmp;

	hash = hash_64(key, AMDGPU_GMC_FAULT_HASH_ORDER);
	fault = &gmc->fault_ring[gmc->fault_hash[hash].idx];
	do {
		if (atomic64_cmpxchg(&fault->key, key, 0) == key)
			break;

		tmp = fault->timestamp;
		fault = &gmc->fault_ring[fault->next];
	} while (fault->timestamp < tmp);
}

int amdgpu_gmc_ras_late_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->umc.ras_funcs &&
	    adev->umc.ras_funcs->ras_late_init) {
		r = adev->umc.ras_funcs->ras_late_init(adev);
		if (r)
			return r;
	}

	if (adev->mmhub.ras_funcs &&
	    adev->mmhub.ras_funcs->ras_late_init) {
		r = adev->mmhub.ras_funcs->ras_late_init(adev);
		if (r)
			return r;
	}

	if (!adev->gmc.xgmi.connected_to_cpu)
		adev->gmc.xgmi.ras_funcs = &xgmi_ras_funcs;

	if (adev->gmc.xgmi.ras_funcs &&
	    adev->gmc.xgmi.ras_funcs->ras_late_init) {
		r = adev->gmc.xgmi.ras_funcs->ras_late_init(adev);
		if (r)
			return r;
	}

	if (adev->hdp.ras_funcs &&
	    adev->hdp.ras_funcs->ras_late_init) {
		r = adev->hdp.ras_funcs->ras_late_init(adev);
		if (r)
			return r;
	}

	if (adev->mca.mp0.ras_funcs &&
	    adev->mca.mp0.ras_funcs->ras_late_init) {
		r = adev->mca.mp0.ras_funcs->ras_late_init(adev);
		if (r)
			return r;
	}

	if (adev->mca.mp1.ras_funcs &&
	    adev->mca.mp1.ras_funcs->ras_late_init) {
		r = adev->mca.mp1.ras_funcs->ras_late_init(adev);
		if (r)
			return r;
	}

	if (adev->mca.mpio.ras_funcs &&
	    adev->mca.mpio.ras_funcs->ras_late_init) {
		r = adev->mca.mpio.ras_funcs->ras_late_init(adev);
		if (r)
			return r;
	}

	return 0;
}

void amdgpu_gmc_ras_fini(struct amdgpu_device *adev)
{
	if (adev->umc.ras_funcs &&
	    adev->umc.ras_funcs->ras_fini)
		adev->umc.ras_funcs->ras_fini(adev);

	if (adev->mmhub.ras_funcs &&
	    adev->mmhub.ras_funcs->ras_fini)
		adev->mmhub.ras_funcs->ras_fini(adev);

	if (adev->gmc.xgmi.ras_funcs &&
	    adev->gmc.xgmi.ras_funcs->ras_fini)
		adev->gmc.xgmi.ras_funcs->ras_fini(adev);

	if (adev->hdp.ras_funcs &&
	    adev->hdp.ras_funcs->ras_fini)
		adev->hdp.ras_funcs->ras_fini(adev);
}
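
/*
 * Bitmaps of the VM invalidation engines that are free to be used by
 * amdgpu rings on each hub (0x1FFF3: engines 0, 1 and 4~16 free;
 * engines 2 and 3 are reserved for firmware, engine 17 for GART flushes).
 */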
#define GFXHUB_FREE_VM_INV_ENGS_BITMAP	0x1FFF3
#define MMHUB_FREE_VM_INV_ENGS_BITMAP	0x1FFF3

int amdgpu_gmc_allocate_vm_inv_eng(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	unsigned vm_inv_engs[AMDGPU_MAX_VMHUBS] = {
		GFXHUB_FREE_VM_INV_ENGS_BITMAP, MMHUB_FREE_VM_INV_ENGS_BITMAP,
		GFXHUB_FREE_VM_INV_ENGS_BITMAP
	};
	unsigned i;
	unsigned vmhub, inv_eng;

	for (i = 0; i < adev->num_rings; ++i) {
		ring = adev->rings[i];
		vmhub = ring->funcs->vmhub;

		if (ring == &adev->mes.ring)
			continue;

		inv_eng = ffs(vm_inv_engs[vmhub]);
		if (!inv_eng) {
			dev_err(adev->dev, "no VM inv eng for ring %s\n",
				ring->name);
			return -EINVAL;
		}

		ring->vm_inv_eng = inv_eng - 1;
		vm_inv_engs[vmhub] &= ~(1 << ring->vm_inv_eng);

		dev_info(adev->dev, "ring %s uses VM inv eng %u on hub %u\n",
			 ring->name, ring->vm_inv_eng, ring->funcs->vmhub);
	}

	return 0;
}
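
/**
 * amdgpu_gmc_tmz_set - check and set if a device supports TMZ
 *
 * @adev: amdgpu_device pointer
 *
 * Check and set if the device @adev supports Trusted Memory
 * Zones (TMZ).
 */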
void amdgpu_gmc_tmz_set(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_RAVEN:
	case CHIP_RENOIR:
		if (amdgpu_tmz == 0) {
			adev->gmc.tmz_enabled = false;
			dev_info(adev->dev,
				 "Trusted Memory Zone (TMZ) feature disabled (cmd line)\n");
		} else {
			adev->gmc.tmz_enabled = true;
			dev_info(adev->dev,
				 "Trusted Memory Zone (TMZ) feature enabled\n");
		}
		break;
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_NAVI12:
	case CHIP_VANGOGH:
	case CHIP_YELLOW_CARP:
		/* Don't enable it by default yet.
		 */
		if (amdgpu_tmz < 1) {
			adev->gmc.tmz_enabled = false;
			dev_info(adev->dev,
				 "Trusted Memory Zone (TMZ) feature disabled as experimental (default)\n");
		} else {
			adev->gmc.tmz_enabled = true;
			dev_info(adev->dev,
				 "Trusted Memory Zone (TMZ) feature enabled as experimental (cmd line)\n");
		}
		break;
	default:
		adev->gmc.tmz_enabled = false;
		dev_info(adev->dev,
			 "Trusted Memory Zone (TMZ) feature not supported\n");
		break;
	}
}
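
/**
 * amdgpu_gmc_noretry_set - set per asic noretry defaults
 *
 * @adev: amdgpu_device pointer
 *
 * Set a per asic default for the no-retry parameter.
 */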
void amdgpu_gmc_noretry_set(struct amdgpu_device *adev)
{
	struct amdgpu_gmc *gmc = &adev->gmc;

	switch (adev->asic_type) {
	case CHIP_VEGA10:
	case CHIP_VEGA20:
	case CHIP_ARCTURUS:
	case CHIP_ALDEBARAN:
		/*
		 * noretry = 0 will cause kfd page fault tests fail
		 * for some ASICs, so set default to 1 for these ASICs
		 */
		if (amdgpu_noretry == -1)
			gmc->noretry = 1;
		else
			gmc->noretry = amdgpu_noretry;
		break;
	case CHIP_RAVEN:
	default:
		/* Raven currently has issues with noretry
		 * regardless of what we decide for other
		 * asics, we should leave raven with
		 * noretry = 0 until we root cause the
		 * issues.
		 *
		 * default this to 0 for now, but we may want
		 * to change this in the future for certain
		 * GPUs as it can increase performance in
		 * certain cases.
		 */
		if (amdgpu_noretry == -1)
			gmc->noretry = 0;
		else
			gmc->noretry = amdgpu_noretry;
		break;
	}
}

void amdgpu_gmc_set_vm_fault_masks(struct amdgpu_device *adev, int hub_type,
				   bool enable)
{
	struct amdgpu_vmhub *hub;
	u32 tmp, reg, i;

	hub = &adev->vmhub[hub_type];
	for (i = 0; i < 16; i++) {
		reg = hub->vm_context0_cntl + hub->ctx_distance * i;

		tmp = (hub_type == AMDGPU_GFXHUB_0) ?
			RREG32_SOC15_IP(GC, reg) :
			RREG32_SOC15_IP(MMHUB, reg);

		if (enable)
			tmp |= hub->vm_cntx_cntl_vm_fault;
		else
			tmp &= ~hub->vm_cntx_cntl_vm_fault;

		(hub_type == AMDGPU_GFXHUB_0) ?
			WREG32_SOC15_IP(GC, reg, tmp) :
			WREG32_SOC15_IP(MMHUB, reg, tmp);
	}
}

void amdgpu_gmc_get_vbios_allocations(struct amdgpu_device *adev)
{
	unsigned size;

	/*
	 * Some ASICs need to keep the stolen VGA memory (the pre-OS
	 * framebuffer allocated by the VBIOS) reserved for the lifetime
	 * of the driver; on everything else it can be released after
	 * initialization.
	 */
	switch (adev->asic_type) {
	case CHIP_VEGA10:
	case CHIP_RAVEN:
	case CHIP_RENOIR:
		adev->mman.keep_stolen_vga_memory = true;
		break;
	default:
		adev->mman.keep_stolen_vga_memory = false;
		break;
	}

	if (amdgpu_sriov_vf(adev) ||
	    !amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_DCE)) {
		size = 0;
	} else {
		size = amdgpu_gmc_get_vbios_fb_size(adev);

		if (adev->mman.keep_stolen_vga_memory)
			size = max(size, (unsigned)AMDGPU_VBIOS_VGA_ALLOCATION);
	}

	/* set to 0 if the pre-OS buffer uses up most of vram */
	if ((adev->gmc.real_vram_size - size) < (8 * 1024 * 1024))
		size = 0;

	if (size > AMDGPU_VBIOS_VGA_ALLOCATION) {
		adev->mman.stolen_vga_size = AMDGPU_VBIOS_VGA_ALLOCATION;
		adev->mman.stolen_extended_size = size - adev->mman.stolen_vga_size;
	} else {
		adev->mman.stolen_vga_size = size;
		adev->mman.stolen_extended_size = 0;
	}
}
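
/**
 * amdgpu_gmc_init_pdb0 - initialize PDB0
 *
 * @adev: amdgpu_device pointer
 *
 * This function is only used when the GART page table is used
 * for FB address translation. In such a case, we construct
 * a 2-level system VM page table: PDB0->PTB, to cover both
 * VRAM of the hive and system memory.
 *
 * PDB0 is static, initialized once on driver initialization.
 * The first n entries of PDB0 are used as PTE by setting
 * the P bit to 1, pointing to VRAM. The n+1'th entry points
 * to a big PTB covering system memory.
 */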
void amdgpu_gmc_init_pdb0(struct amdgpu_device *adev)
{
	int i;
	uint64_t flags = adev->gart.gart_pte_flags;
	/* Each PDE0 (used as PTE) covers (2^vmid0_page_table_block_size)*2M
	 */
	u64 vram_size = adev->gmc.xgmi.node_segment_size * adev->gmc.xgmi.num_physical_nodes;
	u64 pde0_page_size = (1ULL << adev->gmc.vmid0_page_table_block_size) << 21;
	u64 vram_addr = adev->vm_manager.vram_base_offset -
		adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
	u64 vram_end = vram_addr + vram_size;
	u64 gart_ptb_gpu_pa = amdgpu_gmc_vram_pa(adev, adev->gart.bo);
	int idx;

	if (!drm_dev_enter(adev_to_drm(adev), &idx))
		return;

	flags |= AMDGPU_PTE_VALID | AMDGPU_PTE_READABLE;
	flags |= AMDGPU_PTE_WRITEABLE;
	flags |= AMDGPU_PTE_SNOOPED;
	flags |= AMDGPU_PTE_FRAG((adev->gmc.vmid0_page_table_block_size + 9*1));
	flags |= AMDGPU_PDE_PTE;

	/* The first n PDE0 entries are used as PTE,
	 * covering vram
	 */
	for (i = 0; vram_addr < vram_end; i++, vram_addr += pde0_page_size)
		amdgpu_gmc_set_pte_pde(adev, adev->gmc.ptr_pdb0, i, vram_addr, flags);

	/* The n+1'th PDE0 entry points to a huge
	 * PTB covering system memory
	 */
	flags = AMDGPU_PTE_VALID;
	flags |= AMDGPU_PDE_BFS(0) | AMDGPU_PTE_SNOOPED;
	/* Requires gart_ptb_gpu_pa to be 4K aligned */
	amdgpu_gmc_set_pte_pde(adev, adev->gmc.ptr_pdb0, i, gart_ptb_gpu_pa, flags);
	drm_dev_exit(idx);
}
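
/**
 * amdgpu_gmc_vram_mc2pa - calculate a VRAM buffer's physical address from
 * its MC address
 *
 * @adev: amdgpu_device pointer
 * @mc_addr: MC address of the buffer
 */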
uint64_t amdgpu_gmc_vram_mc2pa(struct amdgpu_device *adev, uint64_t mc_addr)
{
	return mc_addr - adev->gmc.vram_start + adev->vm_manager.vram_base_offset;
}
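
/**
 * amdgpu_gmc_vram_pa - calculate a VRAM buffer object's physical address
 * from the GPU's view
 *
 * @adev: amdgpu_device pointer
 * @bo: amdgpu buffer object
 */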
uint64_t amdgpu_gmc_vram_pa(struct amdgpu_device *adev, struct amdgpu_bo *bo)
{
	return amdgpu_gmc_vram_mc2pa(adev, amdgpu_bo_gpu_offset(bo));
}
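
/**
 * amdgpu_gmc_vram_cpu_pa - calculate a VRAM buffer object's physical address
 * from the CPU's view
 *
 * @adev: amdgpu_device pointer
 * @bo: amdgpu buffer object
 */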
uint64_t amdgpu_gmc_vram_cpu_pa(struct amdgpu_device *adev, struct amdgpu_bo *bo)
{
	return amdgpu_bo_gpu_offset(bo) - adev->gmc.vram_start + adev->gmc.aper_base;
}

void amdgpu_gmc_get_reserved_allocation(struct amdgpu_device *adev)
{
	/* Some ASICs have a region of VRAM that must stay reserved
	 * and untouched by the driver.
	 */
	adev->mman.stolen_reserved_offset = 0;
	adev->mman.stolen_reserved_size = 0;

	switch (adev->asic_type) {
	case CHIP_YELLOW_CARP:
		if (amdgpu_discovery == 0) {
			adev->mman.stolen_reserved_offset = 0x1ffb0000;
			adev->mman.stolen_reserved_size = 64 * PAGE_SIZE;
		}
		break;
	default:
		break;
	}
}