#include <linux/io-64-nonatomic-lo-hi.h>

#include "amdgpu.h"
#include "amdgpu_gmc.h"
#include "amdgpu_ras.h"
#include "amdgpu_xgmi.h"
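
/**
 * amdgpu_gmc_pdb0_alloc - allocate VRAM for PDB0
 *
 * @adev: amdgpu_device pointer
 *
 * Allocate a page-aligned, pinned and CPU-mapped BO in VRAM to hold PDB0,
 * the root page directory used when VMID0 translates the frame buffer
 * through the GART. One 8-byte entry is needed per PDE0, plus one extra
 * entry for the GART page table block.
 *
 * Returns:
 * 0 on success, negative error code otherwise.
 */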
int amdgpu_gmc_pdb0_alloc(struct amdgpu_device *adev)
{
	int r;
	struct amdgpu_bo_param bp;
	u64 vram_size = adev->gmc.xgmi.node_segment_size * adev->gmc.xgmi.num_physical_nodes;
	uint32_t pde0_page_shift = adev->gmc.vmid0_page_table_block_size + 21;
	uint32_t npdes = (vram_size + (1ULL << pde0_page_shift) - 1) >> pde0_page_shift;

	memset(&bp, 0, sizeof(bp));
	bp.size = PAGE_ALIGN((npdes + 1) * 8);
	bp.byte_align = PAGE_SIZE;
	bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
	bp.flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
		AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
	bp.type = ttm_bo_type_kernel;
	bp.resv = NULL;
	bp.bo_ptr_size = sizeof(struct amdgpu_bo);

	r = amdgpu_bo_create(adev, &bp, &adev->gmc.pdb0_bo);
	if (r)
		return r;

	r = amdgpu_bo_reserve(adev->gmc.pdb0_bo, false);
	if (unlikely(r != 0))
		goto bo_reserve_failure;

	r = amdgpu_bo_pin(adev->gmc.pdb0_bo, AMDGPU_GEM_DOMAIN_VRAM);
	if (r)
		goto bo_pin_failure;
	r = amdgpu_bo_kmap(adev->gmc.pdb0_bo, &adev->gmc.ptr_pdb0);
	if (r)
		goto bo_kmap_failure;

	amdgpu_bo_unreserve(adev->gmc.pdb0_bo);
	return 0;

bo_kmap_failure:
	amdgpu_bo_unpin(adev->gmc.pdb0_bo);
bo_pin_failure:
	amdgpu_bo_unreserve(adev->gmc.pdb0_bo);
bo_reserve_failure:
	amdgpu_bo_unref(&adev->gmc.pdb0_bo);
	return r;
}
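
/**
 * amdgpu_gmc_get_pde_for_bo - get the PDE for a BO
 *
 * @bo: the BO to get the PDE for
 * @level: the level in the PD hierarchy
 * @addr: resulting addr
 * @flags: resulting flags
 *
 * Get the address and flags to be used for a PDE (page directory entry).
 */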
void amdgpu_gmc_get_pde_for_bo(struct amdgpu_bo *bo, int level,
			       uint64_t *addr, uint64_t *flags)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);

	switch (bo->tbo.mem.mem_type) {
	case TTM_PL_TT:
		*addr = bo->tbo.ttm->dma_address[0];
		break;
	case TTM_PL_VRAM:
		*addr = amdgpu_bo_gpu_offset(bo);
		break;
	default:
		*addr = 0;
		break;
	}
	*flags = amdgpu_ttm_tt_pde_flags(bo->tbo.ttm, &bo->tbo.mem);
	amdgpu_gmc_get_vm_pde(adev, level, addr, flags);
}
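
/*
 * amdgpu_gmc_pd_addr - return the address of the root directory
 */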
uint64_t amdgpu_gmc_pd_addr(struct amdgpu_bo *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	uint64_t pd_addr;

	/* TODO: move to ASIC specific code */
	if (adev->asic_type >= CHIP_VEGA10) {
		uint64_t flags = AMDGPU_PTE_VALID;

		amdgpu_gmc_get_pde_for_bo(bo, -1, &pd_addr, &flags);
		pd_addr |= flags;
	} else {
		pd_addr = amdgpu_bo_gpu_offset(bo);
	}
	return pd_addr;
}
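
/**
 * amdgpu_gmc_set_pte_pde - update the page tables using CPU
 *
 * @adev: amdgpu_device pointer
 * @cpu_pt_addr: cpu address of the page table
 * @gpu_page_idx: entry in the page table to update
 * @addr: dst addr to write into pte/pde
 * @flags: access flags
 *
 * Update a single page table entry through the CPU mapping of the table.
 */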
int amdgpu_gmc_set_pte_pde(struct amdgpu_device *adev, void *cpu_pt_addr,
			   uint32_t gpu_page_idx, uint64_t addr,
			   uint64_t flags)
{
	void __iomem *ptr = (void *)cpu_pt_addr;
	uint64_t value;

	/*
	 * Mask the address to a 4K-aligned 48-bit address and merge in the
	 * access flags before writing the 8-byte entry.
	 */
	value = addr & 0x0000FFFFFFFFF000ULL;
	value |= flags;
	writeq(value, ptr + (gpu_page_idx * 8));
	return 0;
}
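
/**
 * amdgpu_gmc_agp_addr - return the address in the AGP address space
 *
 * @bo: TTM BO which needs the address, must be in GTT domain
 *
 * Tries to figure out how to access the BO through the AGP aperture. Returns
 * AMDGPU_BO_INVALID_OFFSET if that is not possible.
 */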
uint64_t amdgpu_gmc_agp_addr(struct ttm_buffer_object *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);

	if (bo->ttm->num_pages != 1 || bo->ttm->caching == ttm_cached)
		return AMDGPU_BO_INVALID_OFFSET;

	if (bo->ttm->dma_address[0] + PAGE_SIZE >= adev->gmc.agp_size)
		return AMDGPU_BO_INVALID_OFFSET;

	return adev->gmc.agp_start + bo->ttm->dma_address[0];
}
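
/**
 * amdgpu_gmc_vram_location - try to find VRAM location
 *
 * @adev: amdgpu device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 * @base: base address at which to put VRAM
 *
 * Function will try to place VRAM at the base address provided
 * as parameter.
 */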
void amdgpu_gmc_vram_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc,
			      u64 base)
{
	uint64_t limit = (uint64_t)amdgpu_vram_limit << 20;

	mc->vram_start = base;
	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
	if (limit && limit < mc->real_vram_size)
		mc->real_vram_size = limit;

	if (mc->xgmi.num_physical_nodes == 0) {
		mc->fb_start = mc->vram_start;
		mc->fb_end = mc->vram_end;
	}
	dev_info(adev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
		 mc->mc_vram_size >> 20, mc->vram_start,
		 mc->vram_end, mc->real_vram_size >> 20);
}
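
/**
 * amdgpu_gmc_sysvm_location - place VRAM and GART in the system VM aperture
 *
 * @adev: amdgpu device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 *
 * Only used when the GART page table is also used for frame buffer
 * translation. The VRAM of the whole XGMI hive is mapped first, with this
 * node's segment selected by its physical node id, and the GART aperture
 * is placed directly behind the hive's VRAM.
 */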
void amdgpu_gmc_sysvm_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc)
{
	u64 hive_vram_start = 0;
	u64 hive_vram_end = mc->xgmi.node_segment_size * mc->xgmi.num_physical_nodes - 1;

	mc->vram_start = mc->xgmi.node_segment_size * mc->xgmi.physical_node_id;
	mc->vram_end = mc->vram_start + mc->xgmi.node_segment_size - 1;
	mc->gart_start = hive_vram_end + 1;
	mc->gart_end = mc->gart_start + mc->gart_size - 1;
	mc->fb_start = hive_vram_start;
	mc->fb_end = hive_vram_end;
	dev_info(adev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
		 mc->mc_vram_size >> 20, mc->vram_start,
		 mc->vram_end, mc->real_vram_size >> 20);
	dev_info(adev->dev, "GART: %lluM 0x%016llX - 0x%016llX\n",
		 mc->gart_size >> 20, mc->gart_start, mc->gart_end);
}
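
/**
 * amdgpu_gmc_gart_location - try to find GART location
 *
 * @adev: amdgpu device structure holding all necessary information
 * @mc: memory controller structure holding GART information
 *
 * Try to place GART before or after VRAM. If the GART is bigger than the
 * remaining space, shrink it to fit; this function therefore never fails.
 */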
void amdgpu_gmc_gart_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc)
{
	const uint64_t four_gb = 0x100000000ULL;
	u64 size_af, size_bf;
	/* To avoid the address hole, limit the max mc address to
	 * AMDGPU_GMC_HOLE_START.
	 */
	u64 max_mc_address = min(adev->gmc.mc_mask, AMDGPU_GMC_HOLE_START - 1);

	/* Measure the space before and after the FB; align to 4GB since
	 * BOs must not cross a 4GB segment.
	 */
	size_bf = mc->fb_start;
	size_af = max_mc_address + 1 - ALIGN(mc->fb_end + 1, four_gb);

	if (mc->gart_size > max(size_bf, size_af)) {
		dev_warn(adev->dev, "limiting GART\n");
		mc->gart_size = max(size_bf, size_af);
	}

	if ((size_bf >= mc->gart_size && size_bf < size_af) ||
	    (size_af < mc->gart_size))
		mc->gart_start = 0;
	else
		mc->gart_start = max_mc_address - mc->gart_size + 1;

	mc->gart_start &= ~(four_gb - 1);
	mc->gart_end = mc->gart_start + mc->gart_size - 1;
	dev_info(adev->dev, "GART: %lluM 0x%016llX - 0x%016llX\n",
		 mc->gart_size >> 20, mc->gart_start, mc->gart_end);
}
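
/**
 * amdgpu_gmc_agp_location - try to find AGP location
 *
 * @adev: amdgpu device structure holding all necessary information
 * @mc: memory controller structure holding GART information
 *
 * Try to find a place for the AGP BAR in the MC address space;
 * the AGP aperture is assigned the largest available hole.
 * Should be called after VRAM and GART locations are setup.
 */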
void amdgpu_gmc_agp_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc)
{
	const uint64_t sixteen_gb = 1ULL << 34;
	const uint64_t sixteen_gb_mask = ~(sixteen_gb - 1);
	u64 size_af, size_bf;

	/* The AGP aperture is not used under SR-IOV */
	if (amdgpu_sriov_vf(adev)) {
		mc->agp_start = 0xffffffffffff;
		mc->agp_end = 0x0;
		mc->agp_size = 0;

		return;
	}

	if (mc->fb_start > mc->gart_start) {
		size_bf = (mc->fb_start & sixteen_gb_mask) -
			ALIGN(mc->gart_end + 1, sixteen_gb);
		size_af = mc->mc_mask + 1 - ALIGN(mc->fb_end + 1, sixteen_gb);
	} else {
		size_bf = mc->fb_start & sixteen_gb_mask;
		size_af = (mc->gart_start & sixteen_gb_mask) -
			ALIGN(mc->fb_end + 1, sixteen_gb);
	}

	if (size_bf > size_af) {
		mc->agp_start = (mc->fb_start - size_bf) & sixteen_gb_mask;
		mc->agp_size = size_bf;
	} else {
		mc->agp_start = ALIGN(mc->fb_end + 1, sixteen_gb);
		mc->agp_size = size_af;
	}

	mc->agp_end = mc->agp_start + mc->agp_size - 1;
	dev_info(adev->dev, "AGP: %lluM 0x%016llX - 0x%016llX\n",
		 mc->agp_size >> 20, mc->agp_start, mc->agp_end);
}
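
/**
 * amdgpu_gmc_filter_faults - filter VM faults
 *
 * @adev: amdgpu device structure
 * @addr: address of the VM fault
 * @pasid: PASID of the process causing the fault
 * @timestamp: timestamp of the fault
 *
 * Returns:
 * True if the fault was filtered and should not be processed further.
 * False if the fault is a new one and needs to be handled.
 */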
bool amdgpu_gmc_filter_faults(struct amdgpu_device *adev, uint64_t addr,
			      uint16_t pasid, uint64_t timestamp)
{
	struct amdgpu_gmc *gmc = &adev->gmc;
	uint64_t stamp, key = addr << 4 | pasid;
	struct amdgpu_gmc_fault *fault;
	uint32_t hash;

	/* If we don't have space left in the ring buffer return immediately */
	stamp = max(timestamp, AMDGPU_GMC_FAULT_TIMEOUT + 1) -
		AMDGPU_GMC_FAULT_TIMEOUT;
	if (gmc->fault_ring[gmc->last_fault].timestamp >= stamp)
		return true;

	/* Try to find the fault in the hash */
	hash = hash_64(key, AMDGPU_GMC_FAULT_HASH_ORDER);
	fault = &gmc->fault_ring[gmc->fault_hash[hash].idx];
	while (fault->timestamp >= stamp) {
		uint64_t tmp;

		if (fault->key == key)
			return true;

		tmp = fault->timestamp;
		fault = &gmc->fault_ring[fault->next];

		/* Check if the entry was reused */
		if (fault->timestamp >= tmp)
			break;
	}

	/* Add the fault to the ring */
	fault = &gmc->fault_ring[gmc->last_fault];
	fault->key = key;
	fault->timestamp = timestamp;

	/* And update the hash */
	fault->next = gmc->fault_hash[hash].idx;
	gmc->fault_hash[hash].idx = gmc->last_fault++;
	return false;
}
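
/*
 * amdgpu_gmc_ras_late_init - late RAS init for the UMC, MMHUB and XGMI blocks
 */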
int amdgpu_gmc_ras_late_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->umc.ras_funcs &&
	    adev->umc.ras_funcs->ras_late_init) {
		r = adev->umc.ras_funcs->ras_late_init(adev);
		if (r)
			return r;
	}

	if (adev->mmhub.ras_funcs &&
	    adev->mmhub.ras_funcs->ras_late_init) {
		r = adev->mmhub.ras_funcs->ras_late_init(adev);
		if (r)
			return r;
	}

	if (!adev->gmc.xgmi.connected_to_cpu)
		adev->gmc.xgmi.ras_funcs = &xgmi_ras_funcs;

	if (adev->gmc.xgmi.ras_funcs &&
	    adev->gmc.xgmi.ras_funcs->ras_late_init) {
		r = adev->gmc.xgmi.ras_funcs->ras_late_init(adev);
		if (r)
			return r;
	}

	return 0;
}
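
/*
 * amdgpu_gmc_ras_fini - tear down the RAS state set up in
 * amdgpu_gmc_ras_late_init
 */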
void amdgpu_gmc_ras_fini(struct amdgpu_device *adev)
{
	if (adev->umc.ras_funcs &&
	    adev->umc.ras_funcs->ras_fini)
		adev->umc.ras_funcs->ras_fini(adev);

	if (adev->mmhub.ras_funcs &&
	    adev->mmhub.ras_funcs->ras_fini)
		adev->mmhub.ras_funcs->ras_fini(adev);

	if (adev->gmc.xgmi.ras_funcs &&
	    adev->gmc.xgmi.ras_funcs->ras_fini)
		adev->gmc.xgmi.ras_funcs->ras_fini(adev);
}
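
/*
 * The latest engine allocation on gfx9/10 is:
 * Engine 2, 3: firmware
 * Engine 0, 1, 4~16: amdgpu ring,
 * subject to change when ring number changes
 * Engine 17: Gart flushes
 */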
#define GFXHUB_FREE_VM_INV_ENGS_BITMAP	0x1FFF3
#define MMHUB_FREE_VM_INV_ENGS_BITMAP	0x1FFF3

int amdgpu_gmc_allocate_vm_inv_eng(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	unsigned vm_inv_engs[AMDGPU_MAX_VMHUBS] =
		{GFXHUB_FREE_VM_INV_ENGS_BITMAP, MMHUB_FREE_VM_INV_ENGS_BITMAP,
		GFXHUB_FREE_VM_INV_ENGS_BITMAP};
	unsigned i;
	unsigned vmhub, inv_eng;

	for (i = 0; i < adev->num_rings; ++i) {
		ring = adev->rings[i];
		vmhub = ring->funcs->vmhub;

		/* The MES ring does not need a VM invalidation engine */
		if (ring == &adev->mes.ring)
			continue;

		inv_eng = ffs(vm_inv_engs[vmhub]);
		if (!inv_eng) {
			dev_err(adev->dev, "no VM inv eng for ring %s\n",
				ring->name);
			return -EINVAL;
		}

		ring->vm_inv_eng = inv_eng - 1;
		vm_inv_engs[vmhub] &= ~(1 << ring->vm_inv_eng);

		dev_info(adev->dev, "ring %s uses VM inv eng %u on hub %u\n",
			 ring->name, ring->vm_inv_eng, ring->funcs->vmhub);
	}

	return 0;
}
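
/**
 * amdgpu_gmc_tmz_set - check and set if a device supports TMZ
 *
 * @adev: amdgpu_device pointer
 *
 * Check and set if the device @adev supports Trusted Memory
 * Zones (TMZ).
 */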
void amdgpu_gmc_tmz_set(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_RAVEN:
	case CHIP_RENOIR:
		if (amdgpu_tmz == 0) {
			adev->gmc.tmz_enabled = false;
			dev_info(adev->dev,
				 "Trusted Memory Zone (TMZ) feature disabled (cmd line)\n");
		} else {
			adev->gmc.tmz_enabled = true;
			dev_info(adev->dev,
				 "Trusted Memory Zone (TMZ) feature enabled\n");
		}
		break;
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_NAVI12:
	case CHIP_VANGOGH:
		/* Still experimental; don't enable it by default yet. */
		if (amdgpu_tmz < 1) {
			adev->gmc.tmz_enabled = false;
			dev_info(adev->dev,
				 "Trusted Memory Zone (TMZ) feature disabled as experimental (default)\n");
		} else {
			adev->gmc.tmz_enabled = true;
			dev_info(adev->dev,
				 "Trusted Memory Zone (TMZ) feature enabled as experimental (cmd line)\n");
		}
		break;
	default:
		adev->gmc.tmz_enabled = false;
		dev_warn(adev->dev,
			 "Trusted Memory Zone (TMZ) feature not supported\n");
		break;
	}
}
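
/**
 * amdgpu_gmc_noretry_set - set per asic noretry defaults
 *
 * @adev: amdgpu_device pointer
 *
 * Set a per asic default for the no-retry parameter.
 */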
void amdgpu_gmc_noretry_set(struct amdgpu_device *adev)
{
	struct amdgpu_gmc *gmc = &adev->gmc;

	switch (adev->asic_type) {
	case CHIP_VEGA10:
	case CHIP_VEGA20:
	case CHIP_ARCTURUS:
	case CHIP_ALDEBARAN:
		/*
		 * noretry = 0 will cause kfd page fault tests fail
		 * for some ASICs, so set default to 1 for these ASICs
		 * and let the user override it if needed.
		 */
		if (amdgpu_noretry == -1)
			gmc->noretry = 1;
		else
			gmc->noretry = amdgpu_noretry;
		break;
	case CHIP_RAVEN:
	default:
		/* Raven currently has issues with noretry
		 * regardless of what we decide for other
		 * asics, we should leave raven with
		 * noretry = 0 until we root cause the
		 * issues.
		 *
		 * default this to 0 for now, but we may want
		 * to change this in the future for certain
		 * asics, if retry faults are not supported or
		 * other issues are found.
		 */
		if (amdgpu_noretry == -1)
			gmc->noretry = 0;
		else
			gmc->noretry = amdgpu_noretry;
		break;
	}
}
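
/**
 * amdgpu_gmc_set_vm_fault_masks - set VM fault reporting per hub
 *
 * @adev: amdgpu_device pointer
 * @hub_type: index of the VM hub to program
 * @enable: true to enable VM fault reporting, false to mask it
 *
 * Program the fault enable bits in the CNTL registers of all 16 VM
 * contexts of the given hub.
 */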
void amdgpu_gmc_set_vm_fault_masks(struct amdgpu_device *adev, int hub_type,
				   bool enable)
{
	struct amdgpu_vmhub *hub;
	u32 tmp, reg, i;

	hub = &adev->vmhub[hub_type];
	for (i = 0; i < 16; i++) {
		reg = hub->vm_context0_cntl + hub->ctx_distance * i;

		tmp = RREG32(reg);
		if (enable)
			tmp |= hub->vm_cntx_cntl_vm_fault;
		else
			tmp &= ~hub->vm_cntx_cntl_vm_fault;

		WREG32(reg, tmp);
	}
}
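
/*
 * amdgpu_gmc_get_vbios_allocations - determine how much VRAM the VBIOS
 * pre-OS console has allocated, and split the reservation into the stolen
 * VGA part and the extended part.
 */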
void amdgpu_gmc_get_vbios_allocations(struct amdgpu_device *adev)
{
	unsigned size;

	/*
	 * Some memory client outside of the driver writes into the first
	 * 8M of VRAM on S3 resume. This overwrites the GART, which by
	 * default is placed in the first 8M, and causes VM faults once
	 * GTT is accessed. Keep the stolen VGA reservation on these
	 * ASICs to work around this.
	 */
	switch (adev->asic_type) {
	case CHIP_VEGA10:
	case CHIP_RAVEN:
	case CHIP_RENOIR:
		adev->mman.keep_stolen_vga_memory = true;
		break;
	default:
		adev->mman.keep_stolen_vga_memory = false;
		break;
	}

	if (amdgpu_sriov_vf(adev) ||
	    !amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_DCE)) {
		size = 0;
	} else {
		size = amdgpu_gmc_get_vbios_fb_size(adev);

		if (adev->mman.keep_stolen_vga_memory)
			size = max(size, (unsigned)AMDGPU_VBIOS_VGA_ALLOCATION);
	}

	/* set to 0 if the pre-OS buffer uses up most of vram */
	if ((adev->gmc.real_vram_size - size) < (8 * 1024 * 1024))
		size = 0;

	if (size > AMDGPU_VBIOS_VGA_ALLOCATION) {
		adev->mman.stolen_vga_size = AMDGPU_VBIOS_VGA_ALLOCATION;
		adev->mman.stolen_extended_size = size - adev->mman.stolen_vga_size;
	} else {
		adev->mman.stolen_vga_size = size;
		adev->mman.stolen_extended_size = 0;
	}
}
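
/**
 * amdgpu_gmc_init_pdb0 - initialize PDB0
 *
 * @adev: amdgpu_device pointer
 *
 * Only used when the GART table is used for frame buffer address
 * translation. In that case we construct a 2-level system VM page table,
 * PDB0->PTB, covering both the VRAM of the hive and system memory.
 *
 * PDB0 is static, initialized once on driver initialization. The first
 * n entries of PDB0 are used as PTEs by setting the P bit to 1, pointing
 * to VRAM. The n+1'th entry points to a big PTB covering system memory.
 */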
void amdgpu_gmc_init_pdb0(struct amdgpu_device *adev)
{
	int i;
	uint64_t flags = adev->gart.gart_pte_flags;
	/* Each PDE0 (used as PTE) covers (2^vmid0_page_table_block_size)*2M
	 */
	u64 vram_size = adev->gmc.xgmi.node_segment_size * adev->gmc.xgmi.num_physical_nodes;
	u64 pde0_page_size = (1ULL << adev->gmc.vmid0_page_table_block_size) << 21;
	u64 vram_addr = adev->vm_manager.vram_base_offset -
		adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
	u64 vram_end = vram_addr + vram_size;
	u64 gart_ptb_gpu_pa = amdgpu_gmc_vram_pa(adev, adev->gart.bo);

	flags |= AMDGPU_PTE_VALID | AMDGPU_PTE_READABLE;
	flags |= AMDGPU_PTE_WRITEABLE;
	flags |= AMDGPU_PTE_SNOOPED;
	flags |= AMDGPU_PTE_FRAG((adev->gmc.vmid0_page_table_block_size + 9*1));
	flags |= AMDGPU_PDE_PTE;

	/* The first n PDE0 entries are used as PTE,
	 * pointing to vram
	 */
	for (i = 0; vram_addr < vram_end; i++, vram_addr += pde0_page_size)
		amdgpu_gmc_set_pte_pde(adev, adev->gmc.ptr_pdb0, i, vram_addr, flags);

	/* The n+1'th PDE0 entry points to a huge
	 * PTB which has more than 512 entries, each
	 * pointing to a 4K system page
	 */
	flags = AMDGPU_PTE_VALID;
	flags |= AMDGPU_PDE_BFS(0) | AMDGPU_PTE_SNOOPED;
	/* Requires gart_ptb_gpu_pa to be 4K aligned */
	amdgpu_gmc_set_pte_pde(adev, adev->gmc.ptr_pdb0, i, gart_ptb_gpu_pa, flags);
}
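
/**
 * amdgpu_gmc_vram_mc2pa - calculate vram buffer's physical address from MC
 * address
 *
 * @adev: amdgpu_device pointer
 * @mc_addr: MC address of buffer
 */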
uint64_t amdgpu_gmc_vram_mc2pa(struct amdgpu_device *adev, uint64_t mc_addr)
{
	return mc_addr - adev->gmc.vram_start + adev->vm_manager.vram_base_offset;
}
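
/**
 * amdgpu_gmc_vram_pa - calculate vram buffer object's physical address from
 * GPU's view
 *
 * @adev: amdgpu_device pointer
 * @bo: amdgpu buffer object
 */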
uint64_t amdgpu_gmc_vram_pa(struct amdgpu_device *adev, struct amdgpu_bo *bo)
{
	return amdgpu_gmc_vram_mc2pa(adev, amdgpu_bo_gpu_offset(bo));
}
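
/**
 * amdgpu_gmc_vram_cpu_pa - calculate vram buffer object's physical address
 * from CPU's view
 *
 * @adev: amdgpu_device pointer
 * @bo: amdgpu buffer object
 */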
uint64_t amdgpu_gmc_vram_cpu_pa(struct amdgpu_device *adev, struct amdgpu_bo *bo)
{
	return amdgpu_bo_gpu_offset(bo) - adev->gmc.vram_start + adev->gmc.aper_base;
}