/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/firmware.h>
#include "amdgpu.h"
#include "gmc_v9_0.h"

#include "vega10/soc15ip.h"
#include "vega10/HDP/hdp_4_0_offset.h"
#include "vega10/HDP/hdp_4_0_sh_mask.h"
#include "vega10/GC/gc_9_0_sh_mask.h"
#include "vega10/vega10_enum.h"

#include "soc15_common.h"

#include "nbio_v6_1.h"
#include "gfxhub_v1_0.h"
#include "mmhub_v1_0.h"

/* The Data Fabric (DF) register describing the DRAM channel configuration
 * is not in the IP headers yet, so define it and its fields locally.
 */
#define mmDF_CS_AON0_DramBaseAddress0				0x0044
#define mmDF_CS_AON0_DramBaseAddress0_BASE_IDX			0

#define DF_CS_AON0_DramBaseAddress0__AddrRngVal__SHIFT		0x0
#define DF_CS_AON0_DramBaseAddress0__LgcyMmioHoleEn__SHIFT	0x1
#define DF_CS_AON0_DramBaseAddress0__IntLvNumChan__SHIFT	0x4
#define DF_CS_AON0_DramBaseAddress0__IntLvAddrSel__SHIFT	0x8
#define DF_CS_AON0_DramBaseAddress0__DramBaseAddr__SHIFT	0xc
#define DF_CS_AON0_DramBaseAddress0__AddrRngVal_MASK		0x00000001L
#define DF_CS_AON0_DramBaseAddress0__LgcyMmioHoleEn_MASK	0x00000002L
#define DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK		0x000000F0L
#define DF_CS_AON0_DramBaseAddress0__IntLvAddrSel_MASK		0x00000700L
#define DF_CS_AON0_DramBaseAddress0__DramBaseAddr_MASK		0xFFFFF000L

#define AMDGPU_NUM_OF_VMIDS			8

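/* Golden register settings for the Vega10 HDP block, consumed by
 * amdgpu_program_register_sequence() as {offset, AND mask, OR value}
 * triples when the GART is enabled.
 */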
static const u32 golden_settings_vega10_hdp[] =
{
	0xf64, 0x0fffffff, 0x00000000,
	0xf65, 0x0fffffff, 0x00000000,
	0xf66, 0x0fffffff, 0x00000000,
	0xf67, 0x0fffffff, 0x00000000,
	0xf68, 0x0fffffff, 0x00000000,
	0xf6a, 0x0fffffff, 0x00000000,
	0xf6b, 0x0fffffff, 0x00000000,
	0xf6c, 0x0fffffff, 0x00000000,
	0xf6d, 0x0fffffff, 0x00000000,
	0xf6e, 0x0fffffff, 0x00000000,
};

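/**
 * gmc_v9_0_vm_fault_interrupt_state - enable/disable VM fault interrupts
 *
 * @adev: amdgpu_device pointer
 * @src: interrupt source this state change applies to
 * @type: unused
 * @state: interrupt state to set
 *
 * Sets or clears the protection fault interrupt enable bits in
 * VM_CONTEXT*_CNTL for all 16 VM contexts on both the GFX and MM hubs.
 */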
static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *src,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	struct amdgpu_vmhub *hub;
	u32 tmp, reg, bits, i;

	bits = VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
	       VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
	       VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
	       VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
	       VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
	       VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
	       VM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK;

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		/* MM HUB */
		hub = &adev->vmhub[AMDGPU_MMHUB];
		for (i = 0; i < 16; i++) {
			reg = hub->vm_context0_cntl + i;
			tmp = RREG32(reg);
			tmp &= ~bits;
			WREG32(reg, tmp);
		}

		/* GFX HUB */
		hub = &adev->vmhub[AMDGPU_GFXHUB];
		for (i = 0; i < 16; i++) {
			reg = hub->vm_context0_cntl + i;
			tmp = RREG32(reg);
			tmp &= ~bits;
			WREG32(reg, tmp);
		}
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		/* MM HUB */
		hub = &adev->vmhub[AMDGPU_MMHUB];
		for (i = 0; i < 16; i++) {
			reg = hub->vm_context0_cntl + i;
			tmp = RREG32(reg);
			tmp |= bits;
			WREG32(reg, tmp);
		}

		/* GFX HUB */
		hub = &adev->vmhub[AMDGPU_GFXHUB];
		for (i = 0; i < 16; i++) {
			reg = hub->vm_context0_cntl + i;
			tmp = RREG32(reg);
			tmp |= bits;
			WREG32(reg, tmp);
		}
		break;
	default:
		break;
	}

	return 0;
}

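/**
 * gmc_v9_0_process_interrupt - report a VM page fault
 *
 * @adev: amdgpu_device pointer
 * @source: interrupt source the fault came from
 * @entry: decoded interrupt vector entry
 *
 * Reassembles the faulting page address from the IV entry, reads and
 * clears the L2 protection fault status (not accessible under SR-IOV),
 * and logs the fault, rate-limited.
 */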
static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
				struct amdgpu_irq_src *source,
				struct amdgpu_iv_entry *entry)
{
	struct amdgpu_vmhub *hub = &adev->vmhub[entry->vm_id_src];
	uint32_t status = 0;
	u64 addr;

	/* The fault address is split across the IV entry: bits 12-43 come
	 * from src_data[0], bits 44-47 from the low nibble of src_data[1].
	 */
	addr = (u64)entry->src_data[0] << 12;
	addr |= ((u64)entry->src_data[1] & 0xf) << 44;

	if (!amdgpu_sriov_vf(adev)) {
		status = RREG32(hub->vm_l2_pro_fault_status);
		/* Bit 0 (CLEAR_PROTECTION_FAULT_STATUS_ADDR) resets the
		 * fault status register.
		 */
		WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1);
	}

	if (printk_ratelimit()) {
		dev_err(adev->dev,
			"[%s] VMC page fault (src_id:%u ring:%u vm_id:%u pas_id:%u)\n",
			entry->vm_id_src ? "mmhub" : "gfxhub",
			entry->src_id, entry->ring_id, entry->vm_id,
			entry->pas_id);
		dev_err(adev->dev, "  at page 0x%016llx from %d\n",
			addr, entry->client_id);
		if (!amdgpu_sriov_vf(adev))
			dev_err(adev->dev,
				"VM_L2_PROTECTION_FAULT_STATUS:0x%08X\n",
				status);
	}

	return 0;
}

static const struct amdgpu_irq_src_funcs gmc_v9_0_irq_funcs = {
	.set = gmc_v9_0_vm_fault_interrupt_state,
	.process = gmc_v9_0_process_interrupt,
};

static void gmc_v9_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->mc.vm_fault.num_types = 1;
	adev->mc.vm_fault.funcs = &gmc_v9_0_irq_funcs;
}

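/**
 * gmc_v9_0_get_invalidate_req - build a VM TLB invalidation request
 *
 * @vm_id: VMID whose translations should be invalidated
 *
 * Returns the VM_INVALIDATE_ENG*_REQ register value that invalidates
 * all L1 and L2 PTE/PDE cache levels for the given VMID.
 */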
static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vm_id)
{
	u32 req = 0;

	/* invalidate using legacy mode on vm_id */
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
			    PER_VMID_INVALIDATE_REQ, 1 << vm_id);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, 0);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE1, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE2, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L1_PTES, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
			    CLEAR_PROTECTION_FAULT_STATUS_ADDR, 0);

	return req;
}

/*
 * GART
 * VMID 0 is the physical GPU addresses as used by the kernel.
 * VMIDs 1-15 are used for userspace clients and are handled
 * by the amdgpu vm/hsa code.
 */

/**
 * gmc_v9_0_gart_flush_gpu_tlb - gart tlb flush callback
 *
 * @adev: amdgpu_device pointer
 * @vmid: vm instance to flush
 *
 * Flush the TLB for the requested page table on both hubs.
 */
static void gmc_v9_0_gart_flush_gpu_tlb(struct amdgpu_device *adev,
					uint32_t vmid)
{
	/* Use engine 17 for the GART flush */
	const unsigned eng = 17;
	unsigned i, j;

	/* flush hdp cache */
	nbio_v6_1_hdp_flush(adev);

	spin_lock(&adev->mc.invalidate_lock);

	for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
		struct amdgpu_vmhub *hub = &adev->vmhub[i];
		u32 tmp = gmc_v9_0_get_invalidate_req(vmid);

		WREG32_NO_KIQ(hub->vm_inv_eng0_req + eng, tmp);

		/* Busy wait for ACK. */
		for (j = 0; j < 100; j++) {
			tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_ack + eng);
			tmp &= 1 << vmid;
			if (tmp)
				break;
			cpu_relax();
		}
		if (j < 100)
			continue;

		/* Wait for ACK with a delay. */
		for (j = 0; j < adev->usec_timeout; j++) {
			tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_ack + eng);
			tmp &= 1 << vmid;
			if (tmp)
				break;
			udelay(1);
		}
		if (j < adev->usec_timeout)
			continue;

		DRM_ERROR("Timeout waiting for VM flush ACK!\n");
	}

	spin_unlock(&adev->mc.invalidate_lock);
}

/**
 * gmc_v9_0_gart_set_pte_pde - update the page tables using MMIO
 *
 * @adev: amdgpu_device pointer
 * @cpu_pt_addr: cpu address of the page table
 * @gpu_page_idx: entry in the page table to update
 * @addr: dst addr to write into pte/pde
 * @flags: access flags
 *
 * Update the page tables using the CPU.
 */
static int gmc_v9_0_gart_set_pte_pde(struct amdgpu_device *adev,
				     void *cpu_pt_addr,
				     uint32_t gpu_page_idx,
				     uint64_t addr,
				     uint64_t flags)
{
	void __iomem *ptr = (void *)cpu_pt_addr;
	uint64_t value;

	/*
	 * PTE format on VEGA 10:
	 * 63:59 reserved
	 * 58:57 mtype
	 * 56 F
	 * 55 L
	 * 54 P
	 * 53 SW
	 * 52 T
	 * 50:48 reserved
	 * 47:12 4k physical page base address
	 * 11:7 fragment
	 * 6 write
	 * 5 read
	 * 4 exe
	 * 3 Z
	 * 2 snooped
	 * 1 system
	 * 0 valid
	 *
	 * PDE format on VEGA 10:
	 * 63:59 block fragment size
	 * 58:55 reserved
	 * 54 P
	 * 53:48 reserved
	 * 47:6 physical base address of PD or PTE
	 * 2 C
	 * 1 system
	 * 0 valid
	 */

	/*
	 * The following is for PTE only. GART does not have PDEs.
	 */
	value = addr & 0x0000FFFFFFFFF000ULL;
	value |= flags;
	writeq(value, ptr + (gpu_page_idx * 8));
	return 0;
}

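/**
 * gmc_v9_0_get_vm_pte_flags - translate VM mapping flags into PTE flags
 *
 * @adev: amdgpu_device pointer
 * @flags: AMDGPU_VM_PAGE_* mapping flags
 *
 * Converts the generic mapping flags into the AMDGPU_PTE_* bits used in
 * the GMC v9 page tables, including the memory type (MTYPE) field.
 */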
static uint64_t gmc_v9_0_get_vm_pte_flags(struct amdgpu_device *adev,
					  uint32_t flags)
{
	uint64_t pte_flag = 0;

	if (flags & AMDGPU_VM_PAGE_EXECUTABLE)
		pte_flag |= AMDGPU_PTE_EXECUTABLE;
	if (flags & AMDGPU_VM_PAGE_READABLE)
		pte_flag |= AMDGPU_PTE_READABLE;
	if (flags & AMDGPU_VM_PAGE_WRITEABLE)
		pte_flag |= AMDGPU_PTE_WRITEABLE;

	switch (flags & AMDGPU_VM_MTYPE_MASK) {
	case AMDGPU_VM_MTYPE_DEFAULT:
		pte_flag |= AMDGPU_PTE_MTYPE(MTYPE_NC);
		break;
	case AMDGPU_VM_MTYPE_NC:
		pte_flag |= AMDGPU_PTE_MTYPE(MTYPE_NC);
		break;
	case AMDGPU_VM_MTYPE_WC:
		pte_flag |= AMDGPU_PTE_MTYPE(MTYPE_WC);
		break;
	case AMDGPU_VM_MTYPE_CC:
		pte_flag |= AMDGPU_PTE_MTYPE(MTYPE_CC);
		break;
	case AMDGPU_VM_MTYPE_UC:
		pte_flag |= AMDGPU_PTE_MTYPE(MTYPE_UC);
		break;
	default:
		pte_flag |= AMDGPU_PTE_MTYPE(MTYPE_NC);
		break;
	}

	if (flags & AMDGPU_VM_PAGE_PRT)
		pte_flag |= AMDGPU_PTE_PRT;

	return pte_flag;
}

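/**
 * gmc_v9_0_adjust_mc_addr - convert an MC address to a GPU VM address
 *
 * @adev: amdgpu_device pointer
 * @mc_addr: MC address to adjust
 *
 * Rebases the address from the MC VRAM aperture to the base offset the
 * VM uses for VRAM pages.
 */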
static u64 gmc_v9_0_adjust_mc_addr(struct amdgpu_device *adev, u64 mc_addr)
{
	return adev->vm_manager.vram_base_offset + mc_addr - adev->mc.vram_start;
}

static const struct amdgpu_gart_funcs gmc_v9_0_gart_funcs = {
	.flush_gpu_tlb = gmc_v9_0_gart_flush_gpu_tlb,
	.set_pte_pde = gmc_v9_0_gart_set_pte_pde,
	.get_vm_pte_flags = gmc_v9_0_get_vm_pte_flags,
	.adjust_mc_addr = gmc_v9_0_adjust_mc_addr,
	.get_invalidate_req = gmc_v9_0_get_invalidate_req,
};

static void gmc_v9_0_set_gart_funcs(struct amdgpu_device *adev)
{
	if (adev->gart.gart_funcs == NULL)
		adev->gart.gart_funcs = &gmc_v9_0_gart_funcs;
}

static int gmc_v9_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v9_0_set_gart_funcs(adev);
	gmc_v9_0_set_irq_funcs(adev);

	return 0;
}

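/**
 * gmc_v9_0_late_init - late hardware init
 *
 * @handle: amdgpu_device pointer (as void *)
 *
 * Assigns a VM invalidation engine to each ring (engine 17 on each hub
 * is reserved for GART flushes) and enables the VM fault interrupt.
 */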
static int gmc_v9_0_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	unsigned vm_inv_eng[AMDGPU_MAX_VMHUBS] = { 3, 3 };
	unsigned i;

	for (i = 0; i < adev->num_rings; ++i) {
		struct amdgpu_ring *ring = adev->rings[i];
		unsigned vmhub = ring->funcs->vmhub;

		ring->vm_inv_eng = vm_inv_eng[vmhub]++;
		dev_info(adev->dev, "ring %u(%s) uses VM inv eng %u on hub %u\n",
			 ring->idx, ring->name, ring->vm_inv_eng,
			 ring->funcs->vmhub);
	}

	/* Engine 17 is used for GART flushes */
	for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i)
		BUG_ON(vm_inv_eng[i] > 17);

	return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
}

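/**
 * gmc_v9_0_vram_gtt_location - place VRAM and GTT in the address space
 *
 * @adev: amdgpu_device pointer
 * @mc: memory controller structure holding the location results
 *
 * Queries the framebuffer location from the MM hub (zero under SR-IOV)
 * and lays out the VRAM and GTT apertures.
 */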
static void gmc_v9_0_vram_gtt_location(struct amdgpu_device *adev,
				       struct amdgpu_mc *mc)
{
	u64 base = 0;

	if (!amdgpu_sriov_vf(adev))
		base = mmhub_v1_0_get_fb_location(adev);
	amdgpu_vram_location(adev, &adev->mc, base);
	adev->mc.gtt_base_align = 0;
	amdgpu_gtt_location(adev, mc);
}

/**
 * gmc_v9_0_mc_init - initialize the memory controller driver params
 *
 * @adev: amdgpu_device pointer
 *
 * Look up the amount of vram, vram width, and decide how to place
 * vram and gart within the GPU's physical address space.
 * Returns 0 for success.
 */
static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
{
	u32 tmp;
	int chansize, numchan;

	/* hbm memory channel size */
	chansize = 128;

	tmp = RREG32(SOC15_REG_OFFSET(DF, 0, mmDF_CS_AON0_DramBaseAddress0));
	tmp &= DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK;
	tmp >>= DF_CS_AON0_DramBaseAddress0__IntLvNumChan__SHIFT;
	/* Decode the channel interleave encoding; encodings without a
	 * defined channel count map to 0.
	 */
	switch (tmp) {
	case 0:
	default:
		numchan = 1;
		break;
	case 1:
		numchan = 2;
		break;
	case 2:
		numchan = 0;
		break;
	case 3:
		numchan = 4;
		break;
	case 4:
		numchan = 0;
		break;
	case 5:
		numchan = 8;
		break;
	case 6:
		numchan = 0;
		break;
	case 7:
		numchan = 16;
		break;
	case 8:
		numchan = 2;
		break;
	}
	adev->mc.vram_width = numchan * chansize;

	/* Could aper size report 0 ? */
	adev->mc.aper_base = pci_resource_start(adev->pdev, 0);
	adev->mc.aper_size = pci_resource_len(adev->pdev, 0);

	/* memsize is reported in MB */
	adev->mc.mc_vram_size =
		nbio_v6_1_get_memsize(adev) * 1024ULL * 1024ULL;
	adev->mc.real_vram_size = adev->mc.mc_vram_size;
	adev->mc.visible_vram_size = adev->mc.aper_size;

	/* In case the PCI BAR is larger than the actual amount of vram */
	if (adev->mc.visible_vram_size > adev->mc.real_vram_size)
		adev->mc.visible_vram_size = adev->mc.real_vram_size;

	/* unless the user had overridden it, set the gart size equal to
	 * the default GTT size or the vram size, whichever is larger.
	 */
	if (amdgpu_gart_size == -1)
		adev->mc.gtt_size = max((AMDGPU_DEFAULT_GTT_SIZE_MB << 20),
					adev->mc.mc_vram_size);
	else
		adev->mc.gtt_size = (uint64_t)amdgpu_gart_size << 20;

	gmc_v9_0_vram_gtt_location(adev, &adev->mc);

	return 0;
}

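/**
 * gmc_v9_0_gart_init - initialize the GART page table
 *
 * @adev: amdgpu_device pointer
 *
 * Allocates the GART table in VRAM and sets the default PTE flags
 * (uncached MTYPE, executable) applied to GART entries.
 */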
static int gmc_v9_0_gart_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->gart.robj) {
		WARN(1, "VEGA10 PCIE GART already initialized\n");
		return 0;
	}

	r = amdgpu_gart_init(adev);
	if (r)
		return r;
	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
	adev->gart.gart_pte_flags = AMDGPU_PTE_MTYPE(MTYPE_UC) |
				    AMDGPU_PTE_EXECUTABLE;
	return amdgpu_gart_table_vram_alloc(adev);
}

/*
 * vm
 * VMID 0 is the physical GPU addresses as used by the kernel.
 * VMIDs 1-15 are used for userspace clients and are handled
 * by the amdgpu vm/hsa code.
 */

/**
 * gmc_v9_0_vm_init - vm init callback
 *
 * @adev: amdgpu_device pointer
 *
 * Initializes vega10 specific VM parameters (number of VMs, page table
 * depth, base offset of vram). Returns 0 for success.
 */
static int gmc_v9_0_vm_init(struct amdgpu_device *adev)
{
	/*
	 * number of VMs
	 * VMID 0 is reserved for System
	 * amdgpu graphics/compute will use VMIDs 1-7
	 * amdkfd will use VMIDs 8-15
	 */
	adev->vm_manager.id_mgr[AMDGPU_GFXHUB].num_ids = AMDGPU_NUM_OF_VMIDS;
	adev->vm_manager.id_mgr[AMDGPU_MMHUB].num_ids = AMDGPU_NUM_OF_VMIDS;

	/* APUs keep a single page table level, dGPUs use the full depth */
	if (adev->flags & AMD_IS_APU)
		adev->vm_manager.num_level = 1;
	else
		adev->vm_manager.num_level = 3;
	amdgpu_vm_manager_init(adev);

	/* base offset of vram pages */
	adev->vm_manager.vram_base_offset = 0;

	return 0;
}

/**
 * gmc_v9_0_vm_fini - vm fini callback
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down any asic specific VM setup.
 */
static void gmc_v9_0_vm_fini(struct amdgpu_device *adev)
{
	return;
}

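/**
 * gmc_v9_0_sw_init - driver software init
 *
 * @handle: amdgpu_device pointer (as void *)
 *
 * Sets up the VM size and block size, registers the VM fault interrupt
 * sources, configures the DMA masks, and initializes the memory
 * controller parameters, GART, and VM manager.
 */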
static int gmc_v9_0_sw_init(void *handle)
{
	int r;
	int dma_bits;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	spin_lock_init(&adev->mc.invalidate_lock);

	if (adev->flags & AMD_IS_APU) {
		adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
		amdgpu_vm_adjust_size(adev, 64);
	} else {
		/* XXX Don't know how to get VRAM type yet. */
		adev->mc.vram_type = AMDGPU_VRAM_TYPE_HBM;
		/*
		 * To fulfill 4-level page support,
		 * vm size is 256TB (48bit), maximum size of Vega10,
		 * block size 512 (9bit)
		 */
		adev->vm_manager.vm_size = 1U << 18;
		adev->vm_manager.block_size = 9;
		DRM_INFO("vm size is %llu GB, block size is %u-bit\n",
			 adev->vm_manager.vm_size,
			 adev->vm_manager.block_size);
	}

	/* Both the VMC and UTCL2 clients deliver VM page faults. */
	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_VMC, 0,
				&adev->mc.vm_fault);
	if (r)
		return r;

	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_UTCL2, 0,
				&adev->mc.vm_fault);
	if (r)
		return r;

	/* vm_size is in GB, max_pfn in 4KB pages */
	adev->vm_manager.max_pfn = adev->vm_manager.vm_size << 18;

	/* Set the internal MC address mask
	 * This is the max address of the GPU's
	 * internal address space.
	 */
	adev->mc.mc_mask = 0xffffffffffffULL; /* 48 bit MC */

	/* set DMA mask + need_dma32 flags.
	 * PCIE - can handle 44-bits.
	 * IGP - can handle 44-bits
	 * PCI - dma32 for legacy pci gart, 44 bits on vega10
	 */
	adev->need_dma32 = false;
	dma_bits = adev->need_dma32 ? 32 : 44;
	r = pci_set_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		adev->need_dma32 = true;
		dma_bits = 32;
		printk(KERN_WARNING "amdgpu: No suitable DMA available.\n");
	}
	r = pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(32));
		printk(KERN_WARNING "amdgpu: No coherent DMA available.\n");
	}

	r = gmc_v9_0_mc_init(adev);
	if (r)
		return r;

	/* Memory manager */
	r = amdgpu_bo_init(adev);
	if (r)
		return r;

	r = gmc_v9_0_gart_init(adev);
	if (r)
		return r;

	if (!adev->vm_manager.enabled) {
		r = gmc_v9_0_vm_init(adev);
		if (r) {
			dev_err(adev->dev, "vm manager initialization failed (%d).\n", r);
			return r;
		}
		adev->vm_manager.enabled = true;
	}
	return r;
}


/**
 * gmc_v9_0_gart_fini - tear down the GART
 *
 * @adev: amdgpu_device pointer
 *
 * Frees the GART table VRAM and tears down the driver GART setup.
 */
static void gmc_v9_0_gart_fini(struct amdgpu_device *adev)
{
	amdgpu_gart_table_vram_free(adev);
	amdgpu_gart_fini(adev);
}

static int gmc_v9_0_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->vm_manager.enabled) {
		amdgpu_vm_manager_fini(adev);
		gmc_v9_0_vm_fini(adev);
		adev->vm_manager.enabled = false;
	}
	gmc_v9_0_gart_fini(adev);
	amdgpu_gem_force_release(adev);
	amdgpu_bo_fini(adev);

	return 0;
}

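/**
 * gmc_v9_0_init_golden_registers - apply per-ASIC golden register settings
 *
 * @adev: amdgpu_device pointer
 *
 * No GMC golden settings are needed for Vega10; the HDP golden settings
 * are applied in gmc_v9_0_gart_enable() instead.
 */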
static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_VEGA10:
		break;
	default:
		break;
	}
}

/**
 * gmc_v9_0_gart_enable - enable the GART and both VM hubs
 *
 * @adev: amdgpu_device pointer
 */
static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
{
	int r;
	bool value;
	u32 tmp;

	amdgpu_program_register_sequence(adev,
					 golden_settings_vega10_hdp,
					 (const u32)ARRAY_SIZE(golden_settings_vega10_hdp));

	if (adev->gart.robj == NULL) {
		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = amdgpu_gart_table_vram_pin(adev);
	if (r)
		return r;

	/* After HDP is initialized, flush HDP. */
	nbio_v6_1_hdp_flush(adev);

	r = gfxhub_v1_0_gart_enable(adev);
	if (r)
		return r;

	r = mmhub_v1_0_gart_enable(adev);
	if (r)
		return r;

	tmp = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MISC_CNTL));
	tmp |= HDP_MISC_CNTL__FLUSH_INVALIDATE_CACHE_MASK;
	WREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MISC_CNTL), tmp);

	tmp = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_HOST_PATH_CNTL));
	WREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_HOST_PATH_CNTL), tmp);

	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
		value = false;
	else
		value = true;

	gfxhub_v1_0_set_fault_enable_default(adev, value);
	mmhub_v1_0_set_fault_enable_default(adev, value);

	gmc_v9_0_gart_flush_gpu_tlb(adev, 0);

	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(adev->mc.gtt_size >> 20),
		 (unsigned long long)adev->gart.table_addr);
	adev->gart.ready = true;
	return 0;
}

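/**
 * gmc_v9_0_hw_init - hardware init
 *
 * @handle: amdgpu_device pointer (as void *)
 *
 * Applies the golden register settings and enables the GART.
 */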
static int gmc_v9_0_hw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* The sequence of these two function calls matters. */
	gmc_v9_0_init_golden_registers(adev);

	r = gmc_v9_0_gart_enable(adev);

	return r;
}

/**
 * gmc_v9_0_gart_disable - gart disable
 *
 * @adev: amdgpu_device pointer
 *
 * Disables the VM page tables on both hubs and unpins the GART table.
 */
static void gmc_v9_0_gart_disable(struct amdgpu_device *adev)
{
	gfxhub_v1_0_gart_disable(adev);
	mmhub_v1_0_gart_disable(adev);
	amdgpu_gart_table_vram_unpin(adev);
}

static int gmc_v9_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_irq_put(adev, &adev->mc.vm_fault, 0);
	gmc_v9_0_gart_disable(adev);

	return 0;
}

static int gmc_v9_0_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v9_0_hw_fini(adev);

	return 0;
}

static int gmc_v9_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = gmc_v9_0_hw_init(adev);
	if (r)
		return r;

	amdgpu_vm_reset_all_ids(adev);

	return 0;
}

static bool gmc_v9_0_is_idle(void *handle)
{
	/* MC is always ready in GMC v9. */
	return true;
}

static int gmc_v9_0_wait_for_idle(void *handle)
{
	/* There is no need to wait for MC idle in GMC v9. */
	return 0;
}

static int gmc_v9_0_soft_reset(void *handle)
{
	/* MC soft reset is not implemented for GMC v9. */
	return 0;
}

static int gmc_v9_0_set_clockgating_state(void *handle,
					enum amd_clockgating_state state)
{
	return 0;
}

static int gmc_v9_0_set_powergating_state(void *handle,
					enum amd_powergating_state state)
{
	return 0;
}

const struct amd_ip_funcs gmc_v9_0_ip_funcs = {
	.name = "gmc_v9_0",
	.early_init = gmc_v9_0_early_init,
	.late_init = gmc_v9_0_late_init,
	.sw_init = gmc_v9_0_sw_init,
	.sw_fini = gmc_v9_0_sw_fini,
	.hw_init = gmc_v9_0_hw_init,
	.hw_fini = gmc_v9_0_hw_fini,
	.suspend = gmc_v9_0_suspend,
	.resume = gmc_v9_0_resume,
	.is_idle = gmc_v9_0_is_idle,
	.wait_for_idle = gmc_v9_0_wait_for_idle,
	.soft_reset = gmc_v9_0_soft_reset,
	.set_clockgating_state = gmc_v9_0_set_clockgating_state,
	.set_powergating_state = gmc_v9_0_set_powergating_state,
};

const struct amdgpu_ip_block_version gmc_v9_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_GMC,
	.major = 9,
	.minor = 0,
	.rev = 0,
	.funcs = &gmc_v9_0_ip_funcs,
};