/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/firmware.h>
#include "amdgpu.h"
#include "gmc_v9_0.h"

#include "vega10/soc15ip.h"
#include "vega10/HDP/hdp_4_0_offset.h"
#include "vega10/HDP/hdp_4_0_sh_mask.h"
#include "vega10/GC/gc_9_0_sh_mask.h"
#include "vega10/vega10_enum.h"

#include "soc15_common.h"

#include "nbio_v6_1.h"
#include "nbio_v7_0.h"
#include "gfxhub_v1_0.h"
#include "mmhub_v1_0.h"

#define mmDF_CS_AON0_DramBaseAddress0				0x0044
#define mmDF_CS_AON0_DramBaseAddress0_BASE_IDX			0

#define DF_CS_AON0_DramBaseAddress0__AddrRngVal__SHIFT		0x0
#define DF_CS_AON0_DramBaseAddress0__LgcyMmioHoleEn__SHIFT	0x1
#define DF_CS_AON0_DramBaseAddress0__IntLvNumChan__SHIFT	0x4
#define DF_CS_AON0_DramBaseAddress0__IntLvAddrSel__SHIFT	0x8
#define DF_CS_AON0_DramBaseAddress0__DramBaseAddr__SHIFT	0xc
#define DF_CS_AON0_DramBaseAddress0__AddrRngVal_MASK		0x00000001L
#define DF_CS_AON0_DramBaseAddress0__LgcyMmioHoleEn_MASK	0x00000002L
#define DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK		0x000000F0L
#define DF_CS_AON0_DramBaseAddress0__IntLvAddrSel_MASK		0x00000700L
#define DF_CS_AON0_DramBaseAddress0__DramBaseAddr_MASK		0xFFFFF000L

#define AMDGPU_NUM_OF_VMIDS			8

static const u32 golden_settings_vega10_hdp[] =
{
	0xf64, 0x0fffffff, 0x00000000,
	0xf65, 0x0fffffff, 0x00000000,
	0xf66, 0x0fffffff, 0x00000000,
	0xf67, 0x0fffffff, 0x00000000,
	0xf68, 0x0fffffff, 0x00000000,
	0xf6a, 0x0fffffff, 0x00000000,
	0xf6b, 0x0fffffff, 0x00000000,
	0xf6c, 0x0fffffff, 0x00000000,
	0xf6d, 0x0fffffff, 0x00000000,
	0xf6e, 0x0fffffff, 0x00000000,
};

static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *src,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	struct amdgpu_vmhub *hub;
	u32 tmp, reg, bits, i;

	bits = VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK;

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		/* MM HUB */
		hub = &adev->vmhub[AMDGPU_MMHUB];
		for (i = 0; i < 16; i++) {
			reg = hub->vm_context0_cntl + i;
			tmp = RREG32(reg);
			tmp &= ~bits;
			WREG32(reg, tmp);
		}

		/* GFX HUB */
		hub = &adev->vmhub[AMDGPU_GFXHUB];
		for (i = 0; i < 16; i++) {
			reg = hub->vm_context0_cntl + i;
			tmp = RREG32(reg);
			tmp &= ~bits;
			WREG32(reg, tmp);
		}
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		/* MM HUB */
		hub = &adev->vmhub[AMDGPU_MMHUB];
		for (i = 0; i < 16; i++) {
			reg = hub->vm_context0_cntl + i;
			tmp = RREG32(reg);
			tmp |= bits;
			WREG32(reg, tmp);
		}

		/* GFX HUB */
		hub = &adev->vmhub[AMDGPU_GFXHUB];
		for (i = 0; i < 16; i++) {
			reg = hub->vm_context0_cntl + i;
			tmp = RREG32(reg);
			tmp |= bits;
			WREG32(reg, tmp);
		}
		break;
	default:
		break;
	}

	return 0;
}

static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
				struct amdgpu_irq_src *source,
				struct amdgpu_iv_entry *entry)
{
	struct amdgpu_vmhub *hub = &adev->vmhub[entry->vm_id_src];
	uint32_t status = 0;
	u64 addr;

	addr = (u64)entry->src_data[0] << 12;
	addr |= ((u64)entry->src_data[1] & 0xf) << 44;

	if (!amdgpu_sriov_vf(adev)) {
		status = RREG32(hub->vm_l2_pro_fault_status);
		WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1);
	}

	if (printk_ratelimit()) {
		dev_err(adev->dev,
			"[%s] VMC page fault (src_id:%u ring:%u vm_id:%u pas_id:%u)\n",
			entry->vm_id_src ? "mmhub" : "gfxhub",
			entry->src_id, entry->ring_id, entry->vm_id,
			entry->pas_id);
		dev_err(adev->dev, "  at page 0x%016llx from %d\n",
			addr, entry->client_id);
		if (!amdgpu_sriov_vf(adev))
			dev_err(adev->dev,
				"VM_L2_PROTECTION_FAULT_STATUS:0x%08X\n",
				status);
	}

	return 0;
}

static const struct amdgpu_irq_src_funcs gmc_v9_0_irq_funcs = {
	.set = gmc_v9_0_vm_fault_interrupt_state,
	.process = gmc_v9_0_process_interrupt,
};

static void gmc_v9_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->mc.vm_fault.num_types = 1;
	adev->mc.vm_fault.funcs = &gmc_v9_0_irq_funcs;
}

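/* Build the VM_INVALIDATE_ENG0_REQ value used to flush one VMID's translations */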
static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vm_id)
{
	u32 req = 0;

	/* invalidate using legacy mode on vm_id */
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
			    PER_VMID_INVALIDATE_REQ, 1 << vm_id);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, 0);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE1, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE2, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L1_PTES, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
			    CLEAR_PROTECTION_FAULT_STATUS_ADDR, 0);

	return req;
}
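
/*
 * GART
 * VMID 0 is the physical GPU addresses as used by the kernel.
 * VMIDs 1-15 are used for userspace clients and are handled
 * by the amdgpu vm/hsa code.
 */

/**
 * gmc_v9_0_gart_flush_gpu_tlb - gart tlb flush callback
 *
 * @adev: amdgpu_device pointer
 * @vmid: vm instance to flush
 *
 * Flush the TLB for the requested page table.
 */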
static void gmc_v9_0_gart_flush_gpu_tlb(struct amdgpu_device *adev,
					uint32_t vmid)
{
	/* Use engine 17 for the GART flushes */
	const unsigned eng = 17;
	unsigned i, j;

	/* flush hdp cache */
	if (adev->flags & AMD_IS_APU)
		nbio_v7_0_hdp_flush(adev);
	else
		nbio_v6_1_hdp_flush(adev);

	spin_lock(&adev->mc.invalidate_lock);

	for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
		struct amdgpu_vmhub *hub = &adev->vmhub[i];
		u32 tmp = gmc_v9_0_get_invalidate_req(vmid);

		WREG32_NO_KIQ(hub->vm_inv_eng0_req + eng, tmp);

		/* Busy wait for ACK. */
		for (j = 0; j < 100; j++) {
			tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_ack + eng);
			tmp &= 1 << vmid;
			if (tmp)
				break;
			cpu_relax();
		}
		if (j < 100)
			continue;

		/* Wait for ACK with a delay. */
		for (j = 0; j < adev->usec_timeout; j++) {
			tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_ack + eng);
			tmp &= 1 << vmid;
			if (tmp)
				break;
			udelay(1);
		}
		if (j < adev->usec_timeout)
			continue;

		DRM_ERROR("Timeout waiting for VM flush ACK!\n");
	}

	spin_unlock(&adev->mc.invalidate_lock);
}
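
/**
 * gmc_v9_0_gart_set_pte_pde - update the page tables using MMIO
 *
 * @adev: amdgpu_device pointer
 * @cpu_pt_addr: cpu address of the page table
 * @gpu_page_idx: entry in the page table to update
 * @addr: dst addr to write into pte/pde
 * @flags: access flags
 *
 * Update the page tables using the CPU.
 */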
static int gmc_v9_0_gart_set_pte_pde(struct amdgpu_device *adev,
				void *cpu_pt_addr,
				uint32_t gpu_page_idx,
				uint64_t addr,
				uint64_t flags)
{
	void __iomem *ptr = (void *)cpu_pt_addr;
	uint64_t value;

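	/*
	 * PTE format on VEGA 10:
	 * 63:59 reserved
	 * 58:57 mtype
	 * 56 F
	 * 55 L
	 * 54 P
	 * 53 SW
	 * 52 T
	 * 50:48 reserved
	 * 47:12 4k physical page base address
	 * 11:7 fragment
	 * 6 write
	 * 5 read
	 * 4 exe
	 * 3 Z
	 * 2 snooped
	 * 1 system
	 * 0 valid
	 *
	 * PDE format on VEGA 10:
	 * 63:59 block fragment size
	 * 58:55 reserved
	 * 54 P
	 * 53:48 reserved
	 * 47:6 physical base address of PD or PTE
	 * 2 C
	 * 1 system
	 * 0 valid
	 */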
	value = addr & 0x0000FFFFFFFFF000ULL;
	value |= flags;
	writeq(value, ptr + (gpu_page_idx * 8));
	return 0;
}

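/* Translate the AMDGPU_VM_PAGE_* mapping flags into hardware PTE bits */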
static uint64_t gmc_v9_0_get_vm_pte_flags(struct amdgpu_device *adev,
					uint32_t flags)
{
	uint64_t pte_flag = 0;

	if (flags & AMDGPU_VM_PAGE_EXECUTABLE)
		pte_flag |= AMDGPU_PTE_EXECUTABLE;
	if (flags & AMDGPU_VM_PAGE_READABLE)
		pte_flag |= AMDGPU_PTE_READABLE;
	if (flags & AMDGPU_VM_PAGE_WRITEABLE)
		pte_flag |= AMDGPU_PTE_WRITEABLE;

	switch (flags & AMDGPU_VM_MTYPE_MASK) {
	case AMDGPU_VM_MTYPE_DEFAULT:
		pte_flag |= AMDGPU_PTE_MTYPE(MTYPE_NC);
		break;
	case AMDGPU_VM_MTYPE_NC:
		pte_flag |= AMDGPU_PTE_MTYPE(MTYPE_NC);
		break;
	case AMDGPU_VM_MTYPE_WC:
		pte_flag |= AMDGPU_PTE_MTYPE(MTYPE_WC);
		break;
	case AMDGPU_VM_MTYPE_CC:
		pte_flag |= AMDGPU_PTE_MTYPE(MTYPE_CC);
		break;
	case AMDGPU_VM_MTYPE_UC:
		pte_flag |= AMDGPU_PTE_MTYPE(MTYPE_UC);
		break;
	default:
		pte_flag |= AMDGPU_PTE_MTYPE(MTYPE_NC);
		break;
	}

	if (flags & AMDGPU_VM_PAGE_PRT)
		pte_flag |= AMDGPU_PTE_PRT;

	return pte_flag;
}

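/* Adjust a GPU MC address of a page table so it can be written into a PDE */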
static u64 gmc_v9_0_get_vm_pde(struct amdgpu_device *adev, u64 addr)
{
	addr = adev->vm_manager.vram_base_offset + addr - adev->mc.vram_start;
	BUG_ON(addr & 0xFFFF00000000003FULL);
	return addr;
}

static const struct amdgpu_gart_funcs gmc_v9_0_gart_funcs = {
	.flush_gpu_tlb = gmc_v9_0_gart_flush_gpu_tlb,
	.set_pte_pde = gmc_v9_0_gart_set_pte_pde,
	.get_invalidate_req = gmc_v9_0_get_invalidate_req,
	.get_vm_pte_flags = gmc_v9_0_get_vm_pte_flags,
	.get_vm_pde = gmc_v9_0_get_vm_pde
};

static void gmc_v9_0_set_gart_funcs(struct amdgpu_device *adev)
{
	if (adev->gart.gart_funcs == NULL)
		adev->gart.gart_funcs = &gmc_v9_0_gart_funcs;
}

static int gmc_v9_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v9_0_set_gart_funcs(adev);
	gmc_v9_0_set_irq_funcs(adev);

	return 0;
}

static int gmc_v9_0_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	unsigned vm_inv_eng[AMDGPU_MAX_VMHUBS] = { 3, 3 };
	unsigned i;

	for (i = 0; i < adev->num_rings; ++i) {
		struct amdgpu_ring *ring = adev->rings[i];
		unsigned vmhub = ring->funcs->vmhub;

		ring->vm_inv_eng = vm_inv_eng[vmhub]++;
		dev_info(adev->dev, "ring %u(%s) uses VM inv eng %u on hub %u\n",
			 ring->idx, ring->name, ring->vm_inv_eng,
			 ring->funcs->vmhub);
	}

	/* Engine 17 is used for GART flushes */
	for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i)
		BUG_ON(vm_inv_eng[i] > 17);

	return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
}

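/**
 * gmc_v9_0_vram_gtt_location - place VRAM and GTT within the GPU address space
 *
 * @adev: amdgpu_device pointer
 * @mc: memory controller structure holding memory informations
 */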
static void gmc_v9_0_vram_gtt_location(struct amdgpu_device *adev,
					struct amdgpu_mc *mc)
{
	u64 base = 0;

	if (!amdgpu_sriov_vf(adev))
		base = mmhub_v1_0_get_fb_location(adev);
	amdgpu_vram_location(adev, &adev->mc, base);
	adev->mc.gtt_base_align = 0;
	amdgpu_gtt_location(adev, mc);

	if (adev->flags & AMD_IS_APU)
		adev->vm_manager.vram_base_offset = gfxhub_v1_0_get_mc_fb_offset(adev);
	else
		adev->vm_manager.vram_base_offset = 0;
}
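
/**
 * gmc_v9_0_mc_init - initialize the memory controller driver params
 *
 * @adev: amdgpu_device pointer
 *
 * Look up the amount of vram, vram width, and decide how to place
 * vram and gart within the GPU's physical address space.
 * Returns 0 for success.
 */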
static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
{
	u32 tmp;
	int chansize, numchan;

	/* hbm memory channel size */
	chansize = 128;

	tmp = RREG32_SOC15(DF, 0, mmDF_CS_AON0_DramBaseAddress0);
	tmp &= DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK;
	tmp >>= DF_CS_AON0_DramBaseAddress0__IntLvNumChan__SHIFT;
	switch (tmp) {
	case 0:
	default:
		numchan = 1;
		break;
	case 1:
		numchan = 2;
		break;
	case 2:
		numchan = 0;
		break;
	case 3:
		numchan = 4;
		break;
	case 4:
		numchan = 0;
		break;
	case 5:
		numchan = 8;
		break;
	case 6:
		numchan = 0;
		break;
	case 7:
		numchan = 16;
		break;
	case 8:
		numchan = 2;
		break;
	}
	adev->mc.vram_width = numchan * chansize;

	/* Could aper size report 0 ? */
	adev->mc.aper_base = pci_resource_start(adev->pdev, 0);
	adev->mc.aper_size = pci_resource_len(adev->pdev, 0);

	adev->mc.mc_vram_size =
		((adev->flags & AMD_IS_APU) ? nbio_v7_0_get_memsize(adev) :
		 nbio_v6_1_get_memsize(adev)) * 1024ULL * 1024ULL;
	adev->mc.real_vram_size = adev->mc.mc_vram_size;
	adev->mc.visible_vram_size = adev->mc.aper_size;

	/* In case the PCI BAR is larger than the actual amount of vram */
	if (adev->mc.visible_vram_size > adev->mc.real_vram_size)
		adev->mc.visible_vram_size = adev->mc.real_vram_size;

	/* unless the user had overridden it, set the gart
	 * size equal to the 1024 or vram, whichever is larger.
	 */
	if (amdgpu_gart_size == -1)
		adev->mc.gtt_size = max((AMDGPU_DEFAULT_GTT_SIZE_MB << 20),
					adev->mc.mc_vram_size);
	else
		adev->mc.gtt_size = (uint64_t)amdgpu_gart_size << 20;

	gmc_v9_0_vram_gtt_location(adev, &adev->mc);

	return 0;
}
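
/**
 * gmc_v9_0_gart_init - gart init callback
 *
 * @adev: amdgpu_device pointer
 *
 * Allocate video memory for the GART page table.
 * Returns 0 for success, error for failure.
 */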
static int gmc_v9_0_gart_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->gart.robj) {
		WARN(1, "VEGA10 PCIE GART already initialized\n");
		return 0;
	}

	/* Initialize common gart structure */
	r = amdgpu_gart_init(adev);
	if (r)
		return r;
	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
	adev->gart.gart_pte_flags = AMDGPU_PTE_MTYPE(MTYPE_UC) |
				 AMDGPU_PTE_EXECUTABLE;
	return amdgpu_gart_table_vram_alloc(adev);
}

static int gmc_v9_0_sw_init(void *handle)
{
	int r;
	int dma_bits;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gfxhub_v1_0_init(adev);
	mmhub_v1_0_init(adev);

	spin_lock_init(&adev->mc.invalidate_lock);

	if (adev->flags & AMD_IS_APU) {
		adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
		amdgpu_vm_adjust_size(adev, 64);
	} else {
		/* XXX Don't know how to get VRAM type yet. */
		adev->mc.vram_type = AMDGPU_VRAM_TYPE_HBM;

		/*
		 * To fulfill 4-level page support,
		 * vm size is 256TB (48bit), maximum size of Vega10,
		 * block size 512 (9bit)
		 */
		adev->vm_manager.vm_size = 1U << 18;
		adev->vm_manager.block_size = 9;
		DRM_INFO("vm size is %llu GB, block size is %u-bit\n",
			 adev->vm_manager.vm_size,
			 adev->vm_manager.block_size);
	}

	/* This interrupt is VMC page fault. */
	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_VMC, 0,
				&adev->mc.vm_fault);
	if (r)
		return r;

	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_UTCL2, 0,
				&adev->mc.vm_fault);
	if (r)
		return r;

	adev->vm_manager.max_pfn = adev->vm_manager.vm_size << 18;

	/* Set the internal MC address mask
	 * This is the max address of the GPU's
	 * internal address space.
	 */
	adev->mc.mc_mask = 0xffffffffffffULL; /* 48 bit MC */

	/*
	 * It needs to reserve 8M stolen memory for vega10
	 * TODO: Figure out how to avoid that...
	 */
	adev->mc.stolen_size = 8 * 1024 * 1024;

	/* set DMA mask + need_dma32 flags.
	 * PCIE - can handle 44-bits.
	 * IGP - can handle 44-bits
	 * PCI - dma32 for legacy pci gart, 44 bits on vega10
	 */
	adev->need_dma32 = false;
	dma_bits = adev->need_dma32 ? 32 : 44;
	r = pci_set_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		adev->need_dma32 = true;
		dma_bits = 32;
		printk(KERN_WARNING "amdgpu: No suitable DMA available.\n");
	}
	r = pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(32));
		printk(KERN_WARNING "amdgpu: No coherent DMA available.\n");
	}

	r = gmc_v9_0_mc_init(adev);
	if (r)
		return r;

	/* Memory manager */
	r = amdgpu_bo_init(adev);
	if (r)
		return r;

	r = gmc_v9_0_gart_init(adev);
	if (r)
		return r;

	/*
	 * number of VMs
	 * VMID 0 is reserved for System
	 * amdgpu graphics/compute will use VMIDs 1-7
	 * amdkfd will use VMIDs 8-15
	 */
	adev->vm_manager.id_mgr[AMDGPU_GFXHUB].num_ids = AMDGPU_NUM_OF_VMIDS;
	adev->vm_manager.id_mgr[AMDGPU_MMHUB].num_ids = AMDGPU_NUM_OF_VMIDS;

	/* TODO: fix num_level for APU when updating vm size and block size */
	if (adev->flags & AMD_IS_APU)
		adev->vm_manager.num_level = 1;
	else
		adev->vm_manager.num_level = 3;
	amdgpu_vm_manager_init(adev);

	return 0;
}
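
/**
 * gmc_v9_0_gart_fini - vm fini callback
 *
 * @adev: amdgpu_device pointer
 *
 * Tears down the driver GART/VM setup.
 */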
static void gmc_v9_0_gart_fini(struct amdgpu_device *adev)
{
	amdgpu_gart_table_vram_free(adev);
	amdgpu_gart_fini(adev);
}

static int gmc_v9_0_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_vm_manager_fini(adev);
	gmc_v9_0_gart_fini(adev);
	amdgpu_gem_force_release(adev);
	amdgpu_bo_fini(adev);

	return 0;
}

static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_VEGA10:
		break;
	case CHIP_RAVEN:
		break;
	default:
		break;
	}
}
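
/**
 * gmc_v9_0_gart_enable - gart enable
 *
 * @adev: amdgpu_device pointer
 */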
static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
{
	int r;
	bool value;
	u32 tmp;

	amdgpu_program_register_sequence(adev,
		golden_settings_vega10_hdp,
		(const u32)ARRAY_SIZE(golden_settings_vega10_hdp));

	if (adev->gart.robj == NULL) {
		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = amdgpu_gart_table_vram_pin(adev);
	if (r)
		return r;

	/* After HDP is initialized, flush HDP. */
	if (adev->flags & AMD_IS_APU)
		nbio_v7_0_hdp_flush(adev);
	else
		nbio_v6_1_hdp_flush(adev);

	switch (adev->asic_type) {
	case CHIP_RAVEN:
		mmhub_v1_0_initialize_power_gating(adev);
		mmhub_v1_0_update_power_gating(adev, true);
		break;
	default:
		break;
	}

	r = gfxhub_v1_0_gart_enable(adev);
	if (r)
		return r;

	r = mmhub_v1_0_gart_enable(adev);
	if (r)
		return r;

	tmp = RREG32_SOC15(HDP, 0, mmHDP_MISC_CNTL);
	tmp |= HDP_MISC_CNTL__FLUSH_INVALIDATE_CACHE_MASK;
	WREG32_SOC15(HDP, 0, mmHDP_MISC_CNTL, tmp);

	tmp = RREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL);
	WREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL, tmp);

	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
		value = false;
	else
		value = true;

	gfxhub_v1_0_set_fault_enable_default(adev, value);
	mmhub_v1_0_set_fault_enable_default(adev, value);

	gmc_v9_0_gart_flush_gpu_tlb(adev, 0);

	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(adev->mc.gtt_size >> 20),
		 (unsigned long long)adev->gart.table_addr);
	adev->gart.ready = true;
	return 0;
}

static int gmc_v9_0_hw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* The sequence of these two function calls matters. */
	gmc_v9_0_init_golden_registers(adev);

	r = gmc_v9_0_gart_enable(adev);

	return r;
}
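
/**
 * gmc_v9_0_gart_disable - gart disable
 *
 * @adev: amdgpu_device pointer
 *
 * This disables all VM page table.
 */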
static void gmc_v9_0_gart_disable(struct amdgpu_device *adev)
{
	gfxhub_v1_0_gart_disable(adev);
	mmhub_v1_0_gart_disable(adev);
	amdgpu_gart_table_vram_unpin(adev);
}

static int gmc_v9_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev)) {
		/* full access mode, so don't touch any GMC register */
		DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
		return 0;
	}

	amdgpu_irq_put(adev, &adev->mc.vm_fault, 0);
	gmc_v9_0_gart_disable(adev);

	return 0;
}

static int gmc_v9_0_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v9_0_hw_fini(adev);

	return 0;
}

static int gmc_v9_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = gmc_v9_0_hw_init(adev);
	if (r)
		return r;

	amdgpu_vm_reset_all_ids(adev);

	return 0;
}

static bool gmc_v9_0_is_idle(void *handle)
{
	/* MC is always ready in GMC v9. */
	return true;
}

static int gmc_v9_0_wait_for_idle(void *handle)
{
	/* There is no need to wait for MC idle in GMC v9. */
	return 0;
}

static int gmc_v9_0_soft_reset(void *handle)
{
	/* XXX for emulation. */
	return 0;
}

static int gmc_v9_0_set_clockgating_state(void *handle,
					enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return mmhub_v1_0_set_clockgating(adev, state);
}

static void gmc_v9_0_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	mmhub_v1_0_get_clockgating(adev, flags);
}

static int gmc_v9_0_set_powergating_state(void *handle,
					enum amd_powergating_state state)
{
	return 0;
}

const struct amd_ip_funcs gmc_v9_0_ip_funcs = {
	.name = "gmc_v9_0",
	.early_init = gmc_v9_0_early_init,
	.late_init = gmc_v9_0_late_init,
	.sw_init = gmc_v9_0_sw_init,
	.sw_fini = gmc_v9_0_sw_fini,
	.hw_init = gmc_v9_0_hw_init,
	.hw_fini = gmc_v9_0_hw_fini,
	.suspend = gmc_v9_0_suspend,
	.resume = gmc_v9_0_resume,
	.is_idle = gmc_v9_0_is_idle,
	.wait_for_idle = gmc_v9_0_wait_for_idle,
	.soft_reset = gmc_v9_0_soft_reset,
	.set_clockgating_state = gmc_v9_0_set_clockgating_state,
	.set_powergating_state = gmc_v9_0_set_powergating_state,
	.get_clockgating_state = gmc_v9_0_get_clockgating_state,
};

const struct amdgpu_ip_block_version gmc_v9_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_GMC,
	.major = 9,
	.minor = 0,
	.rev = 0,
	.funcs = &gmc_v9_0_ip_funcs,
};