#include <linux/firmware.h>
#include <linux/pci.h>

#include <drm/drm_cache.h>

#include "amdgpu.h"
#include "gmc_v9_0.h"
#include "amdgpu_atomfirmware.h"
#include "amdgpu_gem.h"

#include "hdp/hdp_4_0_offset.h"
#include "hdp/hdp_4_0_sh_mask.h"
#include "gc/gc_9_0_sh_mask.h"
#include "dce/dce_12_0_offset.h"
#include "dce/dce_12_0_sh_mask.h"
#include "vega10_enum.h"
#include "mmhub/mmhub_1_0_offset.h"
#include "athub/athub_1_0_offset.h"
#include "oss/osssys_4_0_offset.h"

#include "soc15.h"
#include "soc15_common.h"
#include "umc/umc_6_0_sh_mask.h"

#include "gfxhub_v1_0.h"
#include "mmhub_v1_0.h"
#include "gfxhub_v1_1.h"

#include "ivsrcid/vmc/irqsrcs_vmc_1_0.h"

#include "amdgpu_ras.h"

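/* DCN HUBP viewport registers, defined here since only DCE12 headers are included above */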
#define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION		0x055d
#define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_BASE_IDX	2
#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH__SHIFT	0x0
#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT__SHIFT	0x10
#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH_MASK	0x00003FFFL
#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT_MASK	0x3FFF0000L

#define AMDGPU_NUM_OF_VMIDS	8

static const u32 golden_settings_vega10_hdp[] =
{
	0xf64, 0x0fffffff, 0x00000000,
	0xf65, 0x0fffffff, 0x00000000,
	0xf66, 0x0fffffff, 0x00000000,
	0xf67, 0x0fffffff, 0x00000000,
	0xf68, 0x0fffffff, 0x00000000,
	0xf6a, 0x0fffffff, 0x00000000,
	0xf6b, 0x0fffffff, 0x00000000,
	0xf6c, 0x0fffffff, 0x00000000,
	0xf6d, 0x0fffffff, 0x00000000,
	0xf6e, 0x0fffffff, 0x00000000,
};

static const struct soc15_reg_golden golden_settings_mmhub_1_0_0[] =
{
	SOC15_REG_GOLDEN_VALUE(MMHUB, 0, mmDAGB1_WRCLI2, 0x00000007, 0xfe5fe0fa),
	SOC15_REG_GOLDEN_VALUE(MMHUB, 0, mmMMEA1_DRAM_WR_CLI2GRP_MAP0, 0x00000030, 0x55555565)
};

static const struct soc15_reg_golden golden_settings_athub_1_0_0[] =
{
	SOC15_REG_GOLDEN_VALUE(ATHUB, 0, mmRPB_ARB_CNTL, 0x0000ff00, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(ATHUB, 0, mmRPB_ARB_CNTL2, 0x00ff00ff, 0x00080008)
};

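/* ECC control, control-mask and status register addresses of the UMC channel instances (BASE + instance offset) */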
static const uint32_t ecc_umc_mcumc_ctrl_addrs[] = {
	(0x000143c0 + 0x00000000),
	(0x000143c0 + 0x00000800),
	(0x000143c0 + 0x00001000),
	(0x000143c0 + 0x00001800),
	(0x000543c0 + 0x00000000),
	(0x000543c0 + 0x00000800),
	(0x000543c0 + 0x00001000),
	(0x000543c0 + 0x00001800),
	(0x000943c0 + 0x00000000),
	(0x000943c0 + 0x00000800),
	(0x000943c0 + 0x00001000),
	(0x000943c0 + 0x00001800),
	(0x000d43c0 + 0x00000000),
	(0x000d43c0 + 0x00000800),
	(0x000d43c0 + 0x00001000),
	(0x000d43c0 + 0x00001800),
	(0x001143c0 + 0x00000000),
	(0x001143c0 + 0x00000800),
	(0x001143c0 + 0x00001000),
	(0x001143c0 + 0x00001800),
	(0x001543c0 + 0x00000000),
	(0x001543c0 + 0x00000800),
	(0x001543c0 + 0x00001000),
	(0x001543c0 + 0x00001800),
	(0x001943c0 + 0x00000000),
	(0x001943c0 + 0x00000800),
	(0x001943c0 + 0x00001000),
	(0x001943c0 + 0x00001800),
	(0x001d43c0 + 0x00000000),
	(0x001d43c0 + 0x00000800),
	(0x001d43c0 + 0x00001000),
	(0x001d43c0 + 0x00001800),
};

static const uint32_t ecc_umc_mcumc_ctrl_mask_addrs[] = {
	(0x000143e0 + 0x00000000),
	(0x000143e0 + 0x00000800),
	(0x000143e0 + 0x00001000),
	(0x000143e0 + 0x00001800),
	(0x000543e0 + 0x00000000),
	(0x000543e0 + 0x00000800),
	(0x000543e0 + 0x00001000),
	(0x000543e0 + 0x00001800),
	(0x000943e0 + 0x00000000),
	(0x000943e0 + 0x00000800),
	(0x000943e0 + 0x00001000),
	(0x000943e0 + 0x00001800),
	(0x000d43e0 + 0x00000000),
	(0x000d43e0 + 0x00000800),
	(0x000d43e0 + 0x00001000),
	(0x000d43e0 + 0x00001800),
	(0x001143e0 + 0x00000000),
	(0x001143e0 + 0x00000800),
	(0x001143e0 + 0x00001000),
	(0x001143e0 + 0x00001800),
	(0x001543e0 + 0x00000000),
	(0x001543e0 + 0x00000800),
	(0x001543e0 + 0x00001000),
	(0x001543e0 + 0x00001800),
	(0x001943e0 + 0x00000000),
	(0x001943e0 + 0x00000800),
	(0x001943e0 + 0x00001000),
	(0x001943e0 + 0x00001800),
	(0x001d43e0 + 0x00000000),
	(0x001d43e0 + 0x00000800),
	(0x001d43e0 + 0x00001000),
	(0x001d43e0 + 0x00001800),
};

static const uint32_t ecc_umc_mcumc_status_addrs[] = {
	(0x000143c2 + 0x00000000),
	(0x000143c2 + 0x00000800),
	(0x000143c2 + 0x00001000),
	(0x000143c2 + 0x00001800),
	(0x000543c2 + 0x00000000),
	(0x000543c2 + 0x00000800),
	(0x000543c2 + 0x00001000),
	(0x000543c2 + 0x00001800),
	(0x000943c2 + 0x00000000),
	(0x000943c2 + 0x00000800),
	(0x000943c2 + 0x00001000),
	(0x000943c2 + 0x00001800),
	(0x000d43c2 + 0x00000000),
	(0x000d43c2 + 0x00000800),
	(0x000d43c2 + 0x00001000),
	(0x000d43c2 + 0x00001800),
	(0x001143c2 + 0x00000000),
	(0x001143c2 + 0x00000800),
	(0x001143c2 + 0x00001000),
	(0x001143c2 + 0x00001800),
	(0x001543c2 + 0x00000000),
	(0x001543c2 + 0x00000800),
	(0x001543c2 + 0x00001000),
	(0x001543c2 + 0x00001800),
	(0x001943c2 + 0x00000000),
	(0x001943c2 + 0x00000800),
	(0x001943c2 + 0x00001000),
	(0x001943c2 + 0x00001800),
	(0x001d43c2 + 0x00000000),
	(0x001d43c2 + 0x00000800),
	(0x001d43c2 + 0x00001000),
	(0x001d43c2 + 0x00001800),
};

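/* Toggle ECC error reporting in every UMC channel control and control-mask register */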
static int gmc_v9_0_ecc_interrupt_state(struct amdgpu_device *adev,
		struct amdgpu_irq_src *src,
		unsigned type,
		enum amdgpu_interrupt_state state)
{
	u32 bits, i, tmp, reg;

	bits = 0x7f;

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_addrs); i++) {
			reg = ecc_umc_mcumc_ctrl_addrs[i];
			tmp = RREG32(reg);
			tmp &= ~bits;
			WREG32(reg, tmp);
		}
		for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_mask_addrs); i++) {
			reg = ecc_umc_mcumc_ctrl_mask_addrs[i];
			tmp = RREG32(reg);
			tmp &= ~bits;
			WREG32(reg, tmp);
		}
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_addrs); i++) {
			reg = ecc_umc_mcumc_ctrl_addrs[i];
			tmp = RREG32(reg);
			tmp |= bits;
			WREG32(reg, tmp);
		}
		for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_mask_addrs); i++) {
			reg = ecc_umc_mcumc_ctrl_mask_addrs[i];
			tmp = RREG32(reg);
			tmp |= bits;
			WREG32(reg, tmp);
		}
		break;
	default:
		break;
	}

	return 0;
}

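/* RAS error callback: flag the ECC error for the KFD and request a GPU reset; the error is reported as uncorrectable */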
static int gmc_v9_0_process_ras_data_cb(struct amdgpu_device *adev,
		struct amdgpu_iv_entry *entry)
{
	kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
	amdgpu_ras_reset_gpu(adev, 0);
	return AMDGPU_RAS_UE;
}

static int gmc_v9_0_process_ecc_irq(struct amdgpu_device *adev,
		struct amdgpu_irq_src *source,
		struct amdgpu_iv_entry *entry)
{
	struct ras_common_if *ras_if = adev->gmc.ras_if;
	struct ras_dispatch_if ih_data = {
		.entry = entry,
	};

	if (!ras_if)
		return 0;

	ih_data.head = *ras_if;

	amdgpu_ras_interrupt_dispatch(adev, &ih_data);
	return 0;
}

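/* Enable or disable the VM fault interrupts in all 16 VM contexts of each hub */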
static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *src,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	struct amdgpu_vmhub *hub;
	u32 tmp, reg, bits, i, j;

	bits = VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK;

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		for (j = 0; j < AMDGPU_MAX_VMHUBS; j++) {
			hub = &adev->vmhub[j];
			for (i = 0; i < 16; i++) {
				reg = hub->vm_context0_cntl + i;
				tmp = RREG32(reg);
				tmp &= ~bits;
				WREG32(reg, tmp);
			}
		}
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		for (j = 0; j < AMDGPU_MAX_VMHUBS; j++) {
			hub = &adev->vmhub[j];
			for (i = 0; i < 16; i++) {
				reg = hub->vm_context0_cntl + i;
				tmp = RREG32(reg);
				tmp |= bits;
				WREG32(reg, tmp);
			}
		}
		break;
	default:
		break;
	}

	return 0;
}

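/* VM fault handler: decode the faulting GPU address from the IV entry and report it */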
static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
				struct amdgpu_irq_src *source,
				struct amdgpu_iv_entry *entry)
{
	struct amdgpu_vmhub *hub = &adev->vmhub[entry->vmid_src];
	bool retry_fault = !!(entry->src_data[1] & 0x80);
	uint32_t status = 0;
	u64 addr;

	addr = (u64)entry->src_data[0] << 12;
	addr |= ((u64)entry->src_data[1] & 0xf) << 44;

	if (retry_fault && amdgpu_gmc_filter_faults(adev, addr, entry->pasid,
						    entry->timestamp))
		return 1; /* This also prevents sending it to KFD */

	if (!amdgpu_sriov_vf(adev)) {
		status = RREG32(hub->vm_l2_pro_fault_status);
		WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1);
	}

	if (printk_ratelimit()) {
		struct amdgpu_task_info task_info;

		memset(&task_info, 0, sizeof(struct amdgpu_task_info));
		amdgpu_vm_get_task_info(adev, entry->pasid, &task_info);

		dev_err(adev->dev,
			"[%s] %s page fault (src_id:%u ring:%u vmid:%u "
			"pasid:%u, for process %s pid %d thread %s pid %d)\n",
			entry->vmid_src ? "mmhub" : "gfxhub",
			retry_fault ? "retry" : "no-retry",
			entry->src_id, entry->ring_id, entry->vmid,
			entry->pasid, task_info.process_name, task_info.tgid,
			task_info.task_name, task_info.pid);
		dev_err(adev->dev, "  in page starting at address 0x%016llx from %d\n",
			addr, entry->client_id);
		if (!amdgpu_sriov_vf(adev))
			dev_err(adev->dev,
				"VM_L2_PROTECTION_FAULT_STATUS:0x%08X\n",
				status);
	}

	return 0;
}

static const struct amdgpu_irq_src_funcs gmc_v9_0_irq_funcs = {
	.set = gmc_v9_0_vm_fault_interrupt_state,
	.process = gmc_v9_0_process_interrupt,
};

static const struct amdgpu_irq_src_funcs gmc_v9_0_ecc_funcs = {
	.set = gmc_v9_0_ecc_interrupt_state,
	.process = gmc_v9_0_process_ecc_irq,
};

static void gmc_v9_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->gmc.vm_fault.num_types = 1;
	adev->gmc.vm_fault.funcs = &gmc_v9_0_irq_funcs;

	adev->gmc.ecc_irq.num_types = 1;
	adev->gmc.ecc_irq.funcs = &gmc_v9_0_ecc_funcs;
}

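/* Build the VM_INVALIDATE_ENG0_REQ value that flushes all page table levels for @vmid */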
static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vmid,
					uint32_t flush_type)
{
	u32 req = 0;

	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
			    PER_VMID_INVALIDATE_REQ, 1 << vmid);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, flush_type);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE1, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE2, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L1_PTES, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
			    CLEAR_PROTECTION_FAULT_STATUS_ADDR, 0);

	return req;
}

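/*
 * GART
 * VMID 0 is the physical GPU addresses as used by the kernel.
 * VMIDs 1-15 are used for userspace clients and are handled
 * by the amdgpu vm/hsa code.
 */

/**
 * gmc_v9_0_flush_gpu_tlb - gart tlb flush callback
 *
 * @adev: amdgpu_device pointer
 * @vmid: vm instance to flush
 * @flush_type: the flush type
 *
 * Flush the TLB for the requested page table using certain type.
 */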
static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev,
				uint32_t vmid, uint32_t flush_type)
{
	const unsigned eng = 17;
	unsigned i, j;

	for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
		struct amdgpu_vmhub *hub = &adev->vmhub[i];
		u32 tmp = gmc_v9_0_get_invalidate_req(vmid, flush_type);

		/* Use the KIQ to write and poll the invalidation registers
		 * when it is available and direct register access is not
		 * safe (i.e. under SRIOV runtime or on bare metal), except
		 * during a GPU reset.
		 */
		if (adev->gfx.kiq.ring.sched.ready &&
		    (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev)) &&
		    !adev->in_gpu_reset) {
			uint32_t req = hub->vm_inv_eng0_req + eng;
			uint32_t ack = hub->vm_inv_eng0_ack + eng;

			amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, tmp,
							   1 << vmid);
			continue;
		}

		spin_lock(&adev->gmc.invalidate_lock);
		WREG32_NO_KIQ(hub->vm_inv_eng0_req + eng, tmp);
		for (j = 0; j < adev->usec_timeout; j++) {
			tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_ack + eng);
			if (tmp & (1 << vmid))
				break;
			udelay(1);
		}
		spin_unlock(&adev->gmc.invalidate_lock);
		if (j < adev->usec_timeout)
			continue;

		DRM_ERROR("Timeout waiting for VM flush ACK!\n");
	}
}

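/* Emit a TLB flush for @vmid on the ring: program the page directory address, then request the invalidation and wait for the ack */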
static uint64_t gmc_v9_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
					unsigned vmid, uint64_t pd_addr)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_vmhub *hub = &adev->vmhub[ring->funcs->vmhub];
	uint32_t req = gmc_v9_0_get_invalidate_req(vmid, 0);
	unsigned eng = ring->vm_inv_eng;

	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_lo32 + (2 * vmid),
			      lower_32_bits(pd_addr));

	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_hi32 + (2 * vmid),
			      upper_32_bits(pd_addr));

	amdgpu_ring_emit_reg_write_reg_wait(ring, hub->vm_inv_eng0_req + eng,
					    hub->vm_inv_eng0_ack + eng,
					    req, 1 << vmid);

	return pd_addr;
}

static void gmc_v9_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid,
					unsigned pasid)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t reg;

	if (ring->funcs->vmhub == AMDGPU_GFXHUB)
		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT) + vmid;
	else
		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT_MM) + vmid;

	amdgpu_ring_emit_wreg(ring, reg, pasid);
}

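/*
 * PTE format on VEGA 10:
 * 63:59 reserved
 * 58:57 mtype
 * 56 F
 * 55 L
 * 54 P
 * 53 SW
 * 52 T
 * 50:48 reserved
 * 47:12 4k physical page base address
 * 11:7 fragment
 * 6 write
 * 5 read
 * 4 exe
 * 3 Z
 * 2 snooped
 * 1 system
 * 0 valid
 *
 * PDE format on VEGA 10:
 * 63:59 block fragment size
 * 58:55 reserved
 * 54 P
 * 53:48 reserved
 * 47:6 physical base address of PD or PTE
 * 2 C
 * 1 system
 * 0 valid
 */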
static uint64_t gmc_v9_0_get_vm_pte_flags(struct amdgpu_device *adev,
					uint32_t flags)
{
	uint64_t pte_flag = 0;

	if (flags & AMDGPU_VM_PAGE_EXECUTABLE)
		pte_flag |= AMDGPU_PTE_EXECUTABLE;
	if (flags & AMDGPU_VM_PAGE_READABLE)
		pte_flag |= AMDGPU_PTE_READABLE;
	if (flags & AMDGPU_VM_PAGE_WRITEABLE)
		pte_flag |= AMDGPU_PTE_WRITEABLE;

	switch (flags & AMDGPU_VM_MTYPE_MASK) {
	case AMDGPU_VM_MTYPE_DEFAULT:
		pte_flag |= AMDGPU_PTE_MTYPE_VG10(MTYPE_NC);
		break;
	case AMDGPU_VM_MTYPE_NC:
		pte_flag |= AMDGPU_PTE_MTYPE_VG10(MTYPE_NC);
		break;
	case AMDGPU_VM_MTYPE_WC:
		pte_flag |= AMDGPU_PTE_MTYPE_VG10(MTYPE_WC);
		break;
	case AMDGPU_VM_MTYPE_CC:
		pte_flag |= AMDGPU_PTE_MTYPE_VG10(MTYPE_CC);
		break;
	case AMDGPU_VM_MTYPE_UC:
		pte_flag |= AMDGPU_PTE_MTYPE_VG10(MTYPE_UC);
		break;
	default:
		pte_flag |= AMDGPU_PTE_MTYPE_VG10(MTYPE_NC);
		break;
	}

	if (flags & AMDGPU_VM_PAGE_PRT)
		pte_flag |= AMDGPU_PTE_PRT;

	return pte_flag;
}

static void gmc_v9_0_get_vm_pde(struct amdgpu_device *adev, int level,
				uint64_t *addr, uint64_t *flags)
{
	if (!(*flags & AMDGPU_PDE_PTE) && !(*flags & AMDGPU_PTE_SYSTEM))
		*addr = adev->vm_manager.vram_base_offset + *addr -
			adev->gmc.vram_start;
	BUG_ON(*addr & 0xFFFF00000000003FULL);

	if (!adev->gmc.translate_further)
		return;

	if (level == AMDGPU_VM_PDB1) {
		/* Set the block fragment size */
		if (!(*flags & AMDGPU_PDE_PTE))
			*flags |= AMDGPU_PDE_BFS(0x9);

	} else if (level == AMDGPU_VM_PDB0) {
		if (*flags & AMDGPU_PDE_PTE)
			*flags &= ~AMDGPU_PDE_PTE;
		else
			*flags |= AMDGPU_PTE_TF;
	}
}

static const struct amdgpu_gmc_funcs gmc_v9_0_gmc_funcs = {
	.flush_gpu_tlb = gmc_v9_0_flush_gpu_tlb,
	.emit_flush_gpu_tlb = gmc_v9_0_emit_flush_gpu_tlb,
	.emit_pasid_mapping = gmc_v9_0_emit_pasid_mapping,
	.get_vm_pte_flags = gmc_v9_0_get_vm_pte_flags,
	.get_vm_pde = gmc_v9_0_get_vm_pde
};

static void gmc_v9_0_set_gmc_funcs(struct amdgpu_device *adev)
{
	adev->gmc.gmc_funcs = &gmc_v9_0_gmc_funcs;
}

static int gmc_v9_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v9_0_set_gmc_funcs(adev);
	gmc_v9_0_set_irq_funcs(adev);

	adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
	adev->gmc.shared_aperture_end =
		adev->gmc.shared_aperture_start + (4ULL << 30) - 1;
	adev->gmc.private_aperture_start = 0x1000000000000000ULL;
	adev->gmc.private_aperture_end =
		adev->gmc.private_aperture_start + (4ULL << 30) - 1;

	return 0;
}

static bool gmc_v9_0_keep_stolen_memory(struct amdgpu_device *adev)
{
	/*
	 * TODO:
	 * Currently there is a bug where some memory client outside
	 * of the driver writes to first 8M of VRAM on S3 resume,
	 * this overrides GART which by default gets placed in first 8M and
	 * causes VM_FAULTS once GTT is accessed.
	 * Keep the stolen memory reservation until this is solved.
	 * Also check code in gmc_v9_0_get_vbios_fb_size and gmc_v9_0_late_init
	 */
	switch (adev->asic_type) {
	case CHIP_VEGA10:
	case CHIP_RAVEN:
		return true;
	case CHIP_VEGA12:
	case CHIP_VEGA20:
	default:
		return false;
	}
}

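/* Assign a free VM invalidation engine on its hub to each ring */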
static int gmc_v9_0_allocate_vm_inv_eng(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	unsigned vm_inv_engs[AMDGPU_MAX_VMHUBS] =
		{GFXHUB_FREE_VM_INV_ENGS_BITMAP, MMHUB_FREE_VM_INV_ENGS_BITMAP};
	unsigned i;
	unsigned vmhub, inv_eng;

	for (i = 0; i < adev->num_rings; ++i) {
		ring = adev->rings[i];
		vmhub = ring->funcs->vmhub;

		inv_eng = ffs(vm_inv_engs[vmhub]);
		if (!inv_eng) {
			dev_err(adev->dev, "no VM inv eng for ring %s\n",
				ring->name);
			return -EINVAL;
		}

		ring->vm_inv_eng = inv_eng - 1;
		vm_inv_engs[vmhub] &= ~(1 << ring->vm_inv_eng);

		dev_info(adev->dev, "ring %s uses VM inv eng %u on hub %u\n",
			 ring->name, ring->vm_inv_eng, ring->funcs->vmhub);
	}

	return 0;
}
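
/* Enable the UMC RAS feature and register its interrupt handler, debugfs and sysfs interfaces */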
static int gmc_v9_0_ecc_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct ras_common_if **ras_if = &adev->gmc.ras_if;
	struct ras_ih_if ih_info = {
		.cb = gmc_v9_0_process_ras_data_cb,
	};
	struct ras_fs_if fs_info = {
		.sysfs_name = "umc_err_count",
		.debugfs_name = "umc_err_inject",
	};
	struct ras_common_if ras_block = {
		.block = AMDGPU_RAS_BLOCK__UMC,
		.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
		.sub_block_index = 0,
		.name = "umc",
	};
	int r;

	if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC)) {
		amdgpu_ras_feature_enable_on_boot(adev, &ras_block, 0);
		return 0;
	}

	/* handle resume path. */
	if (*ras_if) {
		/* resend ras TA enable cmd during resume.
		 * prepare to handle failure.
		 */
		ih_info.head = **ras_if;
		r = amdgpu_ras_feature_enable_on_boot(adev, *ras_if, 1);
		if (r) {
			if (r == -EAGAIN) {
				/* request a gpu reset. will run again. */
				amdgpu_ras_request_reset_on_boot(adev,
						AMDGPU_RAS_BLOCK__UMC);
				return 0;
			}
			/* fail to enable ras, cleanup all. */
			goto irq;
		}
		/* enable successfully. continue. */
		goto resume;
	}

	*ras_if = kmalloc(sizeof(**ras_if), GFP_KERNEL);
	if (!*ras_if)
		return -ENOMEM;

	**ras_if = ras_block;

	r = amdgpu_ras_feature_enable_on_boot(adev, *ras_if, 1);
	if (r) {
		if (r == -EAGAIN) {
			amdgpu_ras_request_reset_on_boot(adev,
					AMDGPU_RAS_BLOCK__UMC);
			r = 0;
		}
		goto feature;
	}

	ih_info.head = **ras_if;
	fs_info.head = **ras_if;

	r = amdgpu_ras_interrupt_add_handler(adev, &ih_info);
	if (r)
		goto interrupt;

	amdgpu_ras_debugfs_create(adev, &fs_info);

	r = amdgpu_ras_sysfs_create(adev, &fs_info);
	if (r)
		goto sysfs;
resume:
	r = amdgpu_irq_get(adev, &adev->gmc.ecc_irq, 0);
	if (r)
		goto irq;

	return 0;
irq:
	amdgpu_ras_sysfs_remove(adev, *ras_if);
sysfs:
	amdgpu_ras_debugfs_remove(adev, *ras_if);
	amdgpu_ras_interrupt_remove_handler(adev, &ih_info);
interrupt:
	amdgpu_ras_feature_enable(adev, *ras_if, 0);
feature:
	kfree(*ras_if);
	*ras_if = NULL;
	return r;
}

static int gmc_v9_0_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	if (!gmc_v9_0_keep_stolen_memory(adev))
		amdgpu_bo_late_init(adev);

	r = gmc_v9_0_allocate_vm_inv_eng(adev);
	if (r)
		return r;

	/* Check if ecc is available */
	if (!amdgpu_sriov_vf(adev)) {
		switch (adev->asic_type) {
		case CHIP_VEGA10:
		case CHIP_VEGA20:
			r = amdgpu_atomfirmware_mem_ecc_supported(adev);
			if (!r) {
				DRM_INFO("ECC is not present.\n");
				if (adev->df_funcs->enable_ecc_force_par_wr_rmw)
					adev->df_funcs->enable_ecc_force_par_wr_rmw(adev, false);
			} else {
				DRM_INFO("ECC is active.\n");
			}

			r = amdgpu_atomfirmware_sram_ecc_supported(adev);
			if (!r) {
				DRM_INFO("SRAM ECC is not present.\n");
			} else {
				DRM_INFO("SRAM ECC is active.\n");
			}
			break;
		default:
			break;
		}
	}

	r = gmc_v9_0_ecc_late_init(handle);
	if (r)
		return r;

	return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
}

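/**
 * gmc_v9_0_vram_gtt_location - place VRAM and GTT in the GPU address space
 *
 * @adev: amdgpu_device pointer
 * @mc: memory controller structure holding memory information
 */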
static void gmc_v9_0_vram_gtt_location(struct amdgpu_device *adev,
				struct amdgpu_gmc *mc)
{
	u64 base = 0;

	if (!amdgpu_sriov_vf(adev))
		base = mmhub_v1_0_get_fb_location(adev);

	/* add the xgmi offset of the physical node */
	base += adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
	amdgpu_gmc_vram_location(adev, mc, base);
	amdgpu_gmc_gart_location(adev, mc);
	if (!amdgpu_sriov_vf(adev))
		amdgpu_gmc_agp_location(adev, mc);

	/* base offset of vram pages */
	adev->vm_manager.vram_base_offset = gfxhub_v1_0_get_mc_fb_offset(adev);

	/* add the xgmi offset of the physical node */
	adev->vm_manager.vram_base_offset +=
		adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
}

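/**
 * gmc_v9_0_mc_init - initialize the memory controller driver params
 *
 * @adev: amdgpu_device pointer
 *
 * Look up the amount of vram, vram width, and decide how to place
 * vram and gart within the GPU's physical address space.
 * Returns 0 for success.
 */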
static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
{
	int chansize, numchan;
	int r;

	if (amdgpu_sriov_vf(adev)) {
		/* For Vega10 SR-IOV, vram_width can't be read out from ATOM
		 * as on RAVEN, and the DF registers are not readable, so
		 * hard-coding seems to be the only way to set the correct
		 * vram_width.
		 */
		adev->gmc.vram_width = 2048;
	} else if (amdgpu_emu_mode != 1) {
		adev->gmc.vram_width = amdgpu_atomfirmware_get_vram_width(adev);
	}

	if (!adev->gmc.vram_width) {
		/* hbm memory channel size */
		if (adev->flags & AMD_IS_APU)
			chansize = 64;
		else
			chansize = 128;

		numchan = adev->df_funcs->get_hbm_channel_number(adev);
		adev->gmc.vram_width = numchan * chansize;
	}

	/* size in MB on si */
	adev->gmc.mc_vram_size =
		adev->nbio_funcs->get_memsize(adev) * 1024ULL * 1024ULL;
	adev->gmc.real_vram_size = adev->gmc.mc_vram_size;

	if (!(adev->flags & AMD_IS_APU)) {
		r = amdgpu_device_resize_fb_bar(adev);
		if (r)
			return r;
	}
	adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
	adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);

#ifdef CONFIG_X86_64
	if (adev->flags & AMD_IS_APU) {
		adev->gmc.aper_base = gfxhub_v1_0_get_mc_fb_offset(adev);
		adev->gmc.aper_size = adev->gmc.real_vram_size;
	}
#endif

	/* In case the PCI BAR is larger than the actual amount of vram */
	adev->gmc.visible_vram_size = adev->gmc.aper_size;
	if (adev->gmc.visible_vram_size > adev->gmc.real_vram_size)
		adev->gmc.visible_vram_size = adev->gmc.real_vram_size;

	/* set the gart size */
	if (amdgpu_gart_size == -1) {
		switch (adev->asic_type) {
		case CHIP_VEGA10:
		case CHIP_VEGA12:
		case CHIP_VEGA20:
		default:
			adev->gmc.gart_size = 512ULL << 20;
			break;
		case CHIP_RAVEN:
			adev->gmc.gart_size = 1024ULL << 20;
			break;
		}
	} else {
		adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;
	}

	gmc_v9_0_vram_gtt_location(adev, &adev->gmc);

	return 0;
}

static int gmc_v9_0_gart_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->gart.bo) {
		WARN(1, "VEGA10 PCIE GART already initialized\n");
		return 0;
	}
	/* Initialize common gart structure */
	r = amdgpu_gart_init(adev);
	if (r)
		return r;
	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
	adev->gart.gart_pte_flags = AMDGPU_PTE_MTYPE_VG10(MTYPE_UC) |
				 AMDGPU_PTE_EXECUTABLE;
	return amdgpu_gart_table_vram_alloc(adev);
}

static unsigned gmc_v9_0_get_vbios_fb_size(struct amdgpu_device *adev)
{
	u32 d1vga_control = RREG32_SOC15(DCE, 0, mmD1VGA_CONTROL);
	unsigned size;

	/*
	 * TODO Remove once GART corruption is resolved
	 * Check related code in gmc_v9_0_sw_fini
	 */
	if (gmc_v9_0_keep_stolen_memory(adev))
		return 9 * 1024 * 1024;

	if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
		size = 9 * 1024 * 1024; /* reserve 8MB for vga emulator and 1 MB for FB */
	} else {
		u32 viewport;

		switch (adev->asic_type) {
		case CHIP_RAVEN:
			viewport = RREG32_SOC15(DCE, 0, mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION);
			size = (REG_GET_FIELD(viewport,
					      HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_HEIGHT) *
				REG_GET_FIELD(viewport,
					      HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_WIDTH) *
				4);
			break;
		case CHIP_VEGA10:
		case CHIP_VEGA12:
		case CHIP_VEGA20:
		default:
			viewport = RREG32_SOC15(DCE, 0, mmSCL0_VIEWPORT_SIZE);
			size = (REG_GET_FIELD(viewport, SCL0_VIEWPORT_SIZE, VIEWPORT_HEIGHT) *
				REG_GET_FIELD(viewport, SCL0_VIEWPORT_SIZE, VIEWPORT_WIDTH) *
				4);
			break;
		}
	}
	/* return 0 if the pre-OS buffer uses up most of vram */
	if ((adev->gmc.real_vram_size - size) < (8 * 1024 * 1024))
		return 0;

	return size;
}

static int gmc_v9_0_sw_init(void *handle)
{
	int r;
	int dma_bits;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gfxhub_v1_0_init(adev);
	mmhub_v1_0_init(adev);

	spin_lock_init(&adev->gmc.invalidate_lock);

	adev->gmc.vram_type = amdgpu_atomfirmware_get_vram_type(adev);
	switch (adev->asic_type) {
	case CHIP_RAVEN:
		if (adev->rev_id == 0x0 || adev->rev_id == 0x1) {
			amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
		} else {
			/* vm_size is 128TB + 512GB for legacy 3-level page support */
			amdgpu_vm_adjust_size(adev, 128 * 1024 + 512, 9, 2, 48);
			adev->gmc.translate_further =
				adev->vm_manager.num_level > 1;
		}
		break;
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
		/*
		 * To fulfill 4-level page support,
		 * vm size is 256TB (48bit), maximum size of Vega10,
		 * block size 512 (9bit)
		 */
		/* sriov restricts max_pfn below AMDGPU_GMC_HOLE */
		if (amdgpu_sriov_vf(adev))
			amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 47);
		else
			amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
		break;
	default:
		break;
	}

	/* This interrupt is VMC page fault.*/
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC, VMC_1_0__SRCID__VM_FAULT,
				&adev->gmc.vm_fault);
	if (r)
		return r;

	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UTCL2, UTCL2_1_0__SRCID__FAULT,
				&adev->gmc.vm_fault);
	if (r)
		return r;

	/* interrupt sent to DF. */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DF, 0,
			&adev->gmc.ecc_irq);
	if (r)
		return r;

	/* Set the internal MC address mask
	 * This is the max address of the GPU's
	 * internal address space.
	 */
	adev->gmc.mc_mask = 0xffffffffffffULL; /* 48 bit MC */

	/* set DMA mask + need_dma32 flags.
	 * PCIE - can handle 44-bits.
	 * IGP - can handle 44-bits
	 * PCI - dma32 for legacy pci gart, 44 bits on vega10
	 */
	adev->need_dma32 = false;
	dma_bits = adev->need_dma32 ? 32 : 44;
	r = pci_set_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		adev->need_dma32 = true;
		dma_bits = 32;
		printk(KERN_WARNING "amdgpu: No suitable DMA available.\n");
	}
	r = pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(32));
		printk(KERN_WARNING "amdgpu: No coherent DMA available.\n");
	}
	adev->need_swiotlb = drm_need_swiotlb(dma_bits);

	if (adev->gmc.xgmi.supported) {
		r = gfxhub_v1_1_get_xgmi_info(adev);
		if (r)
			return r;
	}

	r = gmc_v9_0_mc_init(adev);
	if (r)
		return r;

	adev->gmc.stolen_size = gmc_v9_0_get_vbios_fb_size(adev);

	/* Memory manager */
	r = amdgpu_bo_init(adev);
	if (r)
		return r;

	r = gmc_v9_0_gart_init(adev);
	if (r)
		return r;

	/*
	 * number of VMs
	 * VMID 0 is reserved for System
	 * amdgpu graphics/compute will use VMIDs 1-7
	 * amdkfd will use VMIDs 8-15
	 */
	adev->vm_manager.id_mgr[AMDGPU_GFXHUB].num_ids = AMDGPU_NUM_OF_VMIDS;
	adev->vm_manager.id_mgr[AMDGPU_MMHUB].num_ids = AMDGPU_NUM_OF_VMIDS;

	amdgpu_vm_manager_init(adev);

	return 0;
}

static int gmc_v9_0_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC) &&
			adev->gmc.ras_if) {
		struct ras_common_if *ras_if = adev->gmc.ras_if;
		struct ras_ih_if ih_info = {
			.head = *ras_if,
		};

		/* remove fs entries first */
		amdgpu_ras_debugfs_remove(adev, ras_if);
		amdgpu_ras_sysfs_remove(adev, ras_if);
		/* then remove the IH and disable the feature */
		amdgpu_ras_interrupt_remove_handler(adev, &ih_info);
		amdgpu_ras_feature_enable(adev, ras_if, 0);
		kfree(ras_if);
	}

	amdgpu_gem_force_release(adev);
	amdgpu_vm_manager_fini(adev);

	if (gmc_v9_0_keep_stolen_memory(adev))
		amdgpu_bo_free_kernel(&adev->stolen_vga_memory, NULL, NULL);

	amdgpu_gart_table_vram_free(adev);
	amdgpu_bo_fini(adev);
	amdgpu_gart_fini(adev);

	return 0;
}

static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_VEGA10:
		if (amdgpu_virt_support_skip_setting(adev))
			break;
		/* fall through */
	case CHIP_VEGA20:
		soc15_program_register_sequence(adev,
						golden_settings_mmhub_1_0_0,
						ARRAY_SIZE(golden_settings_mmhub_1_0_0));
		soc15_program_register_sequence(adev,
						golden_settings_athub_1_0_0,
						ARRAY_SIZE(golden_settings_athub_1_0_0));
		break;
	case CHIP_VEGA12:
		break;
	case CHIP_RAVEN:
		soc15_program_register_sequence(adev,
						golden_settings_athub_1_0_0,
						ARRAY_SIZE(golden_settings_athub_1_0_0));
		break;
	default:
		break;
	}
}

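/**
 * gmc_v9_0_gart_enable - gart enable
 *
 * @adev: amdgpu_device pointer
 */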
static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
{
	int r;
	bool value;
	u32 tmp;

	amdgpu_device_program_register_sequence(adev,
						golden_settings_vega10_hdp,
						ARRAY_SIZE(golden_settings_vega10_hdp));

	if (adev->gart.bo == NULL) {
		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = amdgpu_gart_table_vram_pin(adev);
	if (r)
		return r;

	switch (adev->asic_type) {
	case CHIP_RAVEN:
		mmhub_v1_0_update_power_gating(adev, true);
		break;
	default:
		break;
	}

	r = gfxhub_v1_0_gart_enable(adev);
	if (r)
		return r;

	r = mmhub_v1_0_gart_enable(adev);
	if (r)
		return r;

	WREG32_FIELD15(HDP, 0, HDP_MISC_CNTL, FLUSH_INVALIDATE_CACHE, 1);

	tmp = RREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL);
	WREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL, tmp);

	WREG32_SOC15(HDP, 0, mmHDP_NONSURFACE_BASE, (adev->gmc.vram_start >> 8));
	WREG32_SOC15(HDP, 0, mmHDP_NONSURFACE_BASE_HI, (adev->gmc.vram_start >> 40));

	/* After HDP is initialized, flush HDP.*/
	adev->nbio_funcs->hdp_flush(adev, NULL);

	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
		value = false;
	else
		value = true;

	gfxhub_v1_0_set_fault_enable_default(adev, value);
	mmhub_v1_0_set_fault_enable_default(adev, value);
	gmc_v9_0_flush_gpu_tlb(adev, 0, 0);

	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(adev->gmc.gart_size >> 20),
		 (unsigned long long)amdgpu_bo_gpu_offset(adev->gart.bo));
	adev->gart.ready = true;
	return 0;
}

static int gmc_v9_0_hw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* The sequence of these two function calls matters.*/
	gmc_v9_0_init_golden_registers(adev);

	if (adev->mode_info.num_crtc) {
		/* Lockout access through VGA aperture*/
		WREG32_FIELD15(DCE, 0, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);

		/* disable VGA render */
		WREG32_FIELD15(DCE, 0, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
	}

	r = gmc_v9_0_gart_enable(adev);

	return r;
}

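/**
 * gmc_v9_0_gart_disable - gart disable
 *
 * @adev: amdgpu_device pointer
 *
 * This disables all VM page table.
 */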
static void gmc_v9_0_gart_disable(struct amdgpu_device *adev)
{
	gfxhub_v1_0_gart_disable(adev);
	mmhub_v1_0_gart_disable(adev);
	amdgpu_gart_table_vram_unpin(adev);
}

static int gmc_v9_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev)) {
		/* full access mode, so don't touch any GMC register */
		DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
		return 0;
	}

	amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0);
	amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
	gmc_v9_0_gart_disable(adev);

	return 0;
}

static int gmc_v9_0_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return gmc_v9_0_hw_fini(adev);
}

static int gmc_v9_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = gmc_v9_0_hw_init(adev);
	if (r)
		return r;

	amdgpu_vmid_reset_all(adev);

	return 0;
}

static bool gmc_v9_0_is_idle(void *handle)
{
	/* MC is always ready in GMC v9.*/
	return true;
}

static int gmc_v9_0_wait_for_idle(void *handle)
{
	/* There is no need to wait for MC idle in GMC v9.*/
	return 0;
}

static int gmc_v9_0_soft_reset(void *handle)
{
	/* XXX for emulation.*/
	return 0;
}

static int gmc_v9_0_set_clockgating_state(void *handle,
					enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return mmhub_v1_0_set_clockgating(adev, state);
}

static void gmc_v9_0_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	mmhub_v1_0_get_clockgating(adev, flags);
}

static int gmc_v9_0_set_powergating_state(void *handle,
					enum amd_powergating_state state)
{
	return 0;
}

const struct amd_ip_funcs gmc_v9_0_ip_funcs = {
	.name = "gmc_v9_0",
	.early_init = gmc_v9_0_early_init,
	.late_init = gmc_v9_0_late_init,
	.sw_init = gmc_v9_0_sw_init,
	.sw_fini = gmc_v9_0_sw_fini,
	.hw_init = gmc_v9_0_hw_init,
	.hw_fini = gmc_v9_0_hw_fini,
	.suspend = gmc_v9_0_suspend,
	.resume = gmc_v9_0_resume,
	.is_idle = gmc_v9_0_is_idle,
	.wait_for_idle = gmc_v9_0_wait_for_idle,
	.soft_reset = gmc_v9_0_soft_reset,
	.set_clockgating_state = gmc_v9_0_set_clockgating_state,
	.set_powergating_state = gmc_v9_0_set_powergating_state,
	.get_clockgating_state = gmc_v9_0_get_clockgating_state,
};

const struct amdgpu_ip_block_version gmc_v9_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_GMC,
	.major = 9,
	.minor = 0,
	.rev = 0,
	.funcs = &gmc_v9_0_ip_funcs,
};