/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */
#include <linux/firmware.h>
#include "drmP.h"
#include "amdgpu.h"
#include "gmc_v6_0.h"
#include "amdgpu_ucode.h"
#include "si/sid.h"

static void gmc_v6_0_set_gart_funcs(struct amdgpu_device *adev);
static void gmc_v6_0_set_irq_funcs(struct amdgpu_device *adev);
static int gmc_v6_0_wait_for_idle(void *handle);

MODULE_FIRMWARE("radeon/tahiti_mc.bin");
MODULE_FIRMWARE("radeon/pitcairn_mc.bin");
MODULE_FIRMWARE("radeon/verde_mc.bin");
MODULE_FIRMWARE("radeon/oland_mc.bin");
MODULE_FIRMWARE("radeon/hainan_mc.bin");

static const u32 crtc_offsets[6] =
{
	SI_CRTC0_REGISTER_OFFSET,
	SI_CRTC1_REGISTER_OFFSET,
	SI_CRTC2_REGISTER_OFFSET,
	SI_CRTC3_REGISTER_OFFSET,
	SI_CRTC4_REGISTER_OFFSET,
	SI_CRTC5_REGISTER_OFFSET
};

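/**
 * gmc_v6_0_mc_stop - stop the memory controller
 *
 * @adev: amdgpu_device pointer
 * @save: save area for display controller state
 *
 * Stops display access to the framebuffer and puts the MC into
 * blackout mode so its registers can be reprogrammed safely (SI).
 */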
static void gmc_v6_0_mc_stop(struct amdgpu_device *adev,
			     struct amdgpu_mode_mc_save *save)
{
	u32 blackout;

	if (adev->mode_info.num_crtc)
		amdgpu_display_stop_mc_access(adev, save);

	gmc_v6_0_wait_for_idle((void *)adev);

	blackout = RREG32(MC_SHARED_BLACKOUT_CNTL);
	if (REG_GET_FIELD(blackout, mmMC_SHARED_BLACKOUT_CNTL, xxBLACKOUT_MODE) != 1) {
		/* Block CPU access */
		WREG32(BIF_FB_EN, 0);
		/* blackout the MC */
		blackout = REG_SET_FIELD(blackout,
					 mmMC_SHARED_BLACKOUT_CNTL, xxBLACKOUT_MODE, 0);
		WREG32(MC_SHARED_BLACKOUT_CNTL, blackout | 1);
	}
	/* wait for the MC to settle */
	udelay(100);
}

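/**
 * gmc_v6_0_mc_resume - restore MC and display framebuffer access
 *
 * @adev: amdgpu_device pointer
 * @save: save area previously filled by gmc_v6_0_mc_stop()
 *
 * Takes the MC out of blackout mode and re-enables CPU and
 * display access to the framebuffer (SI).
 */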
static void gmc_v6_0_mc_resume(struct amdgpu_device *adev,
			       struct amdgpu_mode_mc_save *save)
{
	u32 tmp;

	/* unblackout the MC */
	tmp = RREG32(MC_SHARED_BLACKOUT_CNTL);
	tmp = REG_SET_FIELD(tmp, mmMC_SHARED_BLACKOUT_CNTL, xxBLACKOUT_MODE, 0);
	WREG32(MC_SHARED_BLACKOUT_CNTL, tmp);
	/* allow CPU access */
	tmp = REG_SET_FIELD(0, mmBIF_FB_EN, xxFB_READ_EN, 1);
	tmp = REG_SET_FIELD(tmp, mmBIF_FB_EN, xxFB_WRITE_EN, 1);
	WREG32(BIF_FB_EN, tmp);

	if (adev->mode_info.num_crtc)
		amdgpu_display_resume_mc_access(adev, save);
}

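/**
 * gmc_v6_0_init_microcode - load ucode images from disk
 *
 * @adev: amdgpu_device pointer
 *
 * Use the firmware interface to load the ucode images into
 * the driver (not loaded into hw).
 * Returns 0 on success, error on failure.
 */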
static int gmc_v6_0_init_microcode(struct amdgpu_device *adev)
{
	const char *chip_name;
	char fw_name[30];
	int err;

	DRM_DEBUG("\n");

	switch (adev->asic_type) {
	case CHIP_TAHITI:
		chip_name = "tahiti";
		break;
	case CHIP_PITCAIRN:
		chip_name = "pitcairn";
		break;
	case CHIP_VERDE:
		chip_name = "verde";
		break;
	case CHIP_OLAND:
		chip_name = "oland";
		break;
	case CHIP_HAINAN:
		chip_name = "hainan";
		break;
	default:
		BUG();
	}

	snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
	err = request_firmware(&adev->mc.fw, fw_name, adev->dev);
	if (err)
		goto out;

	err = amdgpu_ucode_validate(adev->mc.fw);

out:
	if (err) {
		dev_err(adev->dev,
			"si_mc: Failed to load firmware \"%s\"\n",
			fw_name);
		release_firmware(adev->mc.fw);
		adev->mc.fw = NULL;
	}
	return err;
}

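/**
 * gmc_v6_0_mc_load_microcode - load MC ucode into the hw
 *
 * @adev: amdgpu_device pointer
 *
 * Load the GDDR MC ucode into the hw (SI).
 * Returns 0 on success, -EINVAL if the ucode is not available.
 */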
static int gmc_v6_0_mc_load_microcode(struct amdgpu_device *adev)
{
	const struct mc_firmware_header_v1_0 *hdr;
	const __le32 *new_fw_data = NULL;
	const __le32 *new_io_mc_regs = NULL;
	u32 running;
	int i, regs_size, ucode_size;

	if (!adev->mc.fw)
		return -EINVAL;

	hdr = (const struct mc_firmware_header_v1_0 *)adev->mc.fw->data;

	amdgpu_ucode_print_mc_hdr(&hdr->header);

	adev->mc.fw_version = le32_to_cpu(hdr->header.ucode_version);
	/* the io_debug area holds (index, data) dword pairs */
	regs_size = le32_to_cpu(hdr->io_debug_size_bytes) / (4 * 2);
	new_io_mc_regs = (const __le32 *)
		(adev->mc.fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes));
	ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
	new_fw_data = (const __le32 *)
		(adev->mc.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));

	running = RREG32(MC_SEQ_SUP_CNTL) & RUN_MASK;

	if (running == 0) {
		/* reset the engine and set to writable */
		WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
		WREG32(MC_SEQ_SUP_CNTL, 0x00000010);

		/* load mc io regs */
		for (i = 0; i < regs_size; i++) {
			WREG32(MC_SEQ_IO_DEBUG_INDEX, le32_to_cpup(new_io_mc_regs++));
			WREG32(MC_SEQ_IO_DEBUG_DATA, le32_to_cpup(new_io_mc_regs++));
		}
		/* load the MC ucode */
		for (i = 0; i < ucode_size; i++)
			WREG32(MC_SEQ_SUP_PGM, le32_to_cpup(new_fw_data++));

		/* put the engine back into the active state */
		WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
		WREG32(MC_SEQ_SUP_CNTL, 0x00000004);
		WREG32(MC_SEQ_SUP_CNTL, 0x00000001);

		/* wait for training to complete */
		for (i = 0; i < adev->usec_timeout; i++) {
			if (RREG32(MC_SEQ_TRAIN_WAKEUP_CNTL) & TRAIN_DONE_D0)
				break;
			udelay(1);
		}
		for (i = 0; i < adev->usec_timeout; i++) {
			if (RREG32(MC_SEQ_TRAIN_WAKEUP_CNTL) & TRAIN_DONE_D1)
				break;
			udelay(1);
		}
	}

	return 0;
}

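/**
 * gmc_v6_0_vram_gtt_location - try to find VRAM & GTT location
 *
 * @adev: amdgpu_device pointer
 * @mc: memory controller info structure
 *
 * Caps VRAM so that room is left for GTT below the 40-bit MC
 * address limit, then places VRAM and GTT (SI).
 */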
static void gmc_v6_0_vram_gtt_location(struct amdgpu_device *adev,
				       struct amdgpu_mc *mc)
{
	if (mc->mc_vram_size > 0xFFC0000000ULL) {
		/* leave room for at least 1024M GTT */
		dev_warn(adev->dev, "limiting VRAM\n");
		mc->real_vram_size = 0xFFC0000000ULL;
		mc->mc_vram_size = 0xFFC0000000ULL;
	}
	amdgpu_vram_location(adev, &adev->mc, 0);
	adev->mc.gtt_base_align = 0;
	amdgpu_gtt_location(adev, mc);
}

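/**
 * gmc_v6_0_mc_program - program the GPU memory controller
 *
 * @adev: amdgpu_device pointer
 *
 * Set the location of vram and the system aperture in the
 * GPU's physical address space (SI).
 */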
static void gmc_v6_0_mc_program(struct amdgpu_device *adev)
{
	struct amdgpu_mode_mc_save save;
	u32 tmp;
	int i, j;

	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x6) {
		WREG32((0xb05 + j), 0x00000000);
		WREG32((0xb06 + j), 0x00000000);
		WREG32((0xb07 + j), 0x00000000);
		WREG32((0xb08 + j), 0x00000000);
		WREG32((0xb09 + j), 0x00000000);
	}
	WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0);

	gmc_v6_0_mc_stop(adev, &save);

	if (gmc_v6_0_wait_for_idle((void *)adev))
		dev_warn(adev->dev, "Wait for MC idle timed out!\n");

	WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
	/* Update configuration */
	WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
	       adev->mc.vram_start >> 12);
	WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
	       adev->mc.vram_end >> 12);
	WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR,
	       adev->vram_scratch.gpu_addr >> 12);
	tmp = ((adev->mc.vram_end >> 24) & 0xFFFF) << 16;
	tmp |= ((adev->mc.vram_start >> 24) & 0xFFFF);
	WREG32(MC_VM_FB_LOCATION, tmp);

	WREG32(HDP_NONSURFACE_BASE, (adev->mc.vram_start >> 8));
	WREG32(HDP_NONSURFACE_INFO, (2 << 7) | (1 << 30));
	WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF);
	WREG32(MC_VM_AGP_BASE, 0);
	WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF);
	WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF);

	if (gmc_v6_0_wait_for_idle((void *)adev))
		dev_warn(adev->dev, "Wait for MC idle timed out!\n");

	gmc_v6_0_mc_resume(adev, &save);
	amdgpu_display_set_vga_render_state(adev, false);
}

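/**
 * gmc_v6_0_mc_init - initialize the memory controller driver params
 *
 * @adev: amdgpu_device pointer
 *
 * Look up the amount of vram, vram width, and decide how to place
 * vram and gart within the GPU's physical address space (SI).
 * Returns 0 for success.
 */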
static int gmc_v6_0_mc_init(struct amdgpu_device *adev)
{
	u32 tmp;
	int chansize, numchan;

	tmp = RREG32(MC_ARB_RAMCFG);
	if (tmp & CHANSIZE_OVERRIDE) {
		chansize = 16;
	} else if (tmp & CHANSIZE_MASK) {
		chansize = 64;
	} else {
		chansize = 32;
	}
	tmp = RREG32(MC_SHARED_CHMAP);
	switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
	case 0:
	default:
		numchan = 1;
		break;
	case 1:
		numchan = 2;
		break;
	case 2:
		numchan = 4;
		break;
	case 3:
		numchan = 8;
		break;
	case 4:
		numchan = 3;
		break;
	case 5:
		numchan = 6;
		break;
	case 6:
		numchan = 10;
		break;
	case 7:
		numchan = 12;
		break;
	case 8:
		numchan = 16;
		break;
	}
	adev->mc.vram_width = numchan * chansize;
	/* Could aper size report 0 ? */
	adev->mc.aper_base = pci_resource_start(adev->pdev, 0);
	adev->mc.aper_size = pci_resource_len(adev->pdev, 0);
	/* size in MB on si */
	adev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024ULL * 1024ULL;
	adev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024ULL * 1024ULL;
	adev->mc.visible_vram_size = adev->mc.aper_size;

	/* unless the user had overridden it, set the gart
	 * size equal to the gtt size reported by ttm
	 */
	if (amdgpu_gart_size == -1)
		adev->mc.gtt_size = amdgpu_ttm_get_gtt_mem_size(adev);
	else
		adev->mc.gtt_size = (uint64_t)amdgpu_gart_size << 20;

	gmc_v6_0_vram_gtt_location(adev, &adev->mc);

	return 0;
}

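/**
 * gmc_v6_0_gart_flush_gpu_tlb - gart tlb flush callback
 *
 * @adev: amdgpu_device pointer
 * @vmid: vm instance to flush
 *
 * Flush the TLB for the requested page table (SI).
 */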
static void gmc_v6_0_gart_flush_gpu_tlb(struct amdgpu_device *adev,
					uint32_t vmid)
{
	/* flush hdp cache */
	WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0);

	/* bits 0-15 are the VM contexts0-15 */
	WREG32(VM_INVALIDATE_REQUEST, 1 << vmid);
}

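/**
 * gmc_v6_0_gart_set_pte_pde - update the page tables using MMIO
 *
 * @adev: amdgpu_device pointer
 * @cpu_pt_addr: cpu address of the page table
 * @gpu_page_idx: entry in the page table to update
 * @addr: dst addr to write into pte/pde
 * @flags: access flags
 *
 * Update the page tables using the CPU.
 */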
static int gmc_v6_0_gart_set_pte_pde(struct amdgpu_device *adev,
				     void *cpu_pt_addr,
				     uint32_t gpu_page_idx,
				     uint64_t addr,
				     uint32_t flags)
{
	void __iomem *ptr = (void *)cpu_pt_addr;
	uint64_t value;

	value = addr & 0xFFFFFFFFFFFFF000ULL;
	value |= flags;
	writeq(value, ptr + (gpu_page_idx * 8));

	return 0;
}

static void gmc_v6_0_set_fault_enable_default(struct amdgpu_device *adev,
					      bool value)
{
	u32 tmp;

	tmp = RREG32(VM_CONTEXT1_CNTL);
	tmp = REG_SET_FIELD(tmp, mmVM_CONTEXT1_CNTL,
			    xxRANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, mmVM_CONTEXT1_CNTL,
			    xxDUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, mmVM_CONTEXT1_CNTL,
			    xxPDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, mmVM_CONTEXT1_CNTL,
			    xxVALID_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, mmVM_CONTEXT1_CNTL,
			    xxREAD_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, mmVM_CONTEXT1_CNTL,
			    xxWRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	WREG32(VM_CONTEXT1_CNTL, tmp);
}

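/**
 * gmc_v6_0_gart_enable - gart enable
 *
 * @adev: amdgpu_device pointer
 *
 * This sets up the TLBs, programs the page tables for VMID0,
 * and sets up the hw for VMIDs 1-15 which are allocated on
 * demand (SI).
 * Returns 0 for success, errors for failure.
 */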
static int gmc_v6_0_gart_enable(struct amdgpu_device *adev)
{
	int r, i;

	if (adev->gart.robj == NULL) {
		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = amdgpu_gart_table_vram_pin(adev);
	if (r)
		return r;
	/* Setup TLB control */
	WREG32(MC_VM_MX_L1_TLB_CNTL,
	       (0xA << 7) |
	       ENABLE_L1_TLB |
	       ENABLE_L1_FRAGMENT_PROCESSING |
	       SYSTEM_ACCESS_MODE_NOT_IN_SYS |
	       ENABLE_ADVANCED_DRIVER_MODEL |
	       SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU);
	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE |
	       ENABLE_L2_FRAGMENT_PROCESSING |
	       ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
	       ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE |
	       EFFECTIVE_L2_QUEUE_SIZE(7) |
	       CONTEXT1_IDENTITY_ACCESS_MODE(1));
	WREG32(VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS | INVALIDATE_L2_CACHE);
	WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY |
	       BANK_SELECT(4) |
	       L2_CACHE_BIGK_FRAGMENT_SIZE(4));
	/* setup context0 */
	WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.gtt_start >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.gtt_end >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->gart.table_addr >> 12);
	WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
	       (u32)(adev->dummy_page.addr >> 12));
	WREG32(VM_CONTEXT0_CNTL2, 0);
	WREG32(VM_CONTEXT0_CNTL, (ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
				  RANGE_PROTECTION_FAULT_ENABLE_DEFAULT));

	WREG32(0x575, 0);
	WREG32(0x576, 0);
	WREG32(0x577, 0);

	/* empty context1-15 */
	/* set vm size, must be a multiple of 4 */
	WREG32(VM_CONTEXT1_PAGE_TABLE_START_ADDR, 0);
	WREG32(VM_CONTEXT1_PAGE_TABLE_END_ADDR, adev->vm_manager.max_pfn - 1);
	/* Assign the pt base to something valid for now; the pts used for
	 * the VMs are determined by the application and setup and assigned
	 * on the fly in the vm part of the driver
	 */
	for (i = 1; i < 16; i++) {
		if (i < 8)
			WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i,
			       adev->gart.table_addr >> 12);
		else
			WREG32(VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + i - 8,
			       adev->gart.table_addr >> 12);
	}

	/* enable context1-15 */
	WREG32(VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
	       (u32)(adev->dummy_page.addr >> 12));
	WREG32(VM_CONTEXT1_CNTL2, 4);
	WREG32(VM_CONTEXT1_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(1) |
	       PAGE_TABLE_BLOCK_SIZE(amdgpu_vm_block_size - 9) |
	       RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
	       RANGE_PROTECTION_FAULT_ENABLE_DEFAULT |
	       DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
	       DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT |
	       PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT |
	       PDE0_PROTECTION_FAULT_ENABLE_DEFAULT |
	       VALID_PROTECTION_FAULT_ENABLE_INTERRUPT |
	       VALID_PROTECTION_FAULT_ENABLE_DEFAULT |
	       READ_PROTECTION_FAULT_ENABLE_INTERRUPT |
	       READ_PROTECTION_FAULT_ENABLE_DEFAULT |
	       WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT |
	       WRITE_PROTECTION_FAULT_ENABLE_DEFAULT);

	gmc_v6_0_gart_flush_gpu_tlb(adev, 0);
	dev_info(adev->dev, "PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(adev->mc.gtt_size >> 20),
		 (unsigned long long)adev->gart.table_addr);
	adev->gart.ready = true;
	return 0;
}

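/**
 * gmc_v6_0_gart_init - gart table VRAM alloc helper
 *
 * @adev: amdgpu_device pointer
 *
 * Allocate video memory for the GART page table. These asics
 * require the gart table to be in video memory.
 * Returns 0 for success, error for failure.
 */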
static int gmc_v6_0_gart_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->gart.robj) {
		dev_warn(adev->dev, "gmc_v6_0 PCIE GART already initialized\n");
		return 0;
	}
	r = amdgpu_gart_init(adev);
	if (r)
		return r;
	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
	return amdgpu_gart_table_vram_alloc(adev);
}

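/**
 * gmc_v6_0_gart_disable - gart disable
 *
 * @adev: amdgpu_device pointer
 *
 * This disables all VM page table contexts (SI).
 */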
static void gmc_v6_0_gart_disable(struct amdgpu_device *adev)
{
	/* Disable all tables */
	WREG32(VM_CONTEXT0_CNTL, 0);
	WREG32(VM_CONTEXT1_CNTL, 0);
	/* Setup TLB control */
	WREG32(MC_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE_NOT_IN_SYS |
	       SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU);
	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
	       ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE |
	       EFFECTIVE_L2_QUEUE_SIZE(7) |
	       CONTEXT1_IDENTITY_ACCESS_MODE(1));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY |
	       L2_CACHE_BIGK_FRAGMENT_SIZE(0));
	amdgpu_gart_table_vram_unpin(adev);
}

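/**
 * gmc_v6_0_gart_fini - vm fini callback
 *
 * @adev: amdgpu_device pointer
 *
 * Tears down the driver GART/VM setup (SI).
 */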
static void gmc_v6_0_gart_fini(struct amdgpu_device *adev)
{
	amdgpu_gart_table_vram_free(adev);
	amdgpu_gart_fini(adev);
}

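/**
 * gmc_v6_0_vm_init - vm init callback
 *
 * @adev: amdgpu_device pointer
 *
 * Inits SI specific vm parameters (number of VMs, base of vram for
 * VMIDs 1-15) (SI).
 * Returns 0 for success.
 */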
static int gmc_v6_0_vm_init(struct amdgpu_device *adev)
{
	/*
	 * number of VMs
	 * VMID 0 is reserved for System
	 * amdgpu graphics/compute will use VMIDs 1-7
	 * amdkfd will use VMIDs 8-15
	 */
	adev->vm_manager.num_ids = AMDGPU_NUM_OF_VMIDS;
	amdgpu_vm_manager_init(adev);

	/* base offset of vram pages */
	if (adev->flags & AMD_IS_APU) {
		u64 tmp = RREG32(MC_VM_FB_OFFSET);
		tmp <<= 22;
		adev->vm_manager.vram_base_offset = tmp;
	} else {
		adev->vm_manager.vram_base_offset = 0;
	}

	return 0;
}

static void gmc_v6_0_vm_fini(struct amdgpu_device *adev)
{
}

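/**
 * gmc_v6_0_vm_decode_fault - print human readable fault info
 *
 * @adev: amdgpu_device pointer
 * @status: VM_CONTEXT1_PROTECTION_FAULT_STATUS register value
 * @addr: VM_CONTEXT1_PROTECTION_FAULT_ADDR register value
 * @mc_client: four-character code of the MC client that faulted
 *
 * Decode the protection fault registers and print human readable
 * fault information (SI).
 */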
static void gmc_v6_0_vm_decode_fault(struct amdgpu_device *adev,
				     u32 status, u32 addr, u32 mc_client)
{
	u32 mc_id;
	u32 vmid = REG_GET_FIELD(status, mmVM_CONTEXT1_PROTECTION_FAULT_STATUS,
				 xxVMID);
	u32 protections = REG_GET_FIELD(status, mmVM_CONTEXT1_PROTECTION_FAULT_STATUS,
					xxPROTECTIONS);
	char block[5] = { mc_client >> 24, (mc_client >> 16) & 0xff,
			  (mc_client >> 8) & 0xff, mc_client & 0xff, 0 };

	mc_id = REG_GET_FIELD(status, mmVM_CONTEXT1_PROTECTION_FAULT_STATUS,
			      xxMEMORY_CLIENT_ID);

	dev_err(adev->dev, "VM fault (0x%02x, vmid %d) at page %u, %s from '%s' (0x%08x) (%d)\n",
		protections, vmid, addr,
		REG_GET_FIELD(status, mmVM_CONTEXT1_PROTECTION_FAULT_STATUS,
			      xxMEMORY_CLIENT_RW) ?
		"write" : "read", block, mc_client, mc_id);
}

static int gmc_v6_0_convert_vram_type(int mc_seq_vram_type)
{
	switch (mc_seq_vram_type) {
	case MC_SEQ_MISC0__MT__GDDR1:
		return AMDGPU_VRAM_TYPE_GDDR1;
	case MC_SEQ_MISC0__MT__DDR2:
		return AMDGPU_VRAM_TYPE_DDR2;
	case MC_SEQ_MISC0__MT__GDDR3:
		return AMDGPU_VRAM_TYPE_GDDR3;
	case MC_SEQ_MISC0__MT__GDDR4:
		return AMDGPU_VRAM_TYPE_GDDR4;
	case MC_SEQ_MISC0__MT__GDDR5:
		return AMDGPU_VRAM_TYPE_GDDR5;
	case MC_SEQ_MISC0__MT__DDR3:
		return AMDGPU_VRAM_TYPE_DDR3;
	default:
		return AMDGPU_VRAM_TYPE_UNKNOWN;
	}
}

static int gmc_v6_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v6_0_set_gart_funcs(adev);
	gmc_v6_0_set_irq_funcs(adev);

	if (adev->flags & AMD_IS_APU) {
		adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
	} else {
		u32 tmp = RREG32(MC_SEQ_MISC0);
		tmp &= MC_SEQ_MISC0__MT__MASK;
		adev->mc.vram_type = gmc_v6_0_convert_vram_type(tmp);
	}

	return 0;
}

static int gmc_v6_0_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
}

static int gmc_v6_0_sw_init(void *handle)
{
	int r;
	int dma_bits;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_irq_add_id(adev, 146, &adev->mc.vm_fault);
	if (r)
		return r;

	r = amdgpu_irq_add_id(adev, 147, &adev->mc.vm_fault);
	if (r)
		return r;

	adev->vm_manager.max_pfn = amdgpu_vm_size << 18;

	/* Set the internal MC address mask
	 * This is the max address of the GPU's
	 * internal address space.
	 */
	adev->mc.mc_mask = 0xffffffffffULL; /* 40 bit MC */

	/* set DMA mask: these asics can handle 40-bit addresses,
	 * fall back to dma32 if that fails.
	 */
	adev->need_dma32 = false;
	dma_bits = adev->need_dma32 ? 32 : 40;
	r = pci_set_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		adev->need_dma32 = true;
		dma_bits = 32;
		dev_warn(adev->dev, "amdgpu: No suitable DMA available.\n");
	}
	r = pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(32));
		dev_warn(adev->dev, "amdgpu: No coherent DMA available.\n");
	}

	r = gmc_v6_0_init_microcode(adev);
	if (r) {
		dev_err(adev->dev, "Failed to load mc firmware!\n");
		return r;
	}

	r = amdgpu_ttm_global_init(adev);
	if (r)
		return r;

	r = gmc_v6_0_mc_init(adev);
	if (r)
		return r;

	r = amdgpu_bo_init(adev);
	if (r)
		return r;

	r = gmc_v6_0_gart_init(adev);
	if (r)
		return r;

	if (!adev->vm_manager.enabled) {
		r = gmc_v6_0_vm_init(adev);
		if (r) {
			dev_err(adev->dev, "vm manager initialization failed (%d).\n", r);
			return r;
		}
		adev->vm_manager.enabled = true;
	}

	return r;
}

static int gmc_v6_0_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->vm_manager.enabled) {
		gmc_v6_0_vm_fini(adev);
		adev->vm_manager.enabled = false;
	}
	gmc_v6_0_gart_fini(adev);
	amdgpu_gem_force_release(adev);
	amdgpu_bo_fini(adev);

	return 0;
}

static int gmc_v6_0_hw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v6_0_mc_program(adev);

	if (!(adev->flags & AMD_IS_APU)) {
		r = gmc_v6_0_mc_load_microcode(adev);
		if (r) {
			dev_err(adev->dev, "Failed to load MC firmware!\n");
			return r;
		}
	}

	r = gmc_v6_0_gart_enable(adev);
	if (r)
		return r;

	return r;
}

static int gmc_v6_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_irq_put(adev, &adev->mc.vm_fault, 0);
	gmc_v6_0_gart_disable(adev);

	return 0;
}

static int gmc_v6_0_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->vm_manager.enabled) {
		gmc_v6_0_vm_fini(adev);
		adev->vm_manager.enabled = false;
	}
	gmc_v6_0_hw_fini(adev);

	return 0;
}

static int gmc_v6_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = gmc_v6_0_hw_init(adev);
	if (r)
		return r;

	if (!adev->vm_manager.enabled) {
		r = gmc_v6_0_vm_init(adev);
		if (r) {
			dev_err(adev->dev, "vm manager initialization failed (%d).\n", r);
			return r;
		}
		adev->vm_manager.enabled = true;
	}

	return r;
}

static bool gmc_v6_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 tmp = RREG32(SRBM_STATUS);

	if (tmp & (SRBM_STATUS__MCB_BUSY_MASK |
		   SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
		   SRBM_STATUS__MCC_BUSY_MASK |
		   SRBM_STATUS__MCD_BUSY_MASK |
		   SRBM_STATUS__VMC_BUSY_MASK))
		return false;

	return true;
}

static int gmc_v6_0_wait_for_idle(void *handle)
{
	unsigned i;
	u32 tmp;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(SRBM_STATUS) & (SRBM_STATUS__MCB_BUSY_MASK |
					     SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
					     SRBM_STATUS__MCC_BUSY_MASK |
					     SRBM_STATUS__MCD_BUSY_MASK |
					     SRBM_STATUS__VMC_BUSY_MASK);
		if (!tmp)
			return 0;
		udelay(1);
	}
	return -ETIMEDOUT;
}

static int gmc_v6_0_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_mode_mc_save save;
	u32 srbm_soft_reset = 0;
	u32 tmp = RREG32(SRBM_STATUS);

	if (tmp & SRBM_STATUS__VMC_BUSY_MASK)
		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
						mmSRBM_SOFT_RESET, xxSOFT_RESET_VMC, 1);

	if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
		   SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK)) {
		if (!(adev->flags & AMD_IS_APU))
			srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
							mmSRBM_SOFT_RESET, xxSOFT_RESET_MC, 1);
	}

	if (srbm_soft_reset) {
		gmc_v6_0_mc_stop(adev, &save);
		if (gmc_v6_0_wait_for_idle(adev))
			dev_warn(adev->dev, "Wait for GMC idle timed out!\n");

		tmp = RREG32(SRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(SRBM_SOFT_RESET, tmp);
		tmp = RREG32(SRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~srbm_soft_reset;
		WREG32(SRBM_SOFT_RESET, tmp);
		tmp = RREG32(SRBM_SOFT_RESET);

		udelay(50);

		gmc_v6_0_mc_resume(adev, &save);
		udelay(50);
	}

	return 0;
}

static int gmc_v6_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
					     struct amdgpu_irq_src *src,
					     unsigned type,
					     enum amdgpu_interrupt_state state)
{
	u32 tmp;
	u32 bits = (VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK);

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		tmp = RREG32(VM_CONTEXT0_CNTL);
		tmp &= ~bits;
		WREG32(VM_CONTEXT0_CNTL, tmp);
		tmp = RREG32(VM_CONTEXT1_CNTL);
		tmp &= ~bits;
		WREG32(VM_CONTEXT1_CNTL, tmp);
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		tmp = RREG32(VM_CONTEXT0_CNTL);
		tmp |= bits;
		WREG32(VM_CONTEXT0_CNTL, tmp);
		tmp = RREG32(VM_CONTEXT1_CNTL);
		tmp |= bits;
		WREG32(VM_CONTEXT1_CNTL, tmp);
		break;
	default:
		break;
	}

	return 0;
}

static int gmc_v6_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	u32 addr, status;

	addr = RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR);
	status = RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS);
	/* clear the latched fault address/status so the next fault
	 * can be captured
	 */
	WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1);

	if (!addr && !status)
		return 0;

	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_FIRST)
		gmc_v6_0_set_fault_enable_default(adev, false);

	dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n",
		entry->src_id, entry->src_data);
	dev_err(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n",
		addr);
	dev_err(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
		status);
	gmc_v6_0_vm_decode_fault(adev, status, addr, 0);

	return 0;
}

static int gmc_v6_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	return 0;
}

static int gmc_v6_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	return 0;
}

const struct amd_ip_funcs gmc_v6_0_ip_funcs = {
	.name = "gmc_v6_0",
	.early_init = gmc_v6_0_early_init,
	.late_init = gmc_v6_0_late_init,
	.sw_init = gmc_v6_0_sw_init,
	.sw_fini = gmc_v6_0_sw_fini,
	.hw_init = gmc_v6_0_hw_init,
	.hw_fini = gmc_v6_0_hw_fini,
	.suspend = gmc_v6_0_suspend,
	.resume = gmc_v6_0_resume,
	.is_idle = gmc_v6_0_is_idle,
	.wait_for_idle = gmc_v6_0_wait_for_idle,
	.soft_reset = gmc_v6_0_soft_reset,
	.set_clockgating_state = gmc_v6_0_set_clockgating_state,
	.set_powergating_state = gmc_v6_0_set_powergating_state,
};

static const struct amdgpu_gart_funcs gmc_v6_0_gart_funcs = {
	.flush_gpu_tlb = gmc_v6_0_gart_flush_gpu_tlb,
	.set_pte_pde = gmc_v6_0_gart_set_pte_pde,
};

static const struct amdgpu_irq_src_funcs gmc_v6_0_irq_funcs = {
	.set = gmc_v6_0_vm_fault_interrupt_state,
	.process = gmc_v6_0_process_interrupt,
};

static void gmc_v6_0_set_gart_funcs(struct amdgpu_device *adev)
{
	if (adev->gart.gart_funcs == NULL)
		adev->gart.gart_funcs = &gmc_v6_0_gart_funcs;
}

static void gmc_v6_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->mc.vm_fault.num_types = 1;
	adev->mc.vm_fault.funcs = &gmc_v6_0_irq_funcs;
}