#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/pci.h>

#include <drm/drm_cache.h>
#include "amdgpu.h"
#include "gmc_v6_0.h"
#include "amdgpu_ucode.h"
#include "amdgpu_gem.h"

#include "bif/bif_3_0_d.h"
#include "bif/bif_3_0_sh_mask.h"
#include "oss/oss_1_0_d.h"
#include "oss/oss_1_0_sh_mask.h"
#include "gmc/gmc_6_0_d.h"
#include "gmc/gmc_6_0_sh_mask.h"
#include "dce/dce_6_0_d.h"
#include "dce/dce_6_0_sh_mask.h"
#include "si_enums.h"

static void gmc_v6_0_set_gmc_funcs(struct amdgpu_device *adev);
static void gmc_v6_0_set_irq_funcs(struct amdgpu_device *adev);
static int gmc_v6_0_wait_for_idle(void *handle);

MODULE_FIRMWARE("amdgpu/tahiti_mc.bin");
MODULE_FIRMWARE("amdgpu/pitcairn_mc.bin");
MODULE_FIRMWARE("amdgpu/verde_mc.bin");
MODULE_FIRMWARE("amdgpu/oland_mc.bin");
MODULE_FIRMWARE("amdgpu/hainan_mc.bin");
MODULE_FIRMWARE("amdgpu/si58_mc.bin");

#define MC_SEQ_MISC0__MT__MASK	0xf0000000
#define MC_SEQ_MISC0__MT__GDDR1	0x10000000
#define MC_SEQ_MISC0__MT__DDR2	0x20000000
#define MC_SEQ_MISC0__MT__GDDR3	0x30000000
#define MC_SEQ_MISC0__MT__GDDR4	0x40000000
#define MC_SEQ_MISC0__MT__GDDR5	0x50000000
#define MC_SEQ_MISC0__MT__HBM	0x60000000
#define MC_SEQ_MISC0__MT__DDR3	0xB0000000

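/**
 * gmc_v6_0_mc_stop - stop the memory controller
 *
 * @adev: amdgpu_device pointer
 *
 * Wait for the MC to go idle, then blackout the MC and disable CPU
 * framebuffer access so the MC can be safely reprogrammed.
 */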
static void gmc_v6_0_mc_stop(struct amdgpu_device *adev)
{
	u32 blackout;

	gmc_v6_0_wait_for_idle((void *)adev);

	blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
	if (REG_GET_FIELD(blackout, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE) != 1) {
		/* Block CPU access */
		WREG32(mmBIF_FB_EN, 0);
		/* blackout the MC */
		blackout = REG_SET_FIELD(blackout,
					 MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE, 0);
		WREG32(mmMC_SHARED_BLACKOUT_CNTL, blackout | 1);
	}
	/* wait for the MC to settle */
	udelay(100);
}

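/**
 * gmc_v6_0_mc_resume - undo the blackout applied by gmc_v6_0_mc_stop
 *
 * @adev: amdgpu_device pointer
 *
 * Take the memory controller out of blackout mode and re-enable CPU
 * framebuffer reads and writes.
 */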
static void gmc_v6_0_mc_resume(struct amdgpu_device *adev)
{
	u32 tmp;

	/* unblackout the MC */
	tmp = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
	tmp = REG_SET_FIELD(tmp, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE, 0);
	WREG32(mmMC_SHARED_BLACKOUT_CNTL, tmp);
	/* allow CPU access */
	tmp = REG_SET_FIELD(0, BIF_FB_EN, FB_READ_EN, 1);
	tmp = REG_SET_FIELD(tmp, BIF_FB_EN, FB_WRITE_EN, 1);
	WREG32(mmBIF_FB_EN, tmp);
}

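/**
 * gmc_v6_0_init_microcode - fetch the MC firmware image
 *
 * @adev: amdgpu_device pointer
 *
 * Pick the MC firmware that matches the ASIC (or the special si58
 * image when the board reports memory configuration 0x58), request it
 * from userspace and validate it.  Returns 0 on success.
 */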
static int gmc_v6_0_init_microcode(struct amdgpu_device *adev)
{
	const char *chip_name;
	char fw_name[30];
	int err;
	bool is_58_fw = false;

	DRM_DEBUG("\n");

	switch (adev->asic_type) {
	case CHIP_TAHITI:
		chip_name = "tahiti";
		break;
	case CHIP_PITCAIRN:
		chip_name = "pitcairn";
		break;
	case CHIP_VERDE:
		chip_name = "verde";
		break;
	case CHIP_OLAND:
		chip_name = "oland";
		break;
	case CHIP_HAINAN:
		chip_name = "hainan";
		break;
	default:
		BUG();
	}

	/* this memory configuration requires special firmware */
	if (((RREG32(mmMC_SEQ_MISC0) & 0xff000000) >> 24) == 0x58)
		is_58_fw = true;

	if (is_58_fw)
		snprintf(fw_name, sizeof(fw_name), "amdgpu/si58_mc.bin");
	else
		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mc.bin", chip_name);
	err = request_firmware(&adev->gmc.fw, fw_name, adev->dev);
	if (err)
		goto out;

	err = amdgpu_ucode_validate(adev->gmc.fw);

out:
	if (err) {
		dev_err(adev->dev,
			"si_mc: Failed to load firmware \"%s\"\n",
			fw_name);
		release_firmware(adev->gmc.fw);
		adev->gmc.fw = NULL;
	}
	return err;
}

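/**
 * gmc_v6_0_mc_load_microcode - load MC ucode into the hardware
 *
 * @adev: amdgpu_device pointer
 *
 * Program the IO debug register pairs and the ucode image from the
 * firmware file into the MC sequencer, restart it and wait for memory
 * training of D0 and D1 to complete.  Skipped when the sequencer is
 * already running (e.g. started by the vbios).
 */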
static int gmc_v6_0_mc_load_microcode(struct amdgpu_device *adev)
{
	const __le32 *new_fw_data = NULL;
	u32 running;
	const __le32 *new_io_mc_regs = NULL;
	int i, regs_size, ucode_size;
	const struct mc_firmware_header_v1_0 *hdr;

	if (!adev->gmc.fw)
		return -EINVAL;

	hdr = (const struct mc_firmware_header_v1_0 *)adev->gmc.fw->data;

	amdgpu_ucode_print_mc_hdr(&hdr->header);

	adev->gmc.fw_version = le32_to_cpu(hdr->header.ucode_version);
	regs_size = le32_to_cpu(hdr->io_debug_size_bytes) / (4 * 2);
	new_io_mc_regs = (const __le32 *)
		(adev->gmc.fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes));
	ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
	new_fw_data = (const __le32 *)
		(adev->gmc.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));

	running = RREG32(mmMC_SEQ_SUP_CNTL) & MC_SEQ_SUP_CNTL__RUN_MASK;

	if (running == 0) {
		/* reset the engine and set to writable */
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000010);

		/* load mc io regs */
		for (i = 0; i < regs_size; i++) {
			WREG32(mmMC_SEQ_IO_DEBUG_INDEX, le32_to_cpup(new_io_mc_regs++));
			WREG32(mmMC_SEQ_IO_DEBUG_DATA, le32_to_cpup(new_io_mc_regs++));
		}
		/* load the MC ucode */
		for (i = 0; i < ucode_size; i++)
			WREG32(mmMC_SEQ_SUP_PGM, le32_to_cpup(new_fw_data++));

		/* put the engine back into the active state */
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000004);
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000001);

		/* wait for training to complete */
		for (i = 0; i < adev->usec_timeout; i++) {
			if (RREG32(mmMC_SEQ_TRAIN_WAKEUP_CNTL) & MC_SEQ_TRAIN_WAKEUP_CNTL__TRAIN_DONE_D0_MASK)
				break;
			udelay(1);
		}
		for (i = 0; i < adev->usec_timeout; i++) {
			if (RREG32(mmMC_SEQ_TRAIN_WAKEUP_CNTL) & MC_SEQ_TRAIN_WAKEUP_CNTL__TRAIN_DONE_D1_MASK)
				break;
			udelay(1);
		}
	}

	return 0;
}

static void gmc_v6_0_vram_gtt_location(struct amdgpu_device *adev,
				       struct amdgpu_gmc *mc)
{
	u64 base = RREG32(mmMC_VM_FB_LOCATION) & 0xFFFF;

	base <<= 24;

	amdgpu_gmc_vram_location(adev, mc, base);
	amdgpu_gmc_gart_location(adev, mc);
}

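/**
 * gmc_v6_0_mc_program - program the GPU memory controller
 *
 * @adev: amdgpu_device pointer
 *
 * Initialize the HDP registers, lock out VGA access to the framebuffer
 * and set up the VRAM system aperture and (disabled) AGP aperture.
 */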
static void gmc_v6_0_mc_program(struct amdgpu_device *adev)
{
	int i, j;

	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x6) {
		WREG32((0xb05 + j), 0x00000000);
		WREG32((0xb06 + j), 0x00000000);
		WREG32((0xb07 + j), 0x00000000);
		WREG32((0xb08 + j), 0x00000000);
		WREG32((0xb09 + j), 0x00000000);
	}
	WREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL, 0);

	if (gmc_v6_0_wait_for_idle((void *)adev))
		dev_warn(adev->dev, "Wait for MC idle timed out!\n");

	if (adev->mode_info.num_crtc) {
		u32 tmp;

		/* Lockout access through VGA aperture */
		tmp = RREG32(mmVGA_HDP_CONTROL);
		tmp |= VGA_HDP_CONTROL__VGA_MEMORY_DISABLE_MASK;
		WREG32(mmVGA_HDP_CONTROL, tmp);

		/* disable VGA render */
		tmp = RREG32(mmVGA_RENDER_CONTROL);
		tmp &= ~VGA_VSTATUS_CNTL;
		WREG32(mmVGA_RENDER_CONTROL, tmp);
	}
	/* Update configuration */
	WREG32(mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
	       adev->gmc.vram_start >> 12);
	WREG32(mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
	       adev->gmc.vram_end >> 12);
	WREG32(mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR,
	       adev->vram_scratch.gpu_addr >> 12);
	WREG32(mmMC_VM_AGP_BASE, 0);
	WREG32(mmMC_VM_AGP_TOP, 0x0FFFFFFF);
	WREG32(mmMC_VM_AGP_BOT, 0x0FFFFFFF);

	if (gmc_v6_0_wait_for_idle((void *)adev))
		dev_warn(adev->dev, "Wait for MC idle timed out!\n");
}

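/**
 * gmc_v6_0_mc_init - read back VRAM configuration
 *
 * @adev: amdgpu_device pointer
 *
 * Derive the VRAM bus width from the channel size and the number of
 * channels (for example, NOOFCHAN == 7 means 12 channels, which with
 * 32-bit channels gives a 384-bit interface), read the VRAM size from
 * mmCONFIG_MEMSIZE and pick the GART size before placing VRAM and GTT
 * in the GPU's address space.
 */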
static int gmc_v6_0_mc_init(struct amdgpu_device *adev)
{
	u32 tmp;
	int chansize, numchan;
	int r;

	tmp = RREG32(mmMC_ARB_RAMCFG);
	if (tmp & (1 << 11))
		chansize = 16;
	else if (tmp & MC_ARB_RAMCFG__CHANSIZE_MASK)
		chansize = 64;
	else
		chansize = 32;

	tmp = RREG32(mmMC_SHARED_CHMAP);
	switch ((tmp & MC_SHARED_CHMAP__NOOFCHAN_MASK) >> MC_SHARED_CHMAP__NOOFCHAN__SHIFT) {
	case 0:
	default:
		numchan = 1;
		break;
	case 1:
		numchan = 2;
		break;
	case 2:
		numchan = 4;
		break;
	case 3:
		numchan = 8;
		break;
	case 4:
		numchan = 3;
		break;
	case 5:
		numchan = 6;
		break;
	case 6:
		numchan = 10;
		break;
	case 7:
		numchan = 12;
		break;
	case 8:
		numchan = 16;
		break;
	}
	adev->gmc.vram_width = numchan * chansize;
	/* size in MB on si */
	adev->gmc.mc_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
	adev->gmc.real_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;

	if (!(adev->flags & AMD_IS_APU)) {
		r = amdgpu_device_resize_fb_bar(adev);
		if (r)
			return r;
	}
	adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
	adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);
	adev->gmc.visible_vram_size = adev->gmc.aper_size;

	/* set the gart size */
	if (amdgpu_gart_size == -1) {
		switch (adev->asic_type) {
		case CHIP_HAINAN:    /* no MM engines */
		default:
			adev->gmc.gart_size = 256ULL << 20;
			break;
		case CHIP_VERDE:    /* UVD, VCE do not support GPUVM */
		case CHIP_TAHITI:   /* UVD, VCE do not support GPUVM */
		case CHIP_PITCAIRN: /* UVD, VCE do not support GPUVM */
		case CHIP_OLAND:    /* UVD, VCE do not support GPUVM */
			adev->gmc.gart_size = 1024ULL << 20;
			break;
		}
	} else {
		adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;
	}

	adev->gmc.gart_size += adev->pm.smu_prv_buffer_size;
	gmc_v6_0_vram_gtt_location(adev, &adev->gmc);

	return 0;
}

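/**
 * gmc_v6_0_flush_gpu_tlb - flush the VM TLB for a given vmid
 *
 * @adev: amdgpu_device pointer
 * @vmid: VM context to invalidate
 * @vmhub: unused on SI (single VM hub)
 * @flush_type: unused on SI
 *
 * Bits 0-15 of mmVM_INVALIDATE_REQUEST select VM contexts 0-15.
 */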
static void gmc_v6_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
				   uint32_t vmhub, uint32_t flush_type)
{
	WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
}

static uint64_t gmc_v6_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
					    unsigned vmid, uint64_t pd_addr)
{
	uint32_t reg;

	/* write new base address */
	if (vmid < 8)
		reg = mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vmid;
	else
		reg = mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + (vmid - 8);
	amdgpu_ring_emit_wreg(ring, reg, pd_addr >> 12);

	/* bits 0-15 are the VM contexts0-15 */
	amdgpu_ring_emit_wreg(ring, mmVM_INVALIDATE_REQUEST, 1 << vmid);

	return pd_addr;
}

static void gmc_v6_0_get_vm_pde(struct amdgpu_device *adev, int level,
				uint64_t *addr, uint64_t *flags)
{
	BUG_ON(*addr & 0xFFFFFF0000000FFFULL);
}

static void gmc_v6_0_get_vm_pte(struct amdgpu_device *adev,
				struct amdgpu_bo_va_mapping *mapping,
				uint64_t *flags)
{
	*flags &= ~AMDGPU_PTE_EXECUTABLE;
	*flags &= ~AMDGPU_PTE_PRT;
}

static void gmc_v6_0_set_fault_enable_default(struct amdgpu_device *adev,
					      bool value)
{
	u32 tmp;

	tmp = RREG32(mmVM_CONTEXT1_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    VALID_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    READ_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	WREG32(mmVM_CONTEXT1_CNTL, tmp);
}

/**
 * gmc_v6_0_set_prt - set PRT VM fault
 *
 * @adev: amdgpu_device pointer
 * @enable: enable/disable VM fault handling for PRT
 */
static void gmc_v6_0_set_prt(struct amdgpu_device *adev, bool enable)
{
	u32 tmp;

	if (enable && !adev->gmc.prt_warning) {
		dev_warn(adev->dev, "Disabling VM faults because of PRT request!\n");
		adev->gmc.prt_warning = true;
	}

	tmp = RREG32(mmVM_PRT_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    CB_DISABLE_FAULT_ON_UNMAPPED_ACCESS,
			    enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    TC_DISABLE_FAULT_ON_UNMAPPED_ACCESS,
			    enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    L2_CACHE_STORE_INVALID_ENTRIES,
			    enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    L1_TLB_STORE_INVALID_ENTRIES,
			    enable);
	WREG32(mmVM_PRT_CNTL, tmp);

	if (enable) {
		uint32_t low = AMDGPU_VA_RESERVED_SIZE >> AMDGPU_GPU_PAGE_SHIFT;
		uint32_t high = adev->vm_manager.max_pfn -
			(AMDGPU_VA_RESERVED_SIZE >> AMDGPU_GPU_PAGE_SHIFT);

		WREG32(mmVM_PRT_APERTURE0_LOW_ADDR, low);
		WREG32(mmVM_PRT_APERTURE1_LOW_ADDR, low);
		WREG32(mmVM_PRT_APERTURE2_LOW_ADDR, low);
		WREG32(mmVM_PRT_APERTURE3_LOW_ADDR, low);
		WREG32(mmVM_PRT_APERTURE0_HIGH_ADDR, high);
		WREG32(mmVM_PRT_APERTURE1_HIGH_ADDR, high);
		WREG32(mmVM_PRT_APERTURE2_HIGH_ADDR, high);
		WREG32(mmVM_PRT_APERTURE3_HIGH_ADDR, high);
	} else {
		WREG32(mmVM_PRT_APERTURE0_LOW_ADDR, 0xfffffff);
		WREG32(mmVM_PRT_APERTURE1_LOW_ADDR, 0xfffffff);
		WREG32(mmVM_PRT_APERTURE2_LOW_ADDR, 0xfffffff);
		WREG32(mmVM_PRT_APERTURE3_LOW_ADDR, 0xfffffff);
		WREG32(mmVM_PRT_APERTURE0_HIGH_ADDR, 0x0);
		WREG32(mmVM_PRT_APERTURE1_HIGH_ADDR, 0x0);
		WREG32(mmVM_PRT_APERTURE2_HIGH_ADDR, 0x0);
		WREG32(mmVM_PRT_APERTURE3_HIGH_ADDR, 0x0);
	}
}

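/**
 * gmc_v6_0_gart_enable - set up and enable the GART
 *
 * @adev: amdgpu_device pointer
 *
 * Pin the GART page table in VRAM, program the L1 TLB and L2 cache
 * control registers, point VM context0 at the GART table and contexts
 * 1-15 at a valid placeholder page table, then flush the TLB.
 */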
static int gmc_v6_0_gart_enable(struct amdgpu_device *adev)
{
	uint64_t table_addr;
	int r, i;
	u32 field;

	if (adev->gart.bo == NULL) {
		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = amdgpu_gart_table_vram_pin(adev);
	if (r)
		return r;

	table_addr = amdgpu_bo_gpu_offset(adev->gart.bo);

	/* Setup TLB control */
	WREG32(mmMC_VM_MX_L1_TLB_CNTL,
	       (0xA << 7) |
	       MC_VM_MX_L1_TLB_CNTL__ENABLE_L1_TLB_MASK |
	       MC_VM_MX_L1_TLB_CNTL__ENABLE_L1_FRAGMENT_PROCESSING_MASK |
	       MC_VM_MX_L1_TLB_CNTL__SYSTEM_ACCESS_MODE_MASK |
	       MC_VM_MX_L1_TLB_CNTL__ENABLE_ADVANCED_DRIVER_MODEL_MASK |
	       (0UL << MC_VM_MX_L1_TLB_CNTL__SYSTEM_APERTURE_UNMAPPED_ACCESS__SHIFT));
	/* Setup L2 cache */
	WREG32(mmVM_L2_CNTL,
	       VM_L2_CNTL__ENABLE_L2_CACHE_MASK |
	       VM_L2_CNTL__ENABLE_L2_FRAGMENT_PROCESSING_MASK |
	       VM_L2_CNTL__ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE_MASK |
	       VM_L2_CNTL__ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE_MASK |
	       (7UL << VM_L2_CNTL__EFFECTIVE_L2_QUEUE_SIZE__SHIFT) |
	       (1UL << VM_L2_CNTL__CONTEXT1_IDENTITY_ACCESS_MODE__SHIFT));
	WREG32(mmVM_L2_CNTL2,
	       VM_L2_CNTL2__INVALIDATE_ALL_L1_TLBS_MASK |
	       VM_L2_CNTL2__INVALIDATE_L2_CACHE_MASK);

	field = adev->vm_manager.fragment_size;
	WREG32(mmVM_L2_CNTL3,
	       VM_L2_CNTL3__L2_CACHE_BIGK_ASSOCIATIVITY_MASK |
	       (field << VM_L2_CNTL3__BANK_SELECT__SHIFT) |
	       (field << VM_L2_CNTL3__L2_CACHE_BIGK_FRAGMENT_SIZE__SHIFT));
	/* setup context0 */
	WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->gmc.gart_start >> 12);
	WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->gmc.gart_end >> 12);
	WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, table_addr >> 12);
	WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
	       (u32)(adev->dummy_page_addr >> 12));
	WREG32(mmVM_CONTEXT0_CNTL2, 0);
	WREG32(mmVM_CONTEXT0_CNTL,
	       VM_CONTEXT0_CNTL__ENABLE_CONTEXT_MASK |
	       (0UL << VM_CONTEXT0_CNTL__PAGE_TABLE_DEPTH__SHIFT) |
	       VM_CONTEXT0_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK);

	WREG32(0x575, 0);
	WREG32(0x576, 0);
	WREG32(0x577, 0);

	/* empty context1-15 */
	/* set vm size, must be a multiple of 4 */
	WREG32(mmVM_CONTEXT1_PAGE_TABLE_START_ADDR, 0);
	WREG32(mmVM_CONTEXT1_PAGE_TABLE_END_ADDR, adev->vm_manager.max_pfn - 1);
	/* Assign the pt base to something valid for now; the pts used for
	 * the VMs are determined by the application and setup and assigned
	 * on the fly in the vm part of the driver
	 */
	for (i = 1; i < AMDGPU_NUM_VMID; i++) {
		if (i < 8)
			WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i,
			       table_addr >> 12);
		else
			WREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + i - 8,
			       table_addr >> 12);
	}

	/* enable context1-15 */
	WREG32(mmVM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
	       (u32)(adev->dummy_page_addr >> 12));
	WREG32(mmVM_CONTEXT1_CNTL2, 4);
	WREG32(mmVM_CONTEXT1_CNTL,
	       VM_CONTEXT1_CNTL__ENABLE_CONTEXT_MASK |
	       (1UL << VM_CONTEXT1_CNTL__PAGE_TABLE_DEPTH__SHIFT) |
	       ((adev->vm_manager.block_size - 9)
		<< VM_CONTEXT1_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT));
	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
		gmc_v6_0_set_fault_enable_default(adev, false);
	else
		gmc_v6_0_set_fault_enable_default(adev, true);

	gmc_v6_0_flush_gpu_tlb(adev, 0, 0, 0);
	dev_info(adev->dev, "PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(adev->gmc.gart_size >> 20),
		 (unsigned long long)table_addr);
	adev->gart.ready = true;
	return 0;
}

static int gmc_v6_0_gart_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->gart.bo) {
		dev_warn(adev->dev, "gmc_v6_0 PCIE GART already initialized\n");
		return 0;
	}
	/* Initialize common gart structure */
	r = amdgpu_gart_init(adev);
	if (r)
		return r;
	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
	adev->gart.gart_pte_flags = 0;
	return amdgpu_gart_table_vram_alloc(adev);
}

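/**
 * gmc_v6_0_gart_disable - disable the GART
 *
 * @adev: amdgpu_device pointer
 *
 * Turn off all VM contexts, disable the L1 TLB and L2 cache and unpin
 * the GART page table.
 */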
static void gmc_v6_0_gart_disable(struct amdgpu_device *adev)
{
	/* Disable all tables */
	WREG32(mmVM_CONTEXT0_CNTL, 0);
	WREG32(mmVM_CONTEXT1_CNTL, 0);
	/* Setup TLB control */
	WREG32(mmMC_VM_MX_L1_TLB_CNTL,
	       MC_VM_MX_L1_TLB_CNTL__SYSTEM_ACCESS_MODE_MASK |
	       (0UL << MC_VM_MX_L1_TLB_CNTL__SYSTEM_APERTURE_UNMAPPED_ACCESS__SHIFT));
	/* Setup L2 cache */
	WREG32(mmVM_L2_CNTL,
	       VM_L2_CNTL__ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE_MASK |
	       VM_L2_CNTL__ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE_MASK |
	       (7UL << VM_L2_CNTL__EFFECTIVE_L2_QUEUE_SIZE__SHIFT) |
	       (1UL << VM_L2_CNTL__CONTEXT1_IDENTITY_ACCESS_MODE__SHIFT));
	WREG32(mmVM_L2_CNTL2, 0);
	WREG32(mmVM_L2_CNTL3,
	       VM_L2_CNTL3__L2_CACHE_BIGK_ASSOCIATIVITY_MASK |
	       (0UL << VM_L2_CNTL3__L2_CACHE_BIGK_FRAGMENT_SIZE__SHIFT));
	amdgpu_gart_table_vram_unpin(adev);
}

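/**
 * gmc_v6_0_vm_decode_fault - print human readable fault info
 *
 * @adev: amdgpu_device pointer
 * @status: VM_CONTEXT1_PROTECTION_FAULT_STATUS register value
 * @addr: faulting page address
 * @mc_client: four-character memory client tag
 *
 * Decode the vmid, protection bits and memory client id from the
 * fault status register and log them.
 */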
static void gmc_v6_0_vm_decode_fault(struct amdgpu_device *adev,
				     u32 status, u32 addr, u32 mc_client)
{
	u32 mc_id;
	u32 vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, VMID);
	u32 protections = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
					PROTECTIONS);
	char block[5] = { mc_client >> 24, (mc_client >> 16) & 0xff,
			  (mc_client >> 8) & 0xff, mc_client & 0xff, 0 };

	mc_id = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
			      MEMORY_CLIENT_ID);

	dev_err(adev->dev, "VM fault (0x%02x, vmid %d) at page %u, %s from '%s' (0x%08x) (%d)\n",
		protections, vmid, addr,
		REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
			      MEMORY_CLIENT_RW) ?
		"write" : "read", block, mc_client, mc_id);
}

static int gmc_v6_0_convert_vram_type(int mc_seq_vram_type)
{
	switch (mc_seq_vram_type) {
	case MC_SEQ_MISC0__MT__GDDR1:
		return AMDGPU_VRAM_TYPE_GDDR1;
	case MC_SEQ_MISC0__MT__DDR2:
		return AMDGPU_VRAM_TYPE_DDR2;
	case MC_SEQ_MISC0__MT__GDDR3:
		return AMDGPU_VRAM_TYPE_GDDR3;
	case MC_SEQ_MISC0__MT__GDDR4:
		return AMDGPU_VRAM_TYPE_GDDR4;
	case MC_SEQ_MISC0__MT__GDDR5:
		return AMDGPU_VRAM_TYPE_GDDR5;
	case MC_SEQ_MISC0__MT__DDR3:
		return AMDGPU_VRAM_TYPE_DDR3;
	default:
		return AMDGPU_VRAM_TYPE_UNKNOWN;
	}
}

static int gmc_v6_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v6_0_set_gmc_funcs(adev);
	gmc_v6_0_set_irq_funcs(adev);

	return 0;
}

static int gmc_v6_0_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS)
		return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
	else
		return 0;
}

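/**
 * gmc_v6_0_get_vbios_fb_size - size of the vbios framebuffer allocation
 *
 * @adev: amdgpu_device pointer
 *
 * When the display is in VGA mode, use the fixed VGA allocation;
 * otherwise estimate the scanout buffer size from the active viewport
 * dimensions at 4 bytes per pixel.
 */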
static unsigned gmc_v6_0_get_vbios_fb_size(struct amdgpu_device *adev)
{
	u32 d1vga_control = RREG32(mmD1VGA_CONTROL);
	unsigned size;

	if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
		size = AMDGPU_VBIOS_VGA_ALLOCATION;
	} else {
		u32 viewport = RREG32(mmVIEWPORT_SIZE);

		size = (REG_GET_FIELD(viewport, VIEWPORT_SIZE, VIEWPORT_HEIGHT) *
			REG_GET_FIELD(viewport, VIEWPORT_SIZE, VIEWPORT_WIDTH) *
			4);
	}
	return size;
}

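/**
 * gmc_v6_0_sw_init - GMC software init
 *
 * @handle: amdgpu_device pointer (as void *)
 *
 * Detect the VRAM type, register the VM fault interrupt sources
 * (src ids 146 and 147), size the VM address space (40 bits on SI),
 * set the 44-bit DMA mask, load the MC firmware and initialize the
 * memory manager and GART.
 */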
static int gmc_v6_0_sw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->num_vmhubs = 1;

	if (adev->flags & AMD_IS_APU) {
		adev->gmc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
	} else {
		u32 tmp = RREG32(mmMC_SEQ_MISC0);

		tmp &= MC_SEQ_MISC0__MT__MASK;
		adev->gmc.vram_type = gmc_v6_0_convert_vram_type(tmp);
	}

	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 146, &adev->gmc.vm_fault);
	if (r)
		return r;

	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 147, &adev->gmc.vm_fault);
	if (r)
		return r;

	amdgpu_vm_adjust_size(adev, 64, 9, 1, 40);

	/* Set the internal MC address mask; this is the max address of the
	 * GPU's internal address space.
	 */
	adev->gmc.mc_mask = 0xffffffffffULL; /* 40 bit MC */

	r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(44));
	if (r) {
		dev_warn(adev->dev, "No suitable DMA available.\n");
		return r;
	}
	adev->need_swiotlb = drm_need_swiotlb(44);

	r = gmc_v6_0_init_microcode(adev);
	if (r) {
		dev_err(adev->dev, "Failed to load mc firmware!\n");
		return r;
	}

	r = gmc_v6_0_mc_init(adev);
	if (r)
		return r;

	amdgpu_gmc_get_vbios_allocations(adev);

	r = amdgpu_bo_init(adev);
	if (r)
		return r;

	r = gmc_v6_0_gart_init(adev);
	if (r)
		return r;

	/*
	 * number of VMs
	 * VMID 0 is reserved for System
	 * amdgpu graphics/compute will use VMIDs 1-7
	 * amdkfd will use VMIDs 8-15
	 */
	adev->vm_manager.first_kfd_vmid = 8;
	amdgpu_vm_manager_init(adev);

	/* base offset of vram pages */
	if (adev->flags & AMD_IS_APU) {
		u64 tmp = RREG32(mmMC_VM_FB_OFFSET);

		tmp <<= 22;
		adev->vm_manager.vram_base_offset = tmp;
	} else {
		adev->vm_manager.vram_base_offset = 0;
	}

	return 0;
}

static int gmc_v6_0_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_gem_force_release(adev);
	amdgpu_vm_manager_fini(adev);
	amdgpu_gart_table_vram_free(adev);
	amdgpu_bo_fini(adev);
	release_firmware(adev->gmc.fw);
	adev->gmc.fw = NULL;

	return 0;
}

static int gmc_v6_0_hw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v6_0_mc_program(adev);

	if (!(adev->flags & AMD_IS_APU)) {
		r = gmc_v6_0_mc_load_microcode(adev);
		if (r) {
			dev_err(adev->dev, "Failed to load MC firmware!\n");
			return r;
		}
	}

	return gmc_v6_0_gart_enable(adev);
}

static int gmc_v6_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
	gmc_v6_0_gart_disable(adev);

	return 0;
}

static int gmc_v6_0_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v6_0_hw_fini(adev);

	return 0;
}

static int gmc_v6_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = gmc_v6_0_hw_init(adev);
	if (r)
		return r;

	amdgpu_vmid_reset_all(adev);

	return 0;
}

961
962static bool gmc_v6_0_is_idle(void *handle)
963{
964 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
965 u32 tmp = RREG32(mmSRBM_STATUS);
966
967 if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
968 SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK | SRBM_STATUS__VMC_BUSY_MASK))
969 return false;
970
971 return true;
972}
973
974static int gmc_v6_0_wait_for_idle(void *handle)
975{
976 unsigned i;
977 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
978
979 for (i = 0; i < adev->usec_timeout; i++) {
980 if (gmc_v6_0_is_idle(handle))
981 return 0;
982 udelay(1);
983 }
984 return -ETIMEDOUT;
985
986}
987
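/**
 * gmc_v6_0_soft_reset - soft reset the memory controller
 *
 * @handle: amdgpu_device pointer (as void *)
 *
 * If the VM controller or memory controller blocks are busy, stop the
 * MC, pulse the corresponding bits in mmSRBM_SOFT_RESET and resume.
 */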
static int gmc_v6_0_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 srbm_soft_reset = 0;
	u32 tmp = RREG32(mmSRBM_STATUS);

	if (tmp & SRBM_STATUS__VMC_BUSY_MASK)
		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
						SRBM_SOFT_RESET, SOFT_RESET_VMC, 1);

	if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
		   SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK)) {
		if (!(adev->flags & AMD_IS_APU))
			srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
							SRBM_SOFT_RESET, SOFT_RESET_MC, 1);
	}

	if (srbm_soft_reset) {
		gmc_v6_0_mc_stop(adev);

		if (gmc_v6_0_wait_for_idle(adev))
			dev_warn(adev->dev, "Wait for GMC idle timed out!\n");

		tmp = RREG32(mmSRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~srbm_soft_reset;
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		udelay(50);

		gmc_v6_0_mc_resume(adev);
		udelay(50);
	}

	return 0;
}

static int gmc_v6_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
					     struct amdgpu_irq_src *src,
					     unsigned type,
					     enum amdgpu_interrupt_state state)
{
	u32 tmp;
	u32 bits = (VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK);

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		tmp = RREG32(mmVM_CONTEXT0_CNTL);
		tmp &= ~bits;
		WREG32(mmVM_CONTEXT0_CNTL, tmp);
		tmp = RREG32(mmVM_CONTEXT1_CNTL);
		tmp &= ~bits;
		WREG32(mmVM_CONTEXT1_CNTL, tmp);
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		tmp = RREG32(mmVM_CONTEXT0_CNTL);
		tmp |= bits;
		WREG32(mmVM_CONTEXT0_CNTL, tmp);
		tmp = RREG32(mmVM_CONTEXT1_CNTL);
		tmp |= bits;
		WREG32(mmVM_CONTEXT1_CNTL, tmp);
		break;
	default:
		break;
	}

	return 0;
}

static int gmc_v6_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	u32 addr, status;

	addr = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR);
	status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS);
	/* reset addr and status */
	WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1);

	if (!addr && !status)
		return 0;

	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_FIRST)
		gmc_v6_0_set_fault_enable_default(adev, false);

	if (printk_ratelimit()) {
		dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n",
			entry->src_id, entry->src_data[0]);
		dev_err(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n",
			addr);
		dev_err(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
			status);
		gmc_v6_0_vm_decode_fault(adev, status, addr, 0);
	}

	return 0;
}

static int gmc_v6_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	return 0;
}

static int gmc_v6_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	return 0;
}

static const struct amd_ip_funcs gmc_v6_0_ip_funcs = {
	.name = "gmc_v6_0",
	.early_init = gmc_v6_0_early_init,
	.late_init = gmc_v6_0_late_init,
	.sw_init = gmc_v6_0_sw_init,
	.sw_fini = gmc_v6_0_sw_fini,
	.hw_init = gmc_v6_0_hw_init,
	.hw_fini = gmc_v6_0_hw_fini,
	.suspend = gmc_v6_0_suspend,
	.resume = gmc_v6_0_resume,
	.is_idle = gmc_v6_0_is_idle,
	.wait_for_idle = gmc_v6_0_wait_for_idle,
	.soft_reset = gmc_v6_0_soft_reset,
	.set_clockgating_state = gmc_v6_0_set_clockgating_state,
	.set_powergating_state = gmc_v6_0_set_powergating_state,
};

static const struct amdgpu_gmc_funcs gmc_v6_0_gmc_funcs = {
	.flush_gpu_tlb = gmc_v6_0_flush_gpu_tlb,
	.emit_flush_gpu_tlb = gmc_v6_0_emit_flush_gpu_tlb,
	.set_prt = gmc_v6_0_set_prt,
	.get_vm_pde = gmc_v6_0_get_vm_pde,
	.get_vm_pte = gmc_v6_0_get_vm_pte,
	.get_vbios_fb_size = gmc_v6_0_get_vbios_fb_size,
};

static const struct amdgpu_irq_src_funcs gmc_v6_0_irq_funcs = {
	.set = gmc_v6_0_vm_fault_interrupt_state,
	.process = gmc_v6_0_process_interrupt,
};

static void gmc_v6_0_set_gmc_funcs(struct amdgpu_device *adev)
{
	adev->gmc.gmc_funcs = &gmc_v6_0_gmc_funcs;
}

static void gmc_v6_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->gmc.vm_fault.num_types = 1;
	adev->gmc.vm_fault.funcs = &gmc_v6_0_irq_funcs;
}

const struct amdgpu_ip_block_version gmc_v6_0_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_GMC,
	.major = 6,
	.minor = 0,
	.rev = 0,
	.funcs = &gmc_v6_0_ip_funcs,
};