/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include <linux/firmware.h>
#include <linux/pci.h>
#include "amdgpu.h"
#include "amdgpu_atomfirmware.h"
#include "gmc_v10_0.h"
#include "umc_v8_7.h"

#include "athub/athub_2_0_0_sh_mask.h"
#include "athub/athub_2_0_0_offset.h"
#include "dcn/dcn_2_0_0_offset.h"
#include "dcn/dcn_2_0_0_sh_mask.h"
#include "oss/osssys_5_0_0_offset.h"
#include "ivsrcid/vmc/irqsrcs_vmc_1_0.h"
#include "navi10_enum.h"

#include "soc15.h"
#include "soc15d.h"
#include "soc15_common.h"

#include "nbio_v2_3.h"

#include "gfxhub_v2_0.h"
#include "gfxhub_v2_1.h"
#include "mmhub_v2_0.h"
#include "mmhub_v2_3.h"
#include "athub_v2_0.h"
#include "athub_v2_1.h"

#if 0
static const struct soc15_reg_golden golden_settings_navi10_hdp[] =
{
	/* TODO: add golden settings for HDP */
};
#endif

static int gmc_v10_0_ecc_interrupt_state(struct amdgpu_device *adev,
					 struct amdgpu_irq_src *src,
					 unsigned type,
					 enum amdgpu_interrupt_state state)
{
	/* nothing to program for the ECC interrupt state on gfx10 */
	return 0;
}

static int
gmc_v10_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
				   struct amdgpu_irq_src *src, unsigned type,
				   enum amdgpu_interrupt_state state)
{
	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		/* MM HUB */
		amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_MMHUB_0, false);
		/* GFX HUB */
		amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB_0, false);
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		/* MM HUB */
		amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_MMHUB_0, true);
		/* GFX HUB */
		amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB_0, true);
		break;
	default:
		break;
	}

	return 0;
}

static int gmc_v10_0_process_interrupt(struct amdgpu_device *adev,
				       struct amdgpu_irq_src *source,
				       struct amdgpu_iv_entry *entry)
{
	bool retry_fault = !!(entry->src_data[1] & 0x80);
	bool write_fault = !!(entry->src_data[1] & 0x20);
	struct amdgpu_vmhub *hub = &adev->vmhub[entry->vmid_src];
	struct amdgpu_task_info task_info;
	uint32_t status = 0;
	u64 addr;

	addr = (u64)entry->src_data[0] << 12;
	addr |= ((u64)entry->src_data[1] & 0xf) << 44;

	if (retry_fault) {
		/* Only process the fault if we haven't seen it before;
		 * entries from the soft IH ring were already filtered once.
		 */
		if (entry->ih != &adev->irq.ih_soft &&
		    amdgpu_gmc_filter_faults(adev, addr, entry->pasid,
					     entry->timestamp))
			return 1;

		/* Delegate it to a different ring if the hardware hasn't
		 * already done it.
		 */
		if (entry->ih == &adev->irq.ih) {
			amdgpu_irq_delegate(adev, entry, 8);
			return 1;
		}

		/* Try to handle the recoverable page faults by filling page
		 * tables.
		 */
		if (amdgpu_vm_handle_fault(adev, entry->pasid, addr, write_fault))
			return 1;
	}

	if (!amdgpu_sriov_vf(adev)) {
		/*
		 * Issue a dummy read to wait for the status register to
		 * be updated to avoid reading an incorrect value due to
		 * the new fast GRBM interface.
		 */
		if ((entry->vmid_src == AMDGPU_GFXHUB_0) &&
		    (adev->asic_type < CHIP_SIENNA_CICHLID))
			RREG32(hub->vm_l2_pro_fault_status);

		status = RREG32(hub->vm_l2_pro_fault_status);
		WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1);
	}

	if (!printk_ratelimit())
		return 0;

	memset(&task_info, 0, sizeof(struct amdgpu_task_info));
	amdgpu_vm_get_task_info(adev, entry->pasid, &task_info);

	dev_err(adev->dev,
		"[%s] page fault (src_id:%u ring:%u vmid:%u pasid:%u, for process %s pid %d thread %s pid %d)\n",
		entry->vmid_src ? "mmhub" : "gfxhub",
		entry->src_id, entry->ring_id, entry->vmid,
		entry->pasid, task_info.process_name, task_info.tgid,
		task_info.task_name, task_info.pid);
	dev_err(adev->dev, " in page starting at address 0x%016llx from client 0x%x (%s)\n",
		addr, entry->client_id,
		soc15_ih_clientid_name[entry->client_id]);

	if (!amdgpu_sriov_vf(adev))
		hub->vmhub_funcs->print_l2_protection_fault_status(adev,
								   status);

	return 0;
}

static const struct amdgpu_irq_src_funcs gmc_v10_0_irq_funcs = {
	.set = gmc_v10_0_vm_fault_interrupt_state,
	.process = gmc_v10_0_process_interrupt,
};

static const struct amdgpu_irq_src_funcs gmc_v10_0_ecc_funcs = {
	.set = gmc_v10_0_ecc_interrupt_state,
	.process = amdgpu_umc_process_ecc_irq,
};

static void gmc_v10_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->gmc.vm_fault.num_types = 1;
	adev->gmc.vm_fault.funcs = &gmc_v10_0_irq_funcs;

	if (!amdgpu_sriov_vf(adev)) {
		adev->gmc.ecc_irq.num_types = 1;
		adev->gmc.ecc_irq.funcs = &gmc_v10_0_ecc_funcs;
	}
}
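
/*
 * GART
 * VMID 0 is the physical GPU addresses as used by the kernel.
 * VMIDs 1-15 are used for userspace clients and are handled
 * by the amdgpu vm/hsa code.
 */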

static bool gmc_v10_0_use_invalidate_semaphore(struct amdgpu_device *adev,
					       uint32_t vmhub)
{
	return ((vmhub == AMDGPU_MMHUB_0 ||
		 vmhub == AMDGPU_MMHUB_1) &&
		(!amdgpu_sriov_vf(adev)));
}

static bool gmc_v10_0_get_atc_vmid_pasid_mapping_info(
					struct amdgpu_device *adev,
					uint8_t vmid, uint16_t *p_pasid)
{
	uint32_t value;

	value = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING)
		       + vmid);
	*p_pasid = value & ATC_VMID0_PASID_MAPPING__PASID_MASK;

	return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK);
}
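
/**
 * gmc_v10_0_flush_vm_hub - flush the TLBs of one VM hub via MMIO
 *
 * @adev: amdgpu_device pointer
 * @vmid: vm instance to flush
 * @vmhub: which hub to flush
 * @flush_type: the flush type
 */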
static void gmc_v10_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid,
				   unsigned int vmhub, uint32_t flush_type)
{
	bool use_semaphore = gmc_v10_0_use_invalidate_semaphore(adev, vmhub);
	struct amdgpu_vmhub *hub = &adev->vmhub[vmhub];
	u32 inv_req = hub->vmhub_funcs->get_invalidate_req(vmid, flush_type);
	u32 tmp;
	/* Use register 17 for GART */
	const unsigned eng = 17;
	unsigned int i;
	unsigned char hub_ip = 0;

	hub_ip = (vmhub == AMDGPU_GFXHUB_0) ?
		 GC_HWIP : MMHUB_HWIP;

	spin_lock(&adev->gmc.invalidate_lock);
	/*
	 * It may lose gpuvm invalidate acknowledge state across power-gating
	 * off cycle, add semaphore acquire before invalidation and semaphore
	 * release after invalidation to avoid entering power gated state
	 * to WA the Issue
	 */

	/* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */
	if (use_semaphore) {
		for (i = 0; i < adev->usec_timeout; i++) {
			/* a read return value of 1 means semaphore acquire */
			tmp = RREG32_RLC_NO_KIQ(hub->vm_inv_eng0_sem +
						hub->eng_distance * eng, hub_ip);

			if (tmp & 0x1)
				break;
			udelay(1);
		}

		if (i >= adev->usec_timeout)
			DRM_ERROR("Timeout waiting for sem acquire in VM flush!\n");
	}

	WREG32_RLC_NO_KIQ(hub->vm_inv_eng0_req +
			  hub->eng_distance * eng,
			  inv_req, hub_ip);

	/*
	 * Issue a dummy read to wait for the ACK register to be cleared
	 * to avoid a false ACK due to the new fast GRBM interface.
	 */
	if ((vmhub == AMDGPU_GFXHUB_0) &&
	    (adev->asic_type < CHIP_SIENNA_CICHLID))
		RREG32_RLC_NO_KIQ(hub->vm_inv_eng0_req +
				  hub->eng_distance * eng, hub_ip);

	/* Wait for ACK with a delay. */
	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32_RLC_NO_KIQ(hub->vm_inv_eng0_ack +
					hub->eng_distance * eng, hub_ip);

		tmp &= 1 << vmid;
		if (tmp)
			break;

		udelay(1);
	}

	/* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */
	if (use_semaphore)
		/*
		 * add semaphore release after invalidation,
		 * write with 0 means semaphore release
		 */
		WREG32_RLC_NO_KIQ(hub->vm_inv_eng0_sem +
				  hub->eng_distance * eng, 0, hub_ip);

	spin_unlock(&adev->gmc.invalidate_lock);

	if (i < adev->usec_timeout)
		return;

	DRM_ERROR("Timeout waiting for VM flush hub: %d!\n", vmhub);
}
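
/**
 * gmc_v10_0_flush_gpu_tlb - gmc flush gpu tlb
 *
 * @adev: amdgpu_device pointer
 * @vmid: vm instance to flush
 * @vmhub: vmhub type
 * @flush_type: the flush type
 *
 * Flush the TLB for the requested page table.
 */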
static void gmc_v10_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
				    uint32_t vmhub, uint32_t flush_type)
{
	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
	struct dma_fence *fence;
	struct amdgpu_job *job;
	int r;

	/* flush hdp cache */
	adev->hdp.funcs->flush_hdp(adev, NULL);

	/* For SRIOV run time, driver shouldn't access the register through MMIO
	 * Directly use kiq to do the vm invalidation instead
	 */
	if (adev->gfx.kiq.ring.sched.ready &&
	    (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev)) &&
	    down_read_trylock(&adev->reset_sem)) {
		struct amdgpu_vmhub *hub = &adev->vmhub[vmhub];
		const unsigned eng = 17;
		u32 inv_req = hub->vmhub_funcs->get_invalidate_req(vmid, flush_type);
		u32 req = hub->vm_inv_eng0_req + hub->eng_distance * eng;
		u32 ack = hub->vm_inv_eng0_ack + hub->eng_distance * eng;

		amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, inv_req,
						   1 << vmid);

		up_read(&adev->reset_sem);
		return;
	}

	mutex_lock(&adev->mman.gtt_window_lock);

	if (vmhub == AMDGPU_MMHUB_0) {
		gmc_v10_0_flush_vm_hub(adev, vmid, AMDGPU_MMHUB_0, 0);
		mutex_unlock(&adev->mman.gtt_window_lock);
		return;
	}

	BUG_ON(vmhub != AMDGPU_GFXHUB_0);

	if (!adev->mman.buffer_funcs_enabled ||
	    !adev->ib_pool_ready ||
	    amdgpu_in_reset(adev) ||
	    !ring->sched.ready) {
		gmc_v10_0_flush_vm_hub(adev, vmid, AMDGPU_GFXHUB_0, 0);
		mutex_unlock(&adev->mman.gtt_window_lock);
		return;
	}

	/*
	 * To flush the GFX hub, submit a dummy SDMA paging job with
	 * vm_needs_flush set; the scheduler then performs the VM flush
	 * as part of running the job.
	 */
	r = amdgpu_job_alloc_with_ib(adev, 16 * 4, AMDGPU_IB_POOL_IMMEDIATE,
				     &job);
	if (r)
		goto error_alloc;

	job->vm_pd_addr = amdgpu_gmc_pd_addr(adev->gart.bo);
	job->vm_needs_flush = true;
	job->ibs->ptr[job->ibs->length_dw++] = ring->funcs->nop;
	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
	r = amdgpu_job_submit(job, &adev->mman.entity,
			      AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
	if (r)
		goto error_submit;

	mutex_unlock(&adev->mman.gtt_window_lock);

	dma_fence_wait(fence, false);
	dma_fence_put(fence);

	return;

error_submit:
	amdgpu_job_free(job);

error_alloc:
	mutex_unlock(&adev->mman.gtt_window_lock);
	DRM_ERROR("Error flushing GPU TLB using the SDMA (%d)!\n", r);
}
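
/**
 * gmc_v10_0_flush_gpu_tlb_pasid - tlb flush via pasid
 *
 * @adev: amdgpu_device pointer
 * @pasid: pasid to be flush
 * @flush_type: the flush type
 * @all_hub: flush all hubs or only the GFX hub
 *
 * Flush the TLB for the requested pasid.
 */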
static int gmc_v10_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
					 uint16_t pasid, uint32_t flush_type,
					 bool all_hub)
{
	int vmid, i;
	signed long r;
	uint32_t seq;
	uint16_t queried_pasid;
	bool ret;
	struct amdgpu_ring *ring = &adev->gfx.kiq.ring;
	struct amdgpu_kiq *kiq = &adev->gfx.kiq;

	if (amdgpu_emu_mode == 0 && ring->sched.ready) {
		spin_lock(&adev->gfx.kiq.ring_lock);
		/* 2 dwords flush + 8 dwords fence */
		amdgpu_ring_alloc(ring, kiq->pmf->invalidate_tlbs_size + 8);
		kiq->pmf->kiq_invalidate_tlbs(ring,
					      pasid, flush_type, all_hub);
		r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
		if (r) {
			amdgpu_ring_undo(ring);
			spin_unlock(&adev->gfx.kiq.ring_lock);
			return -ETIME;
		}

		amdgpu_ring_commit(ring);
		spin_unlock(&adev->gfx.kiq.ring_lock);
		r = amdgpu_fence_wait_polling(ring, seq, adev->usec_timeout);
		if (r < 1) {
			dev_err(adev->dev, "wait for kiq fence error: %ld.\n", r);
			return -ETIME;
		}

		return 0;
	}

	for (vmid = 1; vmid < AMDGPU_NUM_VMID; vmid++) {
		ret = gmc_v10_0_get_atc_vmid_pasid_mapping_info(adev, vmid,
								&queried_pasid);
		if (ret && queried_pasid == pasid) {
			if (all_hub) {
				for (i = 0; i < adev->num_vmhubs; i++)
					gmc_v10_0_flush_gpu_tlb(adev, vmid,
								i, flush_type);
			} else {
				gmc_v10_0_flush_gpu_tlb(adev, vmid,
							AMDGPU_GFXHUB_0, flush_type);
			}
			break;
		}
	}

	return 0;
}

static uint64_t gmc_v10_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
					     unsigned vmid, uint64_t pd_addr)
{
	bool use_semaphore = gmc_v10_0_use_invalidate_semaphore(ring->adev, ring->funcs->vmhub);
	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
	uint32_t req = hub->vmhub_funcs->get_invalidate_req(vmid, 0);
	unsigned eng = ring->vm_inv_eng;

	/*
	 * It may lose gpuvm invalidate acknowledge state across power-gating
	 * off cycle, add semaphore acquire before invalidation and semaphore
	 * release after invalidation to avoid entering power gated state
	 * to WA the Issue
	 */

	/* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */
	if (use_semaphore)
		/* a read return value of 1 means semaphore acquire */
		amdgpu_ring_emit_reg_wait(ring,
					  hub->vm_inv_eng0_sem +
					  hub->eng_distance * eng, 0x1, 0x1);

	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_lo32 +
			      (hub->ctx_addr_distance * vmid),
			      lower_32_bits(pd_addr));

	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_hi32 +
			      (hub->ctx_addr_distance * vmid),
			      upper_32_bits(pd_addr));

	amdgpu_ring_emit_reg_write_reg_wait(ring, hub->vm_inv_eng0_req +
					    hub->eng_distance * eng,
					    hub->vm_inv_eng0_ack +
					    hub->eng_distance * eng,
					    req, 1 << vmid);

	/* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */
	if (use_semaphore)
		/*
		 * add semaphore release after invalidation,
		 * write with 0 means semaphore release
		 */
		amdgpu_ring_emit_wreg(ring, hub->vm_inv_eng0_sem +
				      hub->eng_distance * eng, 0);

	return pd_addr;
}

static void gmc_v10_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid,
					 unsigned pasid)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t reg;

	if (ring->funcs->vmhub == AMDGPU_GFXHUB_0)
		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT) + vmid;
	else
		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT_MM) + vmid;

	amdgpu_ring_emit_wreg(ring, reg, pasid);
}
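
/*
 * PTE format on NAVI 10 (see the AMDGPU_PTE_ and AMDGPU_PDE_ flag
 * definitions in amdgpu_vm.h):
 * 63:59 reserved
 * 58 reserved (MALL no-alloc on Sienna Cichlid)
 * 57 reserved
 * 56 F
 * 55 L
 * 54 reserved
 * 53:52 SW
 * 51 T
 * 50:48 mtype
 * 47:12 4k physical page base address
 * 11:7 fragment
 * 6 write
 * 5 read
 * 4 exe
 * 3 Z
 * 2 snooped
 * 1 system
 * 0 valid
 *
 * PDE format on NAVI 10:
 * 63:59 block fragment size
 * 58:55 reserved
 * 54 P
 * 53:48 reserved
 * 47:6 physical base address of PD or PTE
 * 5:3 reserved
 * 2 C
 * 1 system
 * 0 valid
 */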
static uint64_t gmc_v10_0_map_mtype(struct amdgpu_device *adev, uint32_t flags)
{
	switch (flags) {
	case AMDGPU_VM_MTYPE_DEFAULT:
		return AMDGPU_PTE_MTYPE_NV10(MTYPE_NC);
	case AMDGPU_VM_MTYPE_NC:
		return AMDGPU_PTE_MTYPE_NV10(MTYPE_NC);
	case AMDGPU_VM_MTYPE_WC:
		return AMDGPU_PTE_MTYPE_NV10(MTYPE_WC);
	case AMDGPU_VM_MTYPE_CC:
		return AMDGPU_PTE_MTYPE_NV10(MTYPE_CC);
	case AMDGPU_VM_MTYPE_UC:
		return AMDGPU_PTE_MTYPE_NV10(MTYPE_UC);
	default:
		return AMDGPU_PTE_MTYPE_NV10(MTYPE_NC);
	}
}

static void gmc_v10_0_get_vm_pde(struct amdgpu_device *adev, int level,
				 uint64_t *addr, uint64_t *flags)
{
	if (!(*flags & AMDGPU_PDE_PTE) && !(*flags & AMDGPU_PTE_SYSTEM))
		*addr = amdgpu_gmc_vram_mc2pa(adev, *addr);
	BUG_ON(*addr & 0xFFFF00000000003FULL);

	if (!adev->gmc.translate_further)
		return;

	if (level == AMDGPU_VM_PDB1) {
		/* Set the block fragment size */
		if (!(*flags & AMDGPU_PDE_PTE))
			*flags |= AMDGPU_PDE_BFS(0x9);

	} else if (level == AMDGPU_VM_PDB0) {
		if (*flags & AMDGPU_PDE_PTE)
			*flags &= ~AMDGPU_PDE_PTE;
		else
			*flags |= AMDGPU_PTE_TF;
	}
}

static void gmc_v10_0_get_vm_pte(struct amdgpu_device *adev,
				 struct amdgpu_bo_va_mapping *mapping,
				 uint64_t *flags)
{
	*flags &= ~AMDGPU_PTE_EXECUTABLE;
	*flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;

	*flags &= ~AMDGPU_PTE_MTYPE_NV10_MASK;
	*flags |= (mapping->flags & AMDGPU_PTE_MTYPE_NV10_MASK);

	if (mapping->flags & AMDGPU_PTE_PRT) {
		*flags |= AMDGPU_PTE_PRT;
		*flags |= AMDGPU_PTE_SNOOPED;
		*flags |= AMDGPU_PTE_LOG;
		*flags |= AMDGPU_PTE_SYSTEM;
		*flags &= ~AMDGPU_PTE_VALID;
	}
}

static unsigned gmc_v10_0_get_vbios_fb_size(struct amdgpu_device *adev)
{
	u32 d1vga_control = RREG32_SOC15(DCE, 0, mmD1VGA_CONTROL);
	unsigned size;

	if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
		size = AMDGPU_VBIOS_VGA_ALLOCATION;
	} else {
		u32 viewport;
		u32 pitch;

		viewport = RREG32_SOC15(DCE, 0, mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION);
		pitch = RREG32_SOC15(DCE, 0, mmHUBPREQ0_DCSURF_SURFACE_PITCH);
		/* fb size = viewport height * surface pitch * 4 bytes per pixel */
		size = (REG_GET_FIELD(viewport,
				      HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_HEIGHT) *
			REG_GET_FIELD(pitch, HUBPREQ0_DCSURF_SURFACE_PITCH, PITCH) *
			4);
	}

	return size;
}

static const struct amdgpu_gmc_funcs gmc_v10_0_gmc_funcs = {
	.flush_gpu_tlb = gmc_v10_0_flush_gpu_tlb,
	.flush_gpu_tlb_pasid = gmc_v10_0_flush_gpu_tlb_pasid,
	.emit_flush_gpu_tlb = gmc_v10_0_emit_flush_gpu_tlb,
	.emit_pasid_mapping = gmc_v10_0_emit_pasid_mapping,
	.map_mtype = gmc_v10_0_map_mtype,
	.get_vm_pde = gmc_v10_0_get_vm_pde,
	.get_vm_pte = gmc_v10_0_get_vm_pte,
	.get_vbios_fb_size = gmc_v10_0_get_vbios_fb_size,
};

static void gmc_v10_0_set_gmc_funcs(struct amdgpu_device *adev)
{
	if (adev->gmc.gmc_funcs == NULL)
		adev->gmc.gmc_funcs = &gmc_v10_0_gmc_funcs;
}

static void gmc_v10_0_set_umc_funcs(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_SIENNA_CICHLID:
		adev->umc.max_ras_err_cnt_per_query = UMC_V8_7_TOTAL_CHANNEL_NUM;
		adev->umc.channel_inst_num = UMC_V8_7_CHANNEL_INSTANCE_NUM;
		adev->umc.umc_inst_num = UMC_V8_7_UMC_INSTANCE_NUM;
		adev->umc.channel_offs = UMC_V8_7_PER_CHANNEL_OFFSET_SIENNA;
		adev->umc.channel_idx_tbl = &umc_v8_7_channel_idx_tbl[0][0];
		adev->umc.ras_funcs = &umc_v8_7_ras_funcs;
		break;
	default:
		break;
	}
}

static void gmc_v10_0_set_mmhub_funcs(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_VANGOGH:
	case CHIP_YELLOW_CARP:
		adev->mmhub.funcs = &mmhub_v2_3_funcs;
		break;
	default:
		adev->mmhub.funcs = &mmhub_v2_0_funcs;
		break;
	}
}

static void gmc_v10_0_set_gfxhub_funcs(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_SIENNA_CICHLID:
	case CHIP_NAVY_FLOUNDER:
	case CHIP_VANGOGH:
	case CHIP_DIMGREY_CAVEFISH:
	case CHIP_BEIGE_GOBY:
	case CHIP_YELLOW_CARP:
		adev->gfxhub.funcs = &gfxhub_v2_1_funcs;
		break;
	default:
		adev->gfxhub.funcs = &gfxhub_v2_0_funcs;
		break;
	}
}

static int gmc_v10_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v10_0_set_mmhub_funcs(adev);
	gmc_v10_0_set_gfxhub_funcs(adev);
	gmc_v10_0_set_gmc_funcs(adev);
	gmc_v10_0_set_irq_funcs(adev);
	gmc_v10_0_set_umc_funcs(adev);

	adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
	adev->gmc.shared_aperture_end =
		adev->gmc.shared_aperture_start + (4ULL << 30) - 1;
	adev->gmc.private_aperture_start = 0x1000000000000000ULL;
	adev->gmc.private_aperture_end =
		adev->gmc.private_aperture_start + (4ULL << 30) - 1;

	return 0;
}

static int gmc_v10_0_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = amdgpu_gmc_allocate_vm_inv_eng(adev);
	if (r)
		return r;

	r = amdgpu_gmc_ras_late_init(adev);
	if (r)
		return r;

	return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
}

static void gmc_v10_0_vram_gtt_location(struct amdgpu_device *adev,
					struct amdgpu_gmc *mc)
{
	u64 base = 0;

	base = adev->gfxhub.funcs->get_fb_location(adev);

	/* add the xgmi offset of the physical node */
	base += adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;

	amdgpu_gmc_vram_location(adev, &adev->gmc, base);
	amdgpu_gmc_gart_location(adev, mc);
	amdgpu_gmc_agp_location(adev, mc);

	/* base offset of vram pages */
	adev->vm_manager.vram_base_offset = adev->gfxhub.funcs->get_mc_fb_offset(adev);

	/* add the xgmi offset of the physical node */
	adev->vm_manager.vram_base_offset +=
		adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
}
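
/**
 * gmc_v10_0_mc_init - initialize the memory controller driver params
 *
 * @adev: amdgpu_device pointer
 *
 * Look up the amount of vram, vram width, and decide how to place
 * vram and gart within the GPU's physical address space.
 * Returns 0 for success.
 */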
static int gmc_v10_0_mc_init(struct amdgpu_device *adev)
{
	int r;

	/* get_memsize() reports the amount of VRAM in MB */
	adev->gmc.mc_vram_size =
		adev->nbio.funcs->get_memsize(adev) * 1024ULL * 1024ULL;
	adev->gmc.real_vram_size = adev->gmc.mc_vram_size;

	if (!(adev->flags & AMD_IS_APU)) {
		r = amdgpu_device_resize_fb_bar(adev);
		if (r)
			return r;
	}
	adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
	adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);

#ifdef CONFIG_X86_64
	if (adev->flags & AMD_IS_APU) {
		adev->gmc.aper_base = adev->gfxhub.funcs->get_mc_fb_offset(adev);
		adev->gmc.aper_size = adev->gmc.real_vram_size;
	}
#endif

	/* In case the PCI BAR is larger than the actual amount of vram */
	adev->gmc.visible_vram_size = adev->gmc.aper_size;
	if (adev->gmc.visible_vram_size > adev->gmc.real_vram_size)
		adev->gmc.visible_vram_size = adev->gmc.real_vram_size;

	/* set the gart size */
	if (amdgpu_gart_size == -1) {
		switch (adev->asic_type) {
		case CHIP_NAVI10:
		case CHIP_NAVI14:
		case CHIP_NAVI12:
		case CHIP_SIENNA_CICHLID:
		case CHIP_NAVY_FLOUNDER:
		case CHIP_VANGOGH:
		case CHIP_DIMGREY_CAVEFISH:
		case CHIP_BEIGE_GOBY:
		case CHIP_YELLOW_CARP:
		case CHIP_CYAN_SKILLFISH:
		default:
			adev->gmc.gart_size = 512ULL << 20;
			break;
		}
	} else
		adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;

	gmc_v10_0_vram_gtt_location(adev, &adev->gmc);

	return 0;
}

static int gmc_v10_0_gart_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->gart.bo) {
		WARN(1, "NAVI10 PCIE GART already initialized\n");
		return 0;
	}

	/* Initialize common gart structure */
	r = amdgpu_gart_init(adev);
	if (r)
		return r;

	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
	adev->gart.gart_pte_flags = AMDGPU_PTE_MTYPE_NV10(MTYPE_UC) |
				    AMDGPU_PTE_EXECUTABLE;

	return amdgpu_gart_table_vram_alloc(adev);
}

static int gmc_v10_0_sw_init(void *handle)
{
	int r, vram_width = 0, vram_type = 0, vram_vendor = 0;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->gfxhub.funcs->init(adev);

	adev->mmhub.funcs->init(adev);

	spin_lock_init(&adev->gmc.invalidate_lock);

	if ((adev->flags & AMD_IS_APU) && amdgpu_emu_mode == 1) {
		adev->gmc.vram_type = AMDGPU_VRAM_TYPE_DDR4;
		adev->gmc.vram_width = 64;
	} else if (amdgpu_emu_mode == 1) {
		adev->gmc.vram_type = AMDGPU_VRAM_TYPE_GDDR6;
		adev->gmc.vram_width = 1 * 128; /* numchan * chansize */
	} else {
		r = amdgpu_atomfirmware_get_vram_info(adev,
				&vram_width, &vram_type, &vram_vendor);
		adev->gmc.vram_width = vram_width;
		adev->gmc.vram_type = vram_type;
		adev->gmc.vram_vendor = vram_vendor;
	}

	switch (adev->asic_type) {
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_NAVI12:
	case CHIP_SIENNA_CICHLID:
	case CHIP_NAVY_FLOUNDER:
	case CHIP_VANGOGH:
	case CHIP_DIMGREY_CAVEFISH:
	case CHIP_BEIGE_GOBY:
	case CHIP_YELLOW_CARP:
	case CHIP_CYAN_SKILLFISH:
		adev->num_vmhubs = 2;
		/*
		 * To fulfill 4-level page support,
		 * vm size is 256TB (48bit), maximum size of Navi10/Navi14/Navi12,
		 * block size 512 (9bit)
		 */
		amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
		break;
	default:
		break;
	}

	/* This interrupt is VMC page fault. */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC,
			      VMC_1_0__SRCID__VM_FAULT,
			      &adev->gmc.vm_fault);
	if (r)
		return r;

	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UTCL2,
			      UTCL2_1_0__SRCID__FAULT,
			      &adev->gmc.vm_fault);
	if (r)
		return r;

	if (!amdgpu_sriov_vf(adev)) {
		/* interrupt sent to DF. */
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DF, 0,
				      &adev->gmc.ecc_irq);
		if (r)
			return r;
	}

	/*
	 * Set the internal MC address mask. This is the max address of the
	 * GPU's internal address space.
	 */
	adev->gmc.mc_mask = 0xffffffffffffULL; /* 48 bit MC */

	r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(44));
	if (r) {
		printk(KERN_WARNING "amdgpu: No suitable DMA available.\n");
		return r;
	}

	if (adev->gmc.xgmi.supported) {
		r = adev->gfxhub.funcs->get_xgmi_info(adev);
		if (r)
			return r;
	}

	r = gmc_v10_0_mc_init(adev);
	if (r)
		return r;

	amdgpu_gmc_get_vbios_allocations(adev);
	amdgpu_gmc_get_reserved_allocation(adev);

	/* Memory manager */
	r = amdgpu_bo_init(adev);
	if (r)
		return r;

	r = gmc_v10_0_gart_init(adev);
	if (r)
		return r;

	/*
	 * number of VMs
	 * VMID 0 is reserved for System
	 * amdgpu graphics/compute will use VMIDs 1-7
	 * amdkfd will use VMIDs 8-15
	 */
	adev->vm_manager.first_kfd_vmid = 8;

	amdgpu_vm_manager_init(adev);

	return 0;
}
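
/**
 * gmc_v10_0_gart_fini - vm fini callback
 *
 * @adev: amdgpu_device pointer
 *
 * Tears down the driver GART/VM setup.
 */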
static void gmc_v10_0_gart_fini(struct amdgpu_device *adev)
{
	amdgpu_gart_table_vram_free(adev);
}

static int gmc_v10_0_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_vm_manager_fini(adev);
	gmc_v10_0_gart_fini(adev);
	amdgpu_gem_force_release(adev);
	amdgpu_bo_fini(adev);

	return 0;
}

static void gmc_v10_0_init_golden_registers(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_NAVI12:
	case CHIP_SIENNA_CICHLID:
	case CHIP_NAVY_FLOUNDER:
	case CHIP_VANGOGH:
	case CHIP_DIMGREY_CAVEFISH:
	case CHIP_BEIGE_GOBY:
	case CHIP_YELLOW_CARP:
	case CHIP_CYAN_SKILLFISH:
		/* no golden settings to program for these ASICs yet */
		break;
	default:
		break;
	}
}
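
/**
 * gmc_v10_0_gart_enable - gart enable
 *
 * @adev: amdgpu_device pointer
 */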
static int gmc_v10_0_gart_enable(struct amdgpu_device *adev)
{
	int r;
	bool value;

	if (adev->gart.bo == NULL) {
		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}

	r = amdgpu_gart_table_vram_pin(adev);
	if (r)
		return r;

	r = adev->gfxhub.funcs->gart_enable(adev);
	if (r)
		return r;

	r = adev->mmhub.funcs->gart_enable(adev);
	if (r)
		return r;

	adev->hdp.funcs->init_registers(adev);

	/* Flush HDP after it is initialized */
	adev->hdp.funcs->flush_hdp(adev, NULL);

	value = (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS);

	adev->gfxhub.funcs->set_fault_enable_default(adev, value);
	adev->mmhub.funcs->set_fault_enable_default(adev, value);
	gmc_v10_0_flush_gpu_tlb(adev, 0, AMDGPU_MMHUB_0, 0);
	gmc_v10_0_flush_gpu_tlb(adev, 0, AMDGPU_GFXHUB_0, 0);

	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(adev->gmc.gart_size >> 20),
		 (unsigned long long)amdgpu_bo_gpu_offset(adev->gart.bo));

	adev->gart.ready = true;

	return 0;
}

static int gmc_v10_0_hw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* The sequence of these two function calls matters. */
	gmc_v10_0_init_golden_registers(adev);

	/*
	 * Harvestable UTCL2 groups need to be programmed before the
	 * GART is enabled.
	 */
	if (adev->gfxhub.funcs && adev->gfxhub.funcs->utcl2_harvest)
		adev->gfxhub.funcs->utcl2_harvest(adev);

	r = gmc_v10_0_gart_enable(adev);
	if (r)
		return r;

	if (adev->umc.funcs && adev->umc.funcs->init_registers)
		adev->umc.funcs->init_registers(adev);

	return 0;
}
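
/**
 * gmc_v10_0_gart_disable - gart disable
 *
 * @adev: amdgpu_device pointer
 *
 * This disables all VM page table.
 */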
static void gmc_v10_0_gart_disable(struct amdgpu_device *adev)
{
	adev->gfxhub.funcs->gart_disable(adev);
	adev->mmhub.funcs->gart_disable(adev);
	amdgpu_gart_table_vram_unpin(adev);
}

static int gmc_v10_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v10_0_gart_disable(adev);

	if (amdgpu_sriov_vf(adev)) {
		/* full access mode, so don't touch any GMC register */
		DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
		return 0;
	}

	amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0);
	amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);

	return 0;
}

static int gmc_v10_0_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v10_0_hw_fini(adev);

	return 0;
}

static int gmc_v10_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = gmc_v10_0_hw_init(adev);
	if (r)
		return r;

	amdgpu_vmid_reset_all(adev);

	return 0;
}

static bool gmc_v10_0_is_idle(void *handle)
{
	/* MC is always ready in GMC v10. */
	return true;
}

static int gmc_v10_0_wait_for_idle(void *handle)
{
	/* There is no need to wait for MC idle in GMC v10. */
	return 0;
}

static int gmc_v10_0_soft_reset(void *handle)
{
	return 0;
}

static int gmc_v10_0_set_clockgating_state(void *handle,
					   enum amd_clockgating_state state)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = adev->mmhub.funcs->set_clockgating(adev, state);
	if (r)
		return r;

	if (adev->asic_type >= CHIP_SIENNA_CICHLID &&
	    adev->asic_type <= CHIP_YELLOW_CARP)
		return athub_v2_1_set_clockgating(adev, state);
	else
		return athub_v2_0_set_clockgating(adev, state);
}

static void gmc_v10_0_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->mmhub.funcs->get_clockgating(adev, flags);

	if (adev->asic_type >= CHIP_SIENNA_CICHLID &&
	    adev->asic_type <= CHIP_YELLOW_CARP)
		athub_v2_1_get_clockgating(adev, flags);
	else
		athub_v2_0_get_clockgating(adev, flags);
}

static int gmc_v10_0_set_powergating_state(void *handle,
					   enum amd_powergating_state state)
{
	return 0;
}

const struct amd_ip_funcs gmc_v10_0_ip_funcs = {
	.name = "gmc_v10_0",
	.early_init = gmc_v10_0_early_init,
	.late_init = gmc_v10_0_late_init,
	.sw_init = gmc_v10_0_sw_init,
	.sw_fini = gmc_v10_0_sw_fini,
	.hw_init = gmc_v10_0_hw_init,
	.hw_fini = gmc_v10_0_hw_fini,
	.suspend = gmc_v10_0_suspend,
	.resume = gmc_v10_0_resume,
	.is_idle = gmc_v10_0_is_idle,
	.wait_for_idle = gmc_v10_0_wait_for_idle,
	.soft_reset = gmc_v10_0_soft_reset,
	.set_clockgating_state = gmc_v10_0_set_clockgating_state,
	.set_powergating_state = gmc_v10_0_set_powergating_state,
	.get_clockgating_state = gmc_v10_0_get_clockgating_state,
};

const struct amdgpu_ip_block_version gmc_v10_0_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_GMC,
	.major = 10,
	.minor = 0,
	.rev = 0,
	.funcs = &gmc_v10_0_ip_funcs,
};