/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/firmware.h>
#include <linux/pci.h>
#include "amdgpu.h"
#include "amdgpu_atomfirmware.h"
#include "gmc_v10_0.h"
#include "umc_v8_7.h"

#include "athub/athub_2_0_0_sh_mask.h"
#include "athub/athub_2_0_0_offset.h"
#include "dcn/dcn_2_0_0_offset.h"
#include "dcn/dcn_2_0_0_sh_mask.h"
#include "oss/osssys_5_0_0_offset.h"
#include "ivsrcid/vmc/irqsrcs_vmc_1_0.h"
#include "navi10_enum.h"

#include "soc15.h"
#include "soc15d.h"
#include "soc15_common.h"

#include "nbio_v2_3.h"

#include "gfxhub_v2_0.h"
#include "gfxhub_v2_1.h"
#include "mmhub_v2_0.h"
#include "mmhub_v2_3.h"
#include "athub_v2_0.h"
#include "athub_v2_1.h"

#if 0
static const struct soc15_reg_golden golden_settings_navi10_hdp[] =
{
	/* TODO: add golden settings for HDP */
};
#endif

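/*
 * gmc_v10_0_ecc_interrupt_state - ECC interrupt state callback.
 *
 * No per-state programming is required for the GMC ECC interrupt on these
 * parts, so this callback simply reports success.
 */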
static int gmc_v10_0_ecc_interrupt_state(struct amdgpu_device *adev,
					 struct amdgpu_irq_src *src,
					 unsigned type,
					 enum amdgpu_interrupt_state state)
{
	return 0;
}

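/*
 * gmc_v10_0_vm_fault_interrupt_state - enable/disable VM fault reporting.
 *
 * Sets or clears the VM fault interrupt masks on both the MMHUB and the
 * GFXHUB according to the requested interrupt state.
 */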
static int
gmc_v10_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
				   struct amdgpu_irq_src *src, unsigned type,
				   enum amdgpu_interrupt_state state)
{
	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		/* MM HUB */
		amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_MMHUB_0, false);
		/* GFX HUB */
		amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB_0, false);
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		/* MM HUB */
		amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_MMHUB_0, true);
		/* GFX HUB */
		amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB_0, true);
		break;
	default:
		break;
	}

	return 0;
}

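/*
 * gmc_v10_0_process_interrupt - handle a GPUVM page fault interrupt.
 *
 * Decodes the faulting address from the IV entry, filters and tries to
 * recover retryable faults, and prints the L2 protection fault status for
 * the rest.
 */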
static int gmc_v10_0_process_interrupt(struct amdgpu_device *adev,
				       struct amdgpu_irq_src *source,
				       struct amdgpu_iv_entry *entry)
{
	bool retry_fault = !!(entry->src_data[1] & 0x80);
	struct amdgpu_vmhub *hub = &adev->vmhub[entry->vmid_src];
	struct amdgpu_task_info task_info;
	uint32_t status = 0;
	u64 addr;

	addr = (u64)entry->src_data[0] << 12;
	addr |= ((u64)entry->src_data[1] & 0xf) << 44;

	if (retry_fault) {
		/* Returning 1 here also prevents sending the IV to the KFD */

		/* Process it only if it's the first fault for this address */
		if (entry->ih != &adev->irq.ih_soft &&
		    amdgpu_gmc_filter_faults(adev, addr, entry->pasid,
					     entry->timestamp))
			return 1;

		/* Delegate it to a different ring if the hardware hasn't
		 * already done it.
		 */
		if (entry->ih == &adev->irq.ih) {
			amdgpu_irq_delegate(adev, entry, 8);
			return 1;
		}

		/* Try to handle the recoverable page faults by filling page
		 * tables
		 */
		if (amdgpu_vm_handle_fault(adev, entry->pasid, addr))
			return 1;
	}

	if (!amdgpu_sriov_vf(adev)) {
		/*
		 * Issue a dummy read to wait for the status register to
		 * be updated to avoid reading an incorrect value due to
		 * the new fast GRBM interface.
		 */
		if ((entry->vmid_src == AMDGPU_GFXHUB_0) &&
		    (adev->asic_type < CHIP_SIENNA_CICHLID))
			RREG32(hub->vm_l2_pro_fault_status);

		status = RREG32(hub->vm_l2_pro_fault_status);
		WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1);
	}

	if (!printk_ratelimit())
		return 0;

	memset(&task_info, 0, sizeof(struct amdgpu_task_info));
	amdgpu_vm_get_task_info(adev, entry->pasid, &task_info);

	dev_err(adev->dev,
		"[%s] page fault (src_id:%u ring:%u vmid:%u pasid:%u, "
		"for process %s pid %d thread %s pid %d)\n",
		entry->vmid_src ? "mmhub" : "gfxhub",
		entry->src_id, entry->ring_id, entry->vmid,
		entry->pasid, task_info.process_name, task_info.tgid,
		task_info.task_name, task_info.pid);
	dev_err(adev->dev, "  in page starting at address 0x%016llx from client 0x%x (%s)\n",
		addr, entry->client_id,
		soc15_ih_clientid_name[entry->client_id]);

	if (!amdgpu_sriov_vf(adev))
		hub->vmhub_funcs->print_l2_protection_fault_status(adev,
								   status);

	return 0;
}

static const struct amdgpu_irq_src_funcs gmc_v10_0_irq_funcs = {
	.set = gmc_v10_0_vm_fault_interrupt_state,
	.process = gmc_v10_0_process_interrupt,
};

static const struct amdgpu_irq_src_funcs gmc_v10_0_ecc_funcs = {
	.set = gmc_v10_0_ecc_interrupt_state,
	.process = amdgpu_umc_process_ecc_irq,
};

static void gmc_v10_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->gmc.vm_fault.num_types = 1;
	adev->gmc.vm_fault.funcs = &gmc_v10_0_irq_funcs;

	if (!amdgpu_sriov_vf(adev)) {
		adev->gmc.ecc_irq.num_types = 1;
		adev->gmc.ecc_irq.funcs = &gmc_v10_0_ecc_funcs;
	}
}

/**
 * gmc_v10_0_use_invalidate_semaphore - judge whether to use the
 * invalidation semaphore
 *
 * @adev: amdgpu_device pointer
 * @vmhub: vmhub type
 *
 */
static bool gmc_v10_0_use_invalidate_semaphore(struct amdgpu_device *adev,
					       uint32_t vmhub)
{
	return ((vmhub == AMDGPU_MMHUB_0 ||
		 vmhub == AMDGPU_MMHUB_1) &&
		(!amdgpu_sriov_vf(adev)));
}

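/*
 * gmc_v10_0_get_atc_vmid_pasid_mapping_info - look up the PASID bound to a VMID.
 *
 * Reads the ATC VMID-to-PASID mapping register for @vmid, stores the PASID in
 * @p_pasid and returns true if the mapping is marked valid.
 */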
static bool gmc_v10_0_get_atc_vmid_pasid_mapping_info(
					struct amdgpu_device *adev,
					uint8_t vmid, uint16_t *p_pasid)
{
	uint32_t value;

	value = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING)
		       + vmid);
	*p_pasid = value & ATC_VMID0_PASID_MAPPING__PASID_MASK;

	return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK);
}

/*
 * GART
 * VMID 0 is the physical GPU addresses as used by the kernel.
 * VMIDs 1-15 are used for userspace clients and are handled
 * by the amdgpu vm/hw blocks.
 */

static void gmc_v10_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid,
				   unsigned int vmhub, uint32_t flush_type)
{
	bool use_semaphore = gmc_v10_0_use_invalidate_semaphore(adev, vmhub);
	struct amdgpu_vmhub *hub = &adev->vmhub[vmhub];
	u32 inv_req = hub->vmhub_funcs->get_invalidate_req(vmid, flush_type);
	u32 tmp;
	/* Use register 17 for GART */
	const unsigned eng = 17;
	unsigned int i;
	unsigned char hub_ip = 0;

	hub_ip = (vmhub == AMDGPU_GFXHUB_0) ?
		   GC_HWIP : MMHUB_HWIP;

	spin_lock(&adev->gmc.invalidate_lock);
	/*
	 * The gpuvm invalidate acknowledge state may be lost across a
	 * power-gating off cycle, so add a semaphore acquire before the
	 * invalidation and a semaphore release after it to avoid entering
	 * the power gated state and work around the issue.
	 */

	/* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */
	if (use_semaphore) {
		for (i = 0; i < adev->usec_timeout; i++) {
			/* a read return value of 1 means semaphore acquire */
			tmp = RREG32_RLC_NO_KIQ(hub->vm_inv_eng0_sem +
						hub->eng_distance * eng, hub_ip);

			if (tmp & 0x1)
				break;
			udelay(1);
		}

		if (i >= adev->usec_timeout)
			DRM_ERROR("Timeout waiting for sem acquire in VM flush!\n");
	}

	WREG32_RLC_NO_KIQ(hub->vm_inv_eng0_req +
			  hub->eng_distance * eng,
			  inv_req, hub_ip);

	/*
	 * Issue a dummy read to wait for the ACK register to be cleared
	 * to avoid a false ACK due to the new fast GRBM interface.
	 */
	if ((vmhub == AMDGPU_GFXHUB_0) &&
	    (adev->asic_type < CHIP_SIENNA_CICHLID))
		RREG32_RLC_NO_KIQ(hub->vm_inv_eng0_req +
				  hub->eng_distance * eng, hub_ip);

	/* Wait for ACK with a delay. */
	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32_RLC_NO_KIQ(hub->vm_inv_eng0_ack +
					hub->eng_distance * eng, hub_ip);

		tmp &= 1 << vmid;
		if (tmp)
			break;

		udelay(1);
	}

	/* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */
	if (use_semaphore)
		/*
		 * add semaphore release after invalidation,
		 * write with 0 means semaphore release
		 */
		WREG32_RLC_NO_KIQ(hub->vm_inv_eng0_sem +
				  hub->eng_distance * eng, 0, hub_ip);

	spin_unlock(&adev->gmc.invalidate_lock);

	if (i < adev->usec_timeout)
		return;

	DRM_ERROR("Timeout waiting for VM flush hub: %d!\n", vmhub);
}

/**
 * gmc_v10_0_flush_gpu_tlb - gart tlb flush callback
 *
 * @adev: amdgpu_device pointer
 * @vmid: vm instance to flush
 * @vmhub: which hub to flush
 * @flush_type: the flush type
 *
 * Flush the TLB for the requested page table.
 */
static void gmc_v10_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
				    uint32_t vmhub, uint32_t flush_type)
{
	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
	struct dma_fence *fence;
	struct amdgpu_job *job;

	int r;

	/* flush hdp cache */
	adev->hdp.funcs->flush_hdp(adev, NULL);

	/* For SRIOV run time, driver shouldn't access the register through MMIO.
	 * Directly use the KIQ to do the vm invalidation instead.
	 */
	if (adev->gfx.kiq.ring.sched.ready &&
	    (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev)) &&
	    down_read_trylock(&adev->reset_sem)) {
		struct amdgpu_vmhub *hub = &adev->vmhub[vmhub];
		const unsigned eng = 17;
		u32 inv_req = hub->vmhub_funcs->get_invalidate_req(vmid, flush_type);
		u32 req = hub->vm_inv_eng0_req + hub->eng_distance * eng;
		u32 ack = hub->vm_inv_eng0_ack + hub->eng_distance * eng;

		amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, inv_req,
						   1 << vmid);

		up_read(&adev->reset_sem);
		return;
	}

	mutex_lock(&adev->mman.gtt_window_lock);

	if (vmhub == AMDGPU_MMHUB_0) {
		gmc_v10_0_flush_vm_hub(adev, vmid, AMDGPU_MMHUB_0, 0);
		mutex_unlock(&adev->mman.gtt_window_lock);
		return;
	}

	BUG_ON(vmhub != AMDGPU_GFXHUB_0);

	if (!adev->mman.buffer_funcs_enabled ||
	    !adev->ib_pool_ready ||
	    amdgpu_in_reset(adev) ||
	    ring->sched.ready == false) {
		gmc_v10_0_flush_vm_hub(adev, vmid, AMDGPU_GFXHUB_0, 0);
		mutex_unlock(&adev->mman.gtt_window_lock);
		return;
	}

	/* The SDMA on Navi has a bug which can theoretically result in memory
	 * corruption if an invalidation happens at the same time as a VA
	 * translation. Avoid this by doing the invalidation from the SDMA
	 * itself.
	 */
	r = amdgpu_job_alloc_with_ib(adev, 16 * 4, AMDGPU_IB_POOL_IMMEDIATE,
				     &job);
	if (r)
		goto error_alloc;

	job->vm_pd_addr = amdgpu_gmc_pd_addr(adev->gart.bo);
	job->vm_needs_flush = true;
	job->ibs->ptr[job->ibs->length_dw++] = ring->funcs->nop;
	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
	r = amdgpu_job_submit(job, &adev->mman.entity,
			      AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
	if (r)
		goto error_submit;

	mutex_unlock(&adev->mman.gtt_window_lock);

	dma_fence_wait(fence, false);
	dma_fence_put(fence);

	return;

error_submit:
	amdgpu_job_free(job);

error_alloc:
	mutex_unlock(&adev->mman.gtt_window_lock);
	DRM_ERROR("Error flushing GPU TLB using the SDMA (%d)!\n", r);
}

/**
 * gmc_v10_0_flush_gpu_tlb_pasid - tlb flush via pasid
 *
 * @adev: amdgpu_device pointer
 * @pasid: pasid to be flushed
 * @flush_type: the flush type
 * @all_hub: flush all hubs for this pasid
 *
 * Flush the TLB for the requested pasid.
 */
static int gmc_v10_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
					 uint16_t pasid, uint32_t flush_type,
					 bool all_hub)
{
	int vmid, i;
	signed long r;
	uint32_t seq;
	uint16_t queried_pasid;
	bool ret;
	struct amdgpu_ring *ring = &adev->gfx.kiq.ring;
	struct amdgpu_kiq *kiq = &adev->gfx.kiq;

	if (amdgpu_emu_mode == 0 && ring->sched.ready) {
		spin_lock(&adev->gfx.kiq.ring_lock);
		/* 2 dwords flush + 8 dwords fence */
		amdgpu_ring_alloc(ring, kiq->pmf->invalidate_tlbs_size + 8);
		kiq->pmf->kiq_invalidate_tlbs(ring,
					      pasid, flush_type, all_hub);
		r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
		if (r) {
			amdgpu_ring_undo(ring);
			spin_unlock(&adev->gfx.kiq.ring_lock);
			return -ETIME;
		}

		amdgpu_ring_commit(ring);
		spin_unlock(&adev->gfx.kiq.ring_lock);
		r = amdgpu_fence_wait_polling(ring, seq, adev->usec_timeout);
		if (r < 1) {
			dev_err(adev->dev, "wait for kiq fence error: %ld.\n", r);
			return -ETIME;
		}

		return 0;
	}

	for (vmid = 1; vmid < AMDGPU_NUM_VMID; vmid++) {

		ret = gmc_v10_0_get_atc_vmid_pasid_mapping_info(adev, vmid,
								&queried_pasid);
		if (ret && queried_pasid == pasid) {
			if (all_hub) {
				for (i = 0; i < adev->num_vmhubs; i++)
					gmc_v10_0_flush_gpu_tlb(adev, vmid,
								i, flush_type);
			} else {
				gmc_v10_0_flush_gpu_tlb(adev, vmid,
							AMDGPU_GFXHUB_0, flush_type);
			}
			break;
		}
	}

	return 0;
}

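/*
 * gmc_v10_0_emit_flush_gpu_tlb - emit a TLB flush on a ring.
 *
 * Emits the page directory base update and the VM invalidation request for
 * @vmid on the ring's VM hub, framed by the invalidation semaphore where the
 * hub requires it, and returns the page directory address that was programmed.
 */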
static uint64_t gmc_v10_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
					     unsigned vmid, uint64_t pd_addr)
{
	bool use_semaphore = gmc_v10_0_use_invalidate_semaphore(ring->adev, ring->funcs->vmhub);
	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
	uint32_t req = hub->vmhub_funcs->get_invalidate_req(vmid, 0);
	unsigned eng = ring->vm_inv_eng;

	/*
	 * The gpuvm invalidate acknowledge state may be lost across a
	 * power-gating off cycle, so add a semaphore acquire before the
	 * invalidation and a semaphore release after it to avoid entering
	 * the power gated state and work around the issue.
	 */

	/* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */
	if (use_semaphore)
		/* a read return value of 1 means semaphore acquire */
		amdgpu_ring_emit_reg_wait(ring,
					  hub->vm_inv_eng0_sem +
					  hub->eng_distance * eng, 0x1, 0x1);

	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_lo32 +
			      (hub->ctx_addr_distance * vmid),
			      lower_32_bits(pd_addr));

	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_hi32 +
			      (hub->ctx_addr_distance * vmid),
			      upper_32_bits(pd_addr));

	amdgpu_ring_emit_reg_write_reg_wait(ring, hub->vm_inv_eng0_req +
					    hub->eng_distance * eng,
					    hub->vm_inv_eng0_ack +
					    hub->eng_distance * eng,
					    req, 1 << vmid);

	/* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */
	if (use_semaphore)
		/*
		 * add semaphore release after invalidation,
		 * write with 0 means semaphore release
		 */
		amdgpu_ring_emit_wreg(ring, hub->vm_inv_eng0_sem +
				      hub->eng_distance * eng, 0);

	return pd_addr;
}

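/*
 * gmc_v10_0_emit_pasid_mapping - emit a VMID-to-PASID mapping update.
 *
 * Writes the PASID into the IH VMID LUT entry for @vmid, using the GFXHUB or
 * MMHUB variant of the LUT depending on which hub the ring belongs to.
 */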
static void gmc_v10_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid,
					 unsigned pasid)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t reg;

	if (ring->funcs->vmhub == AMDGPU_GFXHUB_0)
		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT) + vmid;
	else
		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT_MM) + vmid;

	amdgpu_ring_emit_wreg(ring, reg, pasid);
}

/*
 * The helpers below translate the generic AMDGPU VM flags into the NAVI 10
 * page table encoding: gmc_v10_0_map_mtype() maps the AMDGPU_VM_MTYPE_*
 * values onto the hardware MTYPE field, while gmc_v10_0_get_vm_pde() and
 * gmc_v10_0_get_vm_pte() fix up PDE/PTE addresses and flag bits.
 */
static uint64_t gmc_v10_0_map_mtype(struct amdgpu_device *adev, uint32_t flags)
{
	switch (flags) {
	case AMDGPU_VM_MTYPE_DEFAULT:
		return AMDGPU_PTE_MTYPE_NV10(MTYPE_NC);
	case AMDGPU_VM_MTYPE_NC:
		return AMDGPU_PTE_MTYPE_NV10(MTYPE_NC);
	case AMDGPU_VM_MTYPE_WC:
		return AMDGPU_PTE_MTYPE_NV10(MTYPE_WC);
	case AMDGPU_VM_MTYPE_CC:
		return AMDGPU_PTE_MTYPE_NV10(MTYPE_CC);
	case AMDGPU_VM_MTYPE_UC:
		return AMDGPU_PTE_MTYPE_NV10(MTYPE_UC);
	default:
		return AMDGPU_PTE_MTYPE_NV10(MTYPE_NC);
	}
}

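/*
 * gmc_v10_0_get_vm_pde - adjust a PDE for the hardware.
 *
 * Converts the MC address to a physical address for non-system, non-PTE
 * entries and, when further translation is enabled, sets the block fragment
 * size on PDB1 entries and the translate-further bit on PDB0 entries.
 */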
static void gmc_v10_0_get_vm_pde(struct amdgpu_device *adev, int level,
				 uint64_t *addr, uint64_t *flags)
{
	if (!(*flags & AMDGPU_PDE_PTE) && !(*flags & AMDGPU_PTE_SYSTEM))
		*addr = amdgpu_gmc_vram_mc2pa(adev, *addr);
	BUG_ON(*addr & 0xFFFF00000000003FULL);

	if (!adev->gmc.translate_further)
		return;

	if (level == AMDGPU_VM_PDB1) {
		/* Set the block fragment size */
		if (!(*flags & AMDGPU_PDE_PTE))
			*flags |= AMDGPU_PDE_BFS(0x9);

	} else if (level == AMDGPU_VM_PDB0) {
		if (*flags & AMDGPU_PDE_PTE)
			*flags &= ~AMDGPU_PDE_PTE;
		else
			*flags |= AMDGPU_PTE_TF;
	}
}

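/*
 * gmc_v10_0_get_vm_pte - compute the PTE flags for a mapping.
 *
 * Carries the executable and MTYPE bits over from the mapping flags and, for
 * PRT mappings, marks the entry as PRT, snooped, logged and system while
 * clearing the valid bit.
 */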
static void gmc_v10_0_get_vm_pte(struct amdgpu_device *adev,
				 struct amdgpu_bo_va_mapping *mapping,
				 uint64_t *flags)
{
	*flags &= ~AMDGPU_PTE_EXECUTABLE;
	*flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;

	*flags &= ~AMDGPU_PTE_MTYPE_NV10_MASK;
	*flags |= (mapping->flags & AMDGPU_PTE_MTYPE_NV10_MASK);

	if (mapping->flags & AMDGPU_PTE_PRT) {
		*flags |= AMDGPU_PTE_PRT;
		*flags |= AMDGPU_PTE_SNOOPED;
		*flags |= AMDGPU_PTE_LOG;
		*flags |= AMDGPU_PTE_SYSTEM;
		*flags &= ~AMDGPU_PTE_VALID;
	}
}

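/*
 * gmc_v10_0_get_vbios_fb_size - size of the framebuffer reserved by the VBIOS.
 *
 * Returns the fixed VGA allocation when the VGA mode is enabled, otherwise
 * computes the size from the primary surface viewport height and pitch
 * (4 bytes per pixel).
 */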
static unsigned gmc_v10_0_get_vbios_fb_size(struct amdgpu_device *adev)
{
	u32 d1vga_control = RREG32_SOC15(DCE, 0, mmD1VGA_CONTROL);
	unsigned size;

	if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
		size = AMDGPU_VBIOS_VGA_ALLOCATION;
	} else {
		u32 viewport;
		u32 pitch;

		viewport = RREG32_SOC15(DCE, 0, mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION);
		pitch = RREG32_SOC15(DCE, 0, mmHUBPREQ0_DCSURF_SURFACE_PITCH);
		size = (REG_GET_FIELD(viewport,
				      HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_HEIGHT) *
			REG_GET_FIELD(pitch, HUBPREQ0_DCSURF_SURFACE_PITCH, PITCH) *
			4);
	}

	return size;
}

static const struct amdgpu_gmc_funcs gmc_v10_0_gmc_funcs = {
	.flush_gpu_tlb = gmc_v10_0_flush_gpu_tlb,
	.flush_gpu_tlb_pasid = gmc_v10_0_flush_gpu_tlb_pasid,
	.emit_flush_gpu_tlb = gmc_v10_0_emit_flush_gpu_tlb,
	.emit_pasid_mapping = gmc_v10_0_emit_pasid_mapping,
	.map_mtype = gmc_v10_0_map_mtype,
	.get_vm_pde = gmc_v10_0_get_vm_pde,
	.get_vm_pte = gmc_v10_0_get_vm_pte,
	.get_vbios_fb_size = gmc_v10_0_get_vbios_fb_size,
};

static void gmc_v10_0_set_gmc_funcs(struct amdgpu_device *adev)
{
	if (adev->gmc.gmc_funcs == NULL)
		adev->gmc.gmc_funcs = &gmc_v10_0_gmc_funcs;
}

static void gmc_v10_0_set_umc_funcs(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_SIENNA_CICHLID:
		adev->umc.max_ras_err_cnt_per_query = UMC_V8_7_TOTAL_CHANNEL_NUM;
		adev->umc.channel_inst_num = UMC_V8_7_CHANNEL_INSTANCE_NUM;
		adev->umc.umc_inst_num = UMC_V8_7_UMC_INSTANCE_NUM;
		adev->umc.channel_offs = UMC_V8_7_PER_CHANNEL_OFFSET_SIENNA;
		adev->umc.channel_idx_tbl = &umc_v8_7_channel_idx_tbl[0][0];
		adev->umc.ras_funcs = &umc_v8_7_ras_funcs;
		break;
	default:
		break;
	}
}

static void gmc_v10_0_set_mmhub_funcs(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_VANGOGH:
	case CHIP_YELLOW_CARP:
		adev->mmhub.funcs = &mmhub_v2_3_funcs;
		break;
	default:
		adev->mmhub.funcs = &mmhub_v2_0_funcs;
		break;
	}
}

static void gmc_v10_0_set_gfxhub_funcs(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_SIENNA_CICHLID:
	case CHIP_NAVY_FLOUNDER:
	case CHIP_VANGOGH:
	case CHIP_DIMGREY_CAVEFISH:
	case CHIP_BEIGE_GOBY:
	case CHIP_YELLOW_CARP:
		adev->gfxhub.funcs = &gfxhub_v2_1_funcs;
		break;
	default:
		adev->gfxhub.funcs = &gfxhub_v2_0_funcs;
		break;
	}
}

static int gmc_v10_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v10_0_set_mmhub_funcs(adev);
	gmc_v10_0_set_gfxhub_funcs(adev);
	gmc_v10_0_set_gmc_funcs(adev);
	gmc_v10_0_set_irq_funcs(adev);
	gmc_v10_0_set_umc_funcs(adev);

	adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
	adev->gmc.shared_aperture_end =
		adev->gmc.shared_aperture_start + (4ULL << 30) - 1;
	adev->gmc.private_aperture_start = 0x1000000000000000ULL;
	adev->gmc.private_aperture_end =
		adev->gmc.private_aperture_start + (4ULL << 30) - 1;

	return 0;
}

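/*
 * gmc_v10_0_late_init - late GMC initialization.
 *
 * Allocates the per-ring VM invalidation engines, performs the RAS late init
 * and enables the VM fault interrupt.
 */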
static int gmc_v10_0_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = amdgpu_gmc_allocate_vm_inv_eng(adev);
	if (r)
		return r;

	r = amdgpu_gmc_ras_late_init(adev);
	if (r)
		return r;

	return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
}

static void gmc_v10_0_vram_gtt_location(struct amdgpu_device *adev,
					struct amdgpu_gmc *mc)
{
	u64 base = 0;

	base = adev->gfxhub.funcs->get_fb_location(adev);

	/* add the xgmi offset of the physical node */
	base += adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;

	amdgpu_gmc_vram_location(adev, &adev->gmc, base);
	amdgpu_gmc_gart_location(adev, mc);
	amdgpu_gmc_agp_location(adev, mc);

	/* base offset of vram pages */
	adev->vm_manager.vram_base_offset = adev->gfxhub.funcs->get_mc_fb_offset(adev);

	/* add the xgmi offset of the physical node */
	adev->vm_manager.vram_base_offset +=
		adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
}

/**
 * gmc_v10_0_mc_init - initialize the memory controller driver params
 *
 * @adev: amdgpu_device pointer
 *
 * Look up the amount of vram, vram width, and decide how to place
 * vram and gart within the GPU's physical address space.
 * Returns 0 for success.
 */
static int gmc_v10_0_mc_init(struct amdgpu_device *adev)
{
	int r;

	/* size in MB */
	adev->gmc.mc_vram_size =
		adev->nbio.funcs->get_memsize(adev) * 1024ULL * 1024ULL;
	adev->gmc.real_vram_size = adev->gmc.mc_vram_size;

	if (!(adev->flags & AMD_IS_APU)) {
		r = amdgpu_device_resize_fb_bar(adev);
		if (r)
			return r;
	}
	adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
	adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);

#ifdef CONFIG_X86_64
	if (adev->flags & AMD_IS_APU) {
		adev->gmc.aper_base = adev->gfxhub.funcs->get_mc_fb_offset(adev);
		adev->gmc.aper_size = adev->gmc.real_vram_size;
	}
#endif

	/* In case the PCI BAR is larger than the actual amount of vram */
	adev->gmc.visible_vram_size = adev->gmc.aper_size;
	if (adev->gmc.visible_vram_size > adev->gmc.real_vram_size)
		adev->gmc.visible_vram_size = adev->gmc.real_vram_size;

	/* set the gart size */
	if (amdgpu_gart_size == -1) {
		switch (adev->asic_type) {
		case CHIP_NAVI10:
		case CHIP_NAVI14:
		case CHIP_NAVI12:
		case CHIP_SIENNA_CICHLID:
		case CHIP_NAVY_FLOUNDER:
		case CHIP_VANGOGH:
		case CHIP_DIMGREY_CAVEFISH:
		case CHIP_BEIGE_GOBY:
		case CHIP_YELLOW_CARP:
		default:
			adev->gmc.gart_size = 512ULL << 20;
			break;
		}
	} else
		adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;

	gmc_v10_0_vram_gtt_location(adev, &adev->gmc);

	return 0;
}

static int gmc_v10_0_gart_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->gart.bo) {
		WARN(1, "NAVI10 PCIE GART already initialized\n");
		return 0;
	}

	/* Initialize common gart structure */
	r = amdgpu_gart_init(adev);
	if (r)
		return r;

	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
	adev->gart.gart_pte_flags = AMDGPU_PTE_MTYPE_NV10(MTYPE_UC) |
				    AMDGPU_PTE_EXECUTABLE;

	return amdgpu_gart_table_vram_alloc(adev);
}

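/*
 * gmc_v10_0_sw_init - software init for the GMC block.
 *
 * Initializes the GFX and MM hubs, queries the VRAM configuration, sets up
 * the VM size and fault/ECC interrupts, configures the DMA mask, and brings
 * up the memory manager, GART and VM manager.
 */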
static int gmc_v10_0_sw_init(void *handle)
{
	int r, vram_width = 0, vram_type = 0, vram_vendor = 0;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->gfxhub.funcs->init(adev);

	adev->mmhub.funcs->init(adev);

	spin_lock_init(&adev->gmc.invalidate_lock);

	if ((adev->flags & AMD_IS_APU) && amdgpu_emu_mode == 1) {
		adev->gmc.vram_type = AMDGPU_VRAM_TYPE_DDR4;
		adev->gmc.vram_width = 64;
	} else if (amdgpu_emu_mode == 1) {
		adev->gmc.vram_type = AMDGPU_VRAM_TYPE_GDDR6;
		adev->gmc.vram_width = 1 * 128;
	} else {
		r = amdgpu_atomfirmware_get_vram_info(adev,
				&vram_width, &vram_type, &vram_vendor);
		adev->gmc.vram_width = vram_width;

		adev->gmc.vram_type = vram_type;
		adev->gmc.vram_vendor = vram_vendor;
	}

	switch (adev->asic_type) {
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_NAVI12:
	case CHIP_SIENNA_CICHLID:
	case CHIP_NAVY_FLOUNDER:
	case CHIP_VANGOGH:
	case CHIP_DIMGREY_CAVEFISH:
	case CHIP_BEIGE_GOBY:
	case CHIP_YELLOW_CARP:
		adev->num_vmhubs = 2;
		/*
		 * To fulfill 4-level page support,
		 * vm size is 256TB (48 bit), maximum size,
		 * block size 512 (9 bit)
		 */
		amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
		break;
	default:
		break;
	}

	/* This interrupt is the VMC page fault. */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC,
			      VMC_1_0__SRCID__VM_FAULT,
			      &adev->gmc.vm_fault);

	if (r)
		return r;

	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UTCL2,
			      UTCL2_1_0__SRCID__FAULT,
			      &adev->gmc.vm_fault);
	if (r)
		return r;

	if (!amdgpu_sriov_vf(adev)) {
		/* interrupt sent to DF. */
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DF, 0,
				      &adev->gmc.ecc_irq);
		if (r)
			return r;
	}

	/*
	 * Set the internal MC address mask.
	 * This is the max address of the GPU's internal address space.
	 */
	adev->gmc.mc_mask = 0xffffffffffffULL; /* 48 bit MC */

	r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(44));
	if (r) {
		printk(KERN_WARNING "amdgpu: No suitable DMA available.\n");
		return r;
	}

	if (adev->gmc.xgmi.supported) {
		r = adev->gfxhub.funcs->get_xgmi_info(adev);
		if (r)
			return r;
	}

	r = gmc_v10_0_mc_init(adev);
	if (r)
		return r;

	amdgpu_gmc_get_vbios_allocations(adev);
	amdgpu_gmc_get_reserved_allocation(adev);

	/* Memory manager */
	r = amdgpu_bo_init(adev);
	if (r)
		return r;

	r = gmc_v10_0_gart_init(adev);
	if (r)
		return r;

	/*
	 * number of VMs
	 * VMID 0 is reserved for System
	 * amdgpu graphics/compute will use VMIDs 1-7
	 * amdkfd will use VMIDs 8-15
	 */
	adev->vm_manager.first_kfd_vmid = 8;

	amdgpu_vm_manager_init(adev);

	return 0;
}

/**
 * gmc_v10_0_gart_fini - vm fini callback
 *
 * @adev: amdgpu_device pointer
 *
 * Tears down the driver GART/VM setup.
 */
static void gmc_v10_0_gart_fini(struct amdgpu_device *adev)
{
	amdgpu_gart_table_vram_free(adev);
}

static int gmc_v10_0_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_vm_manager_fini(adev);
	gmc_v10_0_gart_fini(adev);
	amdgpu_gem_force_release(adev);
	amdgpu_bo_fini(adev);

	return 0;
}

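/*
 * gmc_v10_0_init_golden_registers - apply golden register settings.
 *
 * No GMC golden settings are currently required for any of the supported
 * ASICs, so the switch below is an intentional no-op kept as a hook.
 */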
static void gmc_v10_0_init_golden_registers(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_NAVI12:
	case CHIP_SIENNA_CICHLID:
	case CHIP_NAVY_FLOUNDER:
	case CHIP_VANGOGH:
	case CHIP_DIMGREY_CAVEFISH:
	case CHIP_BEIGE_GOBY:
	case CHIP_YELLOW_CARP:
		break;
	default:
		break;
	}
}

/**
 * gmc_v10_0_gart_enable - gart enable
 *
 * @adev: amdgpu_device pointer
 */
static int gmc_v10_0_gart_enable(struct amdgpu_device *adev)
{
	int r;
	bool value;

	if (adev->gart.bo == NULL) {
		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}

	r = amdgpu_gart_table_vram_pin(adev);
	if (r)
		return r;

	r = adev->gfxhub.funcs->gart_enable(adev);
	if (r)
		return r;

	r = adev->mmhub.funcs->gart_enable(adev);
	if (r)
		return r;

	adev->hdp.funcs->init_registers(adev);

	/* Flush HDP after it is initialized */
	adev->hdp.funcs->flush_hdp(adev, NULL);

	value = (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) ?
		false : true;

	adev->gfxhub.funcs->set_fault_enable_default(adev, value);
	adev->mmhub.funcs->set_fault_enable_default(adev, value);
	gmc_v10_0_flush_gpu_tlb(adev, 0, AMDGPU_MMHUB_0, 0);
	gmc_v10_0_flush_gpu_tlb(adev, 0, AMDGPU_GFXHUB_0, 0);

	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(adev->gmc.gart_size >> 20),
		 (unsigned long long)amdgpu_bo_gpu_offset(adev->gart.bo));

	adev->gart.ready = true;

	return 0;
}

static int gmc_v10_0_hw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* The sequence of these function calls matters. */
	gmc_v10_0_init_golden_registers(adev);

	/*
	 * Harvestable groups in gc_utcl2 need to be programmed before
	 * gart_enable.
	 */
	if (adev->gfxhub.funcs && adev->gfxhub.funcs->utcl2_harvest)
		adev->gfxhub.funcs->utcl2_harvest(adev);

	r = gmc_v10_0_gart_enable(adev);
	if (r)
		return r;

	if (adev->umc.funcs && adev->umc.funcs->init_registers)
		adev->umc.funcs->init_registers(adev);

	return 0;
}

/**
 * gmc_v10_0_gart_disable - gart disable
 *
 * @adev: amdgpu_device pointer
 *
 * This disables all VM page table translation.
 */
static void gmc_v10_0_gart_disable(struct amdgpu_device *adev)
{
	adev->gfxhub.funcs->gart_disable(adev);
	adev->mmhub.funcs->gart_disable(adev);
	amdgpu_gart_table_vram_unpin(adev);
}

static int gmc_v10_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev)) {
		/* full access mode, so don't touch any GMC register */
		DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
		return 0;
	}

	amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0);
	amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
	gmc_v10_0_gart_disable(adev);

	return 0;
}

static int gmc_v10_0_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v10_0_hw_fini(adev);

	return 0;
}

static int gmc_v10_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = gmc_v10_0_hw_init(adev);
	if (r)
		return r;

	amdgpu_vmid_reset_all(adev);

	return 0;
}

static bool gmc_v10_0_is_idle(void *handle)
{
	/* MC is always ready in GMC v10. */
	return true;
}

static int gmc_v10_0_wait_for_idle(void *handle)
{
	/* There is no need to wait for MC idle in GMC v10. */
	return 0;
}

static int gmc_v10_0_soft_reset(void *handle)
{
	return 0;
}

static int gmc_v10_0_set_clockgating_state(void *handle,
					   enum amd_clockgating_state state)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = adev->mmhub.funcs->set_clockgating(adev, state);
	if (r)
		return r;

	if (adev->asic_type >= CHIP_SIENNA_CICHLID &&
	    adev->asic_type <= CHIP_YELLOW_CARP)
		return athub_v2_1_set_clockgating(adev, state);
	else
		return athub_v2_0_set_clockgating(adev, state);
}

static void gmc_v10_0_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->mmhub.funcs->get_clockgating(adev, flags);

	if (adev->asic_type >= CHIP_SIENNA_CICHLID &&
	    adev->asic_type <= CHIP_YELLOW_CARP)
		athub_v2_1_get_clockgating(adev, flags);
	else
		athub_v2_0_get_clockgating(adev, flags);
}

static int gmc_v10_0_set_powergating_state(void *handle,
					   enum amd_powergating_state state)
{
	return 0;
}

const struct amd_ip_funcs gmc_v10_0_ip_funcs = {
	.name = "gmc_v10_0",
	.early_init = gmc_v10_0_early_init,
	.late_init = gmc_v10_0_late_init,
	.sw_init = gmc_v10_0_sw_init,
	.sw_fini = gmc_v10_0_sw_fini,
	.hw_init = gmc_v10_0_hw_init,
	.hw_fini = gmc_v10_0_hw_fini,
	.suspend = gmc_v10_0_suspend,
	.resume = gmc_v10_0_resume,
	.is_idle = gmc_v10_0_is_idle,
	.wait_for_idle = gmc_v10_0_wait_for_idle,
	.soft_reset = gmc_v10_0_soft_reset,
	.set_clockgating_state = gmc_v10_0_set_clockgating_state,
	.set_powergating_state = gmc_v10_0_set_powergating_state,
	.get_clockgating_state = gmc_v10_0_get_clockgating_state,
};

const struct amdgpu_ip_block_version gmc_v10_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_GMC,
	.major = 10,
	.minor = 0,
	.rev = 0,
	.funcs = &gmc_v10_0_ip_funcs,
};