/*
 * Copyright 2021 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/firmware.h>
#include <linux/pci.h>
#include "amdgpu.h"
#include "amdgpu_atomfirmware.h"
#include "gmc_v11_0.h"
#include "umc_v8_7.h"
#include "athub/athub_3_0_0_sh_mask.h"
#include "athub/athub_3_0_0_offset.h"
#include "oss/osssys_6_0_0_offset.h"
#include "ivsrcid/vmc/irqsrcs_vmc_1_0.h"
#include "navi10_enum.h"
#include "soc15.h"
#include "soc15d.h"
#include "soc15_common.h"
#include "nbio_v4_3.h"
#include "gfxhub_v3_0.h"
#include "mmhub_v3_0.h"
#include "mmhub_v3_0_2.h"
#include "athub_v3_0.h"

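/* ECC interrupt state needs no per-type programming here; the amdgpu_irq
 * framework still requires a .set callback, so provide a no-op.
 */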
static int gmc_v11_0_ecc_interrupt_state(struct amdgpu_device *adev,
                                         struct amdgpu_irq_src *src,
                                         unsigned int type,
                                         enum amdgpu_interrupt_state state)
{
        return 0;
}

static int
gmc_v11_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
                                   struct amdgpu_irq_src *src, unsigned int type,
                                   enum amdgpu_interrupt_state state)
{
        switch (state) {
        case AMDGPU_IRQ_STATE_DISABLE:
                /* MM HUB */
                amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_MMHUB_0, false);
                /* GFX HUB */
                amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB_0, false);
                break;
        case AMDGPU_IRQ_STATE_ENABLE:
                /* MM HUB */
                amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_MMHUB_0, true);
                /* GFX HUB */
                amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB_0, true);
                break;
        default:
                break;
        }

        return 0;
}

static int gmc_v11_0_process_interrupt(struct amdgpu_device *adev,
                                       struct amdgpu_irq_src *source,
                                       struct amdgpu_iv_entry *entry)
{
        struct amdgpu_vmhub *hub = &adev->vmhub[entry->vmid_src];
        uint32_t status = 0;
        u64 addr;

        addr = (u64)entry->src_data[0] << 12;
        addr |= ((u64)entry->src_data[1] & 0xf) << 44;

        if (!amdgpu_sriov_vf(adev)) {
                /*
                 * Issue a dummy read to wait for the status register to
                 * be updated to avoid reading an incorrect value due to
                 * the new fast GRBM interface.
                 */
                if (entry->vmid_src == AMDGPU_GFXHUB_0)
                        RREG32(hub->vm_l2_pro_fault_status);

                status = RREG32(hub->vm_l2_pro_fault_status);
                WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1);
        }

        if (printk_ratelimit()) {
                struct amdgpu_task_info task_info;

                memset(&task_info, 0, sizeof(struct amdgpu_task_info));
                amdgpu_vm_get_task_info(adev, entry->pasid, &task_info);

                dev_err(adev->dev,
                        "[%s] page fault (src_id:%u ring:%u vmid:%u pasid:%u, "
                        "for process %s pid %d thread %s pid %d)\n",
                        entry->vmid_src ? "mmhub" : "gfxhub",
                        entry->src_id, entry->ring_id, entry->vmid,
                        entry->pasid, task_info.process_name, task_info.tgid,
                        task_info.task_name, task_info.pid);
                dev_err(adev->dev, " in page starting at address 0x%016llx from client %d\n",
                        addr, entry->client_id);
                if (!amdgpu_sriov_vf(adev))
                        hub->vmhub_funcs->print_l2_protection_fault_status(adev, status);
        }

        return 0;
}

static const struct amdgpu_irq_src_funcs gmc_v11_0_irq_funcs = {
        .set = gmc_v11_0_vm_fault_interrupt_state,
        .process = gmc_v11_0_process_interrupt,
};

static const struct amdgpu_irq_src_funcs gmc_v11_0_ecc_funcs = {
        .set = gmc_v11_0_ecc_interrupt_state,
        .process = amdgpu_umc_process_ecc_irq,
};

static void gmc_v11_0_set_irq_funcs(struct amdgpu_device *adev)
{
        adev->gmc.vm_fault.num_types = 1;
        adev->gmc.vm_fault.funcs = &gmc_v11_0_irq_funcs;

        if (!amdgpu_sriov_vf(adev)) {
                adev->gmc.ecc_irq.num_types = 1;
                adev->gmc.ecc_irq.funcs = &gmc_v11_0_ecc_funcs;
        }
}

/**
 * gmc_v11_0_use_invalidate_semaphore - judge whether to use semaphore
 *
 * @adev: amdgpu_device pointer
 * @vmhub: vmhub type
 *
 */
static bool gmc_v11_0_use_invalidate_semaphore(struct amdgpu_device *adev,
                                               uint32_t vmhub)
{
        return ((vmhub == AMDGPU_MMHUB_0) &&
                (!amdgpu_sriov_vf(adev)));
}

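/* Look up the PASID currently mapped to @vmid in the IH VMID LUT;
 * returns true if a valid (non-zero) mapping exists.
 */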
static bool gmc_v11_0_get_vmid_pasid_mapping_info(
                                        struct amdgpu_device *adev,
                                        uint8_t vmid, uint16_t *p_pasid)
{
        *p_pasid = RREG32(SOC15_REG_OFFSET(OSSSYS, 0, regIH_VMID_0_LUT) + vmid) & 0xffff;

        return !!(*p_pasid);
}

/*
 * GART
 * VMID 0 is the physical GPU addresses as used by the kernel.
 * VMIDs 1-15 are used for userspace clients and are handled
 * by the amdgpu vm/hsa code.
 */

static void gmc_v11_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid,
                                   unsigned int vmhub, uint32_t flush_type)
{
        bool use_semaphore = gmc_v11_0_use_invalidate_semaphore(adev, vmhub);
        struct amdgpu_vmhub *hub = &adev->vmhub[vmhub];
        u32 inv_req = hub->vmhub_funcs->get_invalidate_req(vmid, flush_type);
        u32 tmp;
        /* Use register 17 for GART */
        const unsigned int eng = 17;
        unsigned int i;

        spin_lock(&adev->gmc.invalidate_lock);

        /*
         * It may lose gpuvm invalidate acknowledge state across power-gating
         * off cycle, add semaphore acquire before invalidation and semaphore
         * release after invalidation to avoid entering power gated state
         * to work around the issue.
         */
        if (use_semaphore) {
                for (i = 0; i < adev->usec_timeout; i++) {
                        /* a read return value of 1 means semaphore acquire */
                        tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_sem +
                                            hub->eng_distance * eng);
                        if (tmp & 0x1)
                                break;
                        udelay(1);
                }

                if (i >= adev->usec_timeout)
                        DRM_ERROR("Timeout waiting for sem acquire in VM flush!\n");
        }

        WREG32_NO_KIQ(hub->vm_inv_eng0_req + hub->eng_distance * eng, inv_req);

        /* Wait for ACK with a delay. */
        for (i = 0; i < adev->usec_timeout; i++) {
                tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_ack +
                                    hub->eng_distance * eng);
                tmp &= 1 << vmid;
                if (tmp)
                        break;

                udelay(1);
        }

        if (use_semaphore)
                /*
                 * add semaphore release after invalidation,
                 * write with 0 means semaphore release
                 */
                WREG32_NO_KIQ(hub->vm_inv_eng0_sem +
                              hub->eng_distance * eng, 0);

        /* Issue an additional private vm invalidation to MMHUB */
        if ((vmhub != AMDGPU_GFXHUB_0) &&
            (hub->vm_l2_bank_select_reserved_cid2)) {
                inv_req = RREG32_NO_KIQ(hub->vm_l2_bank_select_reserved_cid2);
                /* bit 25: RESERVED_CACHE_PRIVATE_INVALIDATION */
                inv_req |= (1 << 25);
                /* Issue private invalidation */
                WREG32_NO_KIQ(hub->vm_l2_bank_select_reserved_cid2, inv_req);
                /* Read back to ensure invalidation is done */
                RREG32_NO_KIQ(hub->vm_l2_bank_select_reserved_cid2);
        }

        spin_unlock(&adev->gmc.invalidate_lock);

        if (i < adev->usec_timeout)
                return;

        DRM_ERROR("Timeout waiting for VM flush ACK!\n");
}

/**
 * gmc_v11_0_flush_gpu_tlb - gart tlb flush callback
 *
 * @adev: amdgpu_device pointer
 * @vmid: vm instance to flush
 * @vmhub: which hub to flush
 * @flush_type: the flush type
 *
 * Flush the TLB for the requested page table.
 */
static void gmc_v11_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
                                    uint32_t vmhub, uint32_t flush_type)
{
        if ((vmhub == AMDGPU_GFXHUB_0) && !adev->gfx.is_poweron)
                return;

        /* flush hdp cache */
        adev->hdp.funcs->flush_hdp(adev, NULL);

        /* For SRIOV run time, driver shouldn't access the register through MMIO.
         * Directly use kiq to do the vm invalidation instead.
         */
        if (adev->gfx.kiq.ring.sched.ready && !adev->enable_mes &&
            (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev))) {
                struct amdgpu_vmhub *hub = &adev->vmhub[vmhub];
                const unsigned int eng = 17;
                u32 inv_req = hub->vmhub_funcs->get_invalidate_req(vmid, flush_type);
                u32 req = hub->vm_inv_eng0_req + hub->eng_distance * eng;
                u32 ack = hub->vm_inv_eng0_ack + hub->eng_distance * eng;

                amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, inv_req,
                                                   1 << vmid);
                return;
        }

        mutex_lock(&adev->mman.gtt_window_lock);
        gmc_v11_0_flush_vm_hub(adev, vmid, vmhub, 0);
        mutex_unlock(&adev->mman.gtt_window_lock);
}

/**
 * gmc_v11_0_flush_gpu_tlb_pasid - tlb flush via pasid
 *
 * @adev: amdgpu_device pointer
 * @pasid: pasid to be flushed
 * @flush_type: the flush type
 * @all_hub: flush all hubs
 *
 * Flush the TLB for the requested pasid.
 */
static int gmc_v11_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
                                         uint16_t pasid, uint32_t flush_type,
                                         bool all_hub)
{
        int vmid, i;
        signed long r;
        uint32_t seq;
        uint16_t queried_pasid;
        bool ret;
        struct amdgpu_ring *ring = &adev->gfx.kiq.ring;
        struct amdgpu_kiq *kiq = &adev->gfx.kiq;

        if (amdgpu_emu_mode == 0 && ring->sched.ready) {
                spin_lock(&adev->gfx.kiq.ring_lock);
                /* 2 dwords flush + 8 dwords fence */
                amdgpu_ring_alloc(ring, kiq->pmf->invalidate_tlbs_size + 8);
                kiq->pmf->kiq_invalidate_tlbs(ring,
                                              pasid, flush_type, all_hub);
                r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
                if (r) {
                        amdgpu_ring_undo(ring);
                        spin_unlock(&adev->gfx.kiq.ring_lock);
                        return -ETIME;
                }

                amdgpu_ring_commit(ring);
                spin_unlock(&adev->gfx.kiq.ring_lock);
                r = amdgpu_fence_wait_polling(ring, seq, adev->usec_timeout);
                if (r < 1) {
                        dev_err(adev->dev, "wait for kiq fence error: %ld.\n", r);
                        return -ETIME;
                }

                return 0;
        }

        for (vmid = 1; vmid < 16; vmid++) {
                ret = gmc_v11_0_get_vmid_pasid_mapping_info(adev, vmid,
                                                            &queried_pasid);
                if (ret && queried_pasid == pasid) {
                        if (all_hub) {
                                for (i = 0; i < adev->num_vmhubs; i++)
                                        gmc_v11_0_flush_gpu_tlb(adev, vmid,
                                                                i, flush_type);
                        } else {
                                gmc_v11_0_flush_gpu_tlb(adev, vmid,
                                                        AMDGPU_GFXHUB_0, flush_type);
                        }
                        break;
                }
        }

        return 0;
}

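/* Emit a TLB flush for @vmid on the ring's VM hub: write the new page
 * directory base, then request and wait for the invalidation ack, with
 * the invalidation semaphore held where required.
 */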
static uint64_t gmc_v11_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
                                             unsigned int vmid, uint64_t pd_addr)
{
        bool use_semaphore = gmc_v11_0_use_invalidate_semaphore(ring->adev, ring->funcs->vmhub);
        struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
        uint32_t req = hub->vmhub_funcs->get_invalidate_req(vmid, 0);
        unsigned int eng = ring->vm_inv_eng;

        /*
         * It may lose gpuvm invalidate acknowledge state across power-gating
         * off cycle, add semaphore acquire before invalidation and semaphore
         * release after invalidation to avoid entering power gated state
         * to work around the issue.
         */
        if (use_semaphore)
                /* a read return value of 1 means semaphore acquire */
                amdgpu_ring_emit_reg_wait(ring,
                                          hub->vm_inv_eng0_sem +
                                          hub->eng_distance * eng, 0x1, 0x1);

        amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_lo32 +
                              (hub->ctx_addr_distance * vmid),
                              lower_32_bits(pd_addr));

        amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_hi32 +
                              (hub->ctx_addr_distance * vmid),
                              upper_32_bits(pd_addr));

        amdgpu_ring_emit_reg_write_reg_wait(ring, hub->vm_inv_eng0_req +
                                            hub->eng_distance * eng,
                                            hub->vm_inv_eng0_ack +
                                            hub->eng_distance * eng,
                                            req, 1 << vmid);

        if (use_semaphore)
                /*
                 * add semaphore release after invalidation,
                 * write with 0 means semaphore release
                 */
                amdgpu_ring_emit_wreg(ring, hub->vm_inv_eng0_sem +
                                      hub->eng_distance * eng, 0);

        return pd_addr;
}

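/* Emit a write that updates the IH VMID-to-PASID LUT entry for this
 * ring's VM hub.
 */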
static void gmc_v11_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned int vmid,
                                         unsigned int pasid)
{
        struct amdgpu_device *adev = ring->adev;
        uint32_t reg;

        /* MES fw manages IH_VMID_x_LUT updating */
        if (ring->is_mes_queue)
                return;

        if (ring->funcs->vmhub == AMDGPU_GFXHUB_0)
                reg = SOC15_REG_OFFSET(OSSSYS, 0, regIH_VMID_0_LUT) + vmid;
        else
                reg = SOC15_REG_OFFSET(OSSSYS, 0, regIH_VMID_0_LUT_MM) + vmid;

        amdgpu_ring_emit_wreg(ring, reg, pasid);
}

/*
 * PTE format:
 * 63:59 reserved
 * 58:57 reserved
 * 56 F
 * 55 L
 * 54 reserved
 * 53:52 SW
 * 51 T
 * 50:48 mtype
 * 47:12 4k physical page base address
 * 11:7 fragment
 * 6 write
 * 5 read
 * 4 exe
 * 3 Z
 * 2 snooped
 * 1 system
 * 0 valid
 *
 * PDE format:
 * 63:59 block fragment size
 * 58:55 reserved
 * 54 P
 * 53:48 reserved
 * 47:6 physical base address of PD or PTE
 * 2 C
 * 1 system
 * 0 valid
 */

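/* Map generic AMDGPU_VM_MTYPE_* flags onto the NV10-format PTE mtype
 * field; anything unrecognized falls back to non-coherent (NC).
 */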
static uint64_t gmc_v11_0_map_mtype(struct amdgpu_device *adev, uint32_t flags)
{
        switch (flags) {
        case AMDGPU_VM_MTYPE_DEFAULT:
                return AMDGPU_PTE_MTYPE_NV10(MTYPE_NC);
        case AMDGPU_VM_MTYPE_NC:
                return AMDGPU_PTE_MTYPE_NV10(MTYPE_NC);
        case AMDGPU_VM_MTYPE_WC:
                return AMDGPU_PTE_MTYPE_NV10(MTYPE_WC);
        case AMDGPU_VM_MTYPE_CC:
                return AMDGPU_PTE_MTYPE_NV10(MTYPE_CC);
        case AMDGPU_VM_MTYPE_UC:
                return AMDGPU_PTE_MTYPE_NV10(MTYPE_UC);
        default:
                return AMDGPU_PTE_MTYPE_NV10(MTYPE_NC);
        }
}

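/* Adjust a PDE's address and flags for the given page-table level,
 * applying the further-translation bits when 4-level translation is
 * enabled.
 */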
static void gmc_v11_0_get_vm_pde(struct amdgpu_device *adev, int level,
                                 uint64_t *addr, uint64_t *flags)
{
        if (!(*flags & AMDGPU_PDE_PTE) && !(*flags & AMDGPU_PTE_SYSTEM))
                *addr = adev->vm_manager.vram_base_offset + *addr -
                        adev->gmc.vram_start;
        BUG_ON(*addr & 0xFFFF00000000003FULL);

        if (!adev->gmc.translate_further)
                return;

        if (level == AMDGPU_VM_PDB1) {
                /* Set the block fragment size */
                if (!(*flags & AMDGPU_PDE_PTE))
                        *flags |= AMDGPU_PDE_BFS(0x9);

        } else if (level == AMDGPU_VM_PDB0) {
                if (*flags & AMDGPU_PDE_PTE)
                        *flags &= ~AMDGPU_PDE_PTE;
                else
                        *flags |= AMDGPU_PTE_TF;
        }
}

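/* Compute the final PTE flags for a mapping: executable, mtype and
 * no-alloc bits come from the mapping itself; PRT mappings are marked
 * snooped, logged and system, and made invalid.
 */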
static void gmc_v11_0_get_vm_pte(struct amdgpu_device *adev,
                                 struct amdgpu_bo_va_mapping *mapping,
                                 uint64_t *flags)
{
        *flags &= ~AMDGPU_PTE_EXECUTABLE;
        *flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;

        *flags &= ~AMDGPU_PTE_MTYPE_NV10_MASK;
        *flags |= (mapping->flags & AMDGPU_PTE_MTYPE_NV10_MASK);

        *flags &= ~AMDGPU_PTE_NOALLOC;
        *flags |= (mapping->flags & AMDGPU_PTE_NOALLOC);

        if (mapping->flags & AMDGPU_PTE_PRT) {
                *flags |= AMDGPU_PTE_PRT;
                *flags |= AMDGPU_PTE_SNOOPED;
                *flags |= AMDGPU_PTE_LOG;
                *flags |= AMDGPU_PTE_SYSTEM;
                *flags &= ~AMDGPU_PTE_VALID;
        }
}

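/* No VBIOS framebuffer reservation is carved out on GMC v11, so report
 * a size of 0.
 */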
static unsigned int gmc_v11_0_get_vbios_fb_size(struct amdgpu_device *adev)
{
        return 0;
}

static const struct amdgpu_gmc_funcs gmc_v11_0_gmc_funcs = {
        .flush_gpu_tlb = gmc_v11_0_flush_gpu_tlb,
        .flush_gpu_tlb_pasid = gmc_v11_0_flush_gpu_tlb_pasid,
        .emit_flush_gpu_tlb = gmc_v11_0_emit_flush_gpu_tlb,
        .emit_pasid_mapping = gmc_v11_0_emit_pasid_mapping,
        .map_mtype = gmc_v11_0_map_mtype,
        .get_vm_pde = gmc_v11_0_get_vm_pde,
        .get_vm_pte = gmc_v11_0_get_vm_pte,
        .get_vbios_fb_size = gmc_v11_0_get_vbios_fb_size,
};

static void gmc_v11_0_set_gmc_funcs(struct amdgpu_device *adev)
{
        adev->gmc.gmc_funcs = &gmc_v11_0_gmc_funcs;
}

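/* UMC function pointers are not hooked up yet; the switch is kept as a
 * placeholder for the per-version (UMC 8.10/8.11) RAS wiring.
 */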
static void gmc_v11_0_set_umc_funcs(struct amdgpu_device *adev)
{
        switch (adev->ip_versions[UMC_HWIP][0]) {
        case IP_VERSION(8, 10, 0):
        case IP_VERSION(8, 11, 0):
                break;
        default:
                break;
        }
}

static void gmc_v11_0_set_mmhub_funcs(struct amdgpu_device *adev)
{
        switch (adev->ip_versions[MMHUB_HWIP][0]) {
        case IP_VERSION(3, 0, 2):
                adev->mmhub.funcs = &mmhub_v3_0_2_funcs;
                break;
        default:
                adev->mmhub.funcs = &mmhub_v3_0_funcs;
                break;
        }
}

static void gmc_v11_0_set_gfxhub_funcs(struct amdgpu_device *adev)
{
        adev->gfxhub.funcs = &gfxhub_v3_0_funcs;
}

static int gmc_v11_0_early_init(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        gmc_v11_0_set_gfxhub_funcs(adev);
        gmc_v11_0_set_mmhub_funcs(adev);
        gmc_v11_0_set_gmc_funcs(adev);
        gmc_v11_0_set_irq_funcs(adev);
        gmc_v11_0_set_umc_funcs(adev);

        adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
        adev->gmc.shared_aperture_end =
                adev->gmc.shared_aperture_start + (4ULL << 30) - 1;
        adev->gmc.private_aperture_start = 0x1000000000000000ULL;
        adev->gmc.private_aperture_end =
                adev->gmc.private_aperture_start + (4ULL << 30) - 1;

        return 0;
}

static int gmc_v11_0_late_init(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        int r;

        r = amdgpu_gmc_allocate_vm_inv_eng(adev);
        if (r)
                return r;

        r = amdgpu_gmc_ras_late_init(adev);
        if (r)
                return r;

        return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
}

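/* Place VRAM and GART within the GPU's physical address space, based on
 * the framebuffer location reported by the MMHUB.
 */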
static void gmc_v11_0_vram_gtt_location(struct amdgpu_device *adev,
                                        struct amdgpu_gmc *mc)
{
        u64 base = 0;

        base = adev->mmhub.funcs->get_fb_location(adev);

        amdgpu_gmc_vram_location(adev, &adev->gmc, base);
        amdgpu_gmc_gart_location(adev, mc);

        /* base offset of vram pages */
        adev->vm_manager.vram_base_offset = adev->mmhub.funcs->get_mc_fb_offset(adev);
}

/**
 * gmc_v11_0_mc_init - initialize the memory controller driver params
 *
 * @adev: amdgpu_device pointer
 *
 * Look up the amount of vram, vram width, and decide how to place
 * vram and gart within the GPU's physical address space.
 * Returns 0 for success.
 */
static int gmc_v11_0_mc_init(struct amdgpu_device *adev)
{
        int r;

        /* size in MB on si */
        adev->gmc.mc_vram_size =
                adev->nbio.funcs->get_memsize(adev) * 1024ULL * 1024ULL;
        adev->gmc.real_vram_size = adev->gmc.mc_vram_size;

        if (!(adev->flags & AMD_IS_APU)) {
                r = amdgpu_device_resize_fb_bar(adev);
                if (r)
                        return r;
        }
        adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
        adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);

#ifdef CONFIG_X86_64
        if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev)) {
                adev->gmc.aper_base = adev->mmhub.funcs->get_mc_fb_offset(adev);
                adev->gmc.aper_size = adev->gmc.real_vram_size;
        }
#endif

        /* In case the PCI BAR is larger than the actual amount of vram */
        adev->gmc.visible_vram_size = adev->gmc.aper_size;
        if (adev->gmc.visible_vram_size > adev->gmc.real_vram_size)
                adev->gmc.visible_vram_size = adev->gmc.real_vram_size;

        /* set the gart size */
        if (amdgpu_gart_size == -1)
                adev->gmc.gart_size = 512ULL << 20;
        else
                adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;

        gmc_v11_0_vram_gtt_location(adev, &adev->gmc);

        return 0;
}

static int gmc_v11_0_gart_init(struct amdgpu_device *adev)
{
        int r;

        if (adev->gart.bo) {
                WARN(1, "PCIE GART already initialized\n");
                return 0;
        }

        /* Initialize common gart structure */
        r = amdgpu_gart_init(adev);
        if (r)
                return r;

        adev->gart.table_size = adev->gart.num_gpu_pages * 8;
        adev->gart.gart_pte_flags = AMDGPU_PTE_MTYPE_NV10(MTYPE_UC) |
                                    AMDGPU_PTE_EXECUTABLE;

        return amdgpu_gart_table_vram_alloc(adev);
}

static int gmc_v11_0_sw_init(void *handle)
{
        int r, vram_width = 0, vram_type = 0, vram_vendor = 0;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        adev->mmhub.funcs->init(adev);

        spin_lock_init(&adev->gmc.invalidate_lock);

        r = amdgpu_atomfirmware_get_vram_info(adev,
                                              &vram_width, &vram_type, &vram_vendor);
        adev->gmc.vram_width = vram_width;

        adev->gmc.vram_type = vram_type;
        adev->gmc.vram_vendor = vram_vendor;

        switch (adev->ip_versions[GC_HWIP][0]) {
        case IP_VERSION(11, 0, 0):
        case IP_VERSION(11, 0, 1):
        case IP_VERSION(11, 0, 2):
                adev->num_vmhubs = 2;
                /*
                 * To fulfill 4-level page support,
                 * vm size is 256TB (48bit), maximum size,
                 * block size 512 (9bit)
                 */
                amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
                break;
        default:
                break;
        }

        /* This interrupt is VMC page fault. */
        r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_VMC,
                              VMC_1_0__SRCID__VM_FAULT,
                              &adev->gmc.vm_fault);

        if (r)
                return r;

        r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GFX,
                              UTCL2_1_0__SRCID__FAULT,
                              &adev->gmc.vm_fault);
        if (r)
                return r;

        if (!amdgpu_sriov_vf(adev)) {
                /* interrupt sent to DF. */
                r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_DF, 0,
                                      &adev->gmc.ecc_irq);
                if (r)
                        return r;
        }

        /*
         * Set the internal MC address mask.
         * This is the max address of the GPU's
         * internal address space.
         */
        adev->gmc.mc_mask = 0xffffffffffffULL; /* 48 bit MC */

        r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(44));
        if (r) {
                dev_warn(adev->dev, "No suitable DMA available.\n");
                return r;
        }

        r = gmc_v11_0_mc_init(adev);
        if (r)
                return r;

        amdgpu_gmc_get_vbios_allocations(adev);

        /* Memory manager */
        r = amdgpu_bo_init(adev);
        if (r)
                return r;

        r = gmc_v11_0_gart_init(adev);
        if (r)
                return r;

        /*
         * number of VMs
         * VMID 0 is reserved for System
         * amdgpu graphics/compute will use VMIDs 1-7
         * amdkfd will use VMIDs 8-15
         */
        adev->vm_manager.first_kfd_vmid = 8;

        amdgpu_vm_manager_init(adev);

        return 0;
}

/**
 * gmc_v11_0_gart_fini - vm fini callback
 *
 * @adev: amdgpu_device pointer
 *
 * Tears down the driver GART/VM setup (all asics).
 */
static void gmc_v11_0_gart_fini(struct amdgpu_device *adev)
{
        amdgpu_gart_table_vram_free(adev);
}

static int gmc_v11_0_sw_fini(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        amdgpu_vm_manager_fini(adev);
        gmc_v11_0_gart_fini(adev);
        amdgpu_gem_force_release(adev);
        amdgpu_bo_fini(adev);

        return 0;
}

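/* No golden register settings are applied for GMC v11 at present. */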
static void gmc_v11_0_init_golden_registers(struct amdgpu_device *adev)
{
}

/**
 * gmc_v11_0_gart_enable - gart enable
 *
 * @adev: amdgpu_device pointer
 */
static int gmc_v11_0_gart_enable(struct amdgpu_device *adev)
{
        int r;
        bool value;

        if (!adev->gart.bo) {
                dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
                return -EINVAL;
        }

        amdgpu_gtt_mgr_recover(&adev->mman.gtt_mgr);

        r = adev->mmhub.funcs->gart_enable(adev);
        if (r)
                return r;

        /* Flush HDP after it is initialized */
        adev->hdp.funcs->flush_hdp(adev, NULL);

        value = amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS;

        adev->mmhub.funcs->set_fault_enable_default(adev, value);
        gmc_v11_0_flush_gpu_tlb(adev, 0, AMDGPU_MMHUB_0, 0);

        DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
                 (unsigned int)(adev->gmc.gart_size >> 20),
                 (unsigned long long)amdgpu_bo_gpu_offset(adev->gart.bo));

        return 0;
}

static int gmc_v11_0_hw_init(void *handle)
{
        int r;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        /* The sequence of these two function calls matters. */
        gmc_v11_0_init_golden_registers(adev);

        r = gmc_v11_0_gart_enable(adev);
        if (r)
                return r;

        if (adev->umc.funcs && adev->umc.funcs->init_registers)
                adev->umc.funcs->init_registers(adev);

        return 0;
}

/**
 * gmc_v11_0_gart_disable - gart disable
 *
 * @adev: amdgpu_device pointer
 *
 * This disables all VM page table.
 */
static void gmc_v11_0_gart_disable(struct amdgpu_device *adev)
{
        adev->mmhub.funcs->gart_disable(adev);
}

static int gmc_v11_0_hw_fini(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        if (amdgpu_sriov_vf(adev)) {
                /* full access mode, so don't touch any GMC register */
                DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
                return 0;
        }

        amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0);
        amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
        gmc_v11_0_gart_disable(adev);

        return 0;
}

static int gmc_v11_0_suspend(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        gmc_v11_0_hw_fini(adev);

        return 0;
}

static int gmc_v11_0_resume(void *handle)
{
        int r;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        r = gmc_v11_0_hw_init(adev);
        if (r)
                return r;

        amdgpu_vmid_reset_all(adev);

        return 0;
}

static bool gmc_v11_0_is_idle(void *handle)
{
        /* MC is always ready in GMC v11. */
        return true;
}

static int gmc_v11_0_wait_for_idle(void *handle)
{
        /* There is no need to wait for MC idle in GMC v11. */
        return 0;
}

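/* GMC v11 has no block-level soft reset of its own. */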
static int gmc_v11_0_soft_reset(void *handle)
{
        return 0;
}

static int gmc_v11_0_set_clockgating_state(void *handle,
                                           enum amd_clockgating_state state)
{
        int r;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        r = adev->mmhub.funcs->set_clockgating(adev, state);
        if (r)
                return r;

        return athub_v3_0_set_clockgating(adev, state);
}

static void gmc_v11_0_get_clockgating_state(void *handle, u64 *flags)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        adev->mmhub.funcs->get_clockgating(adev, flags);

        athub_v3_0_get_clockgating(adev, flags);
}

static int gmc_v11_0_set_powergating_state(void *handle,
                                           enum amd_powergating_state state)
{
        return 0;
}

const struct amd_ip_funcs gmc_v11_0_ip_funcs = {
        .name = "gmc_v11_0",
        .early_init = gmc_v11_0_early_init,
        .sw_init = gmc_v11_0_sw_init,
        .hw_init = gmc_v11_0_hw_init,
        .late_init = gmc_v11_0_late_init,
        .sw_fini = gmc_v11_0_sw_fini,
        .hw_fini = gmc_v11_0_hw_fini,
        .suspend = gmc_v11_0_suspend,
        .resume = gmc_v11_0_resume,
        .is_idle = gmc_v11_0_is_idle,
        .wait_for_idle = gmc_v11_0_wait_for_idle,
        .soft_reset = gmc_v11_0_soft_reset,
        .set_clockgating_state = gmc_v11_0_set_clockgating_state,
        .set_powergating_state = gmc_v11_0_set_powergating_state,
        .get_clockgating_state = gmc_v11_0_get_clockgating_state,
};

const struct amdgpu_ip_block_version gmc_v11_0_ip_block = {
        .type = AMD_IP_BLOCK_TYPE_GMC,
        .major = 11,
        .minor = 0,
        .rev = 0,
        .funcs = &gmc_v11_0_ip_funcs,
};