/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "amdgpu_amdkfd.h"
#include "amd_shared.h"

#include "amdgpu.h"
#include "amdgpu_gfx.h"
#include "amdgpu_dma_buf.h"
#include <linux/module.h>
#include <linux/dma-buf.h>
#include "amdgpu_xgmi.h"

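/* VMIDs 8-15 (the bits set in 0xFF00) are carved out for KFD compute
 * contexts, while amdgpu graphics keeps VMIDs 0-7;
 * amdgpu_amdkfd_is_kfd_vmid() below tests a VMID against this mask.
 */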
static const unsigned int compute_vmid_bitmap = 0xFF00;

/* Total memory size in system memory and all GPU VRAM. Used to
 * estimate worst case amount of memory to reserve for page tables
 */
uint64_t amdgpu_amdkfd_total_mem_size;

int amdgpu_amdkfd_init(void)
{
	struct sysinfo si;
	int ret;

	si_meminfo(&si);
	amdgpu_amdkfd_total_mem_size = si.totalram - si.totalhigh;
	amdgpu_amdkfd_total_mem_size *= si.mem_unit;

#ifdef CONFIG_HSA_AMD
	ret = kgd2kfd_init();
	amdgpu_amdkfd_gpuvm_init_mem_limits();
#else
	ret = -ENOENT;
#endif

	return ret;
}

void amdgpu_amdkfd_fini(void)
{
	kgd2kfd_exit();
}

void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev)
{
	const struct kfd2kgd_calls *kfd2kgd;

	switch (adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_CIK
	case CHIP_KAVERI:
	case CHIP_HAWAII:
		kfd2kgd = amdgpu_amdkfd_gfx_7_get_functions();
		break;
#endif
	case CHIP_CARRIZO:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
		kfd2kgd = amdgpu_amdkfd_gfx_8_0_get_functions();
		break;
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
	case CHIP_RAVEN:
		kfd2kgd = amdgpu_amdkfd_gfx_9_0_get_functions();
		break;
	case CHIP_NAVI10:
		kfd2kgd = amdgpu_amdkfd_gfx_10_0_get_functions();
		break;
	default:
		dev_info(adev->dev, "kfd not supported on this ASIC\n");
		return;
	}

	adev->kfd.dev = kgd2kfd_probe((struct kgd_dev *)adev,
				      adev->pdev, kfd2kgd);

	if (adev->kfd.dev)
		amdgpu_amdkfd_total_mem_size += adev->gmc.real_vram_size;
}

/**
 * amdgpu_doorbell_get_kfd_info - Report doorbell configuration required to
 *                                setup amdkfd
 *
 * @adev: amdgpu_device pointer
 * @aperture_base: output returning doorbell aperture base physical address
 * @aperture_size: output returning doorbell aperture size in bytes
 * @start_offset: output returning # of doorbell bytes reserved for amdgpu.
 *
 * amdgpu and amdkfd share the doorbell aperture. amdgpu sets it up,
 * takes doorbells required for its own rings and reports the setup to amdkfd.
 * amdgpu reserved doorbells are at the start of the doorbell aperture.
 */
static void amdgpu_doorbell_get_kfd_info(struct amdgpu_device *adev,
					 phys_addr_t *aperture_base,
					 size_t *aperture_size,
					 size_t *start_offset)
{
	/*
	 * The first num_doorbells are used by amdgpu.
	 * amdkfd takes whatever's left in the aperture.
	 */
	if (adev->doorbell.size > adev->doorbell.num_doorbells * sizeof(u32)) {
		*aperture_base = adev->doorbell.base;
		*aperture_size = adev->doorbell.size;
		*start_offset = adev->doorbell.num_doorbells * sizeof(u32);
	} else {
		*aperture_base = 0;
		*aperture_size = 0;
		*start_offset = 0;
	}
}

void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
{
	int i;
	int last_valid_bit;

	if (adev->kfd.dev) {
		struct kgd2kfd_shared_resources gpu_resources = {
			.compute_vmid_bitmap = compute_vmid_bitmap,
			.num_pipe_per_mec = adev->gfx.mec.num_pipe_per_mec,
			.num_queue_per_pipe = adev->gfx.mec.num_queue_per_pipe,
			.gpuvm_size = min(adev->vm_manager.max_pfn
					  << AMDGPU_GPU_PAGE_SHIFT,
					  AMDGPU_GMC_HOLE_START),
			.drm_render_minor = adev->ddev->render->index,
			.sdma_doorbell_idx = adev->doorbell_index.sdma_engine,
		};

		/* this is going to have a few of the MSBs set that we need to
		 * clear
		 */
		bitmap_complement(gpu_resources.queue_bitmap,
				  adev->gfx.mec.queue_bitmap,
				  KGD_MAX_QUEUES);

		/* remove the KIQ bit as well */
		if (adev->gfx.kiq.ring.sched.ready)
			clear_bit(amdgpu_gfx_mec_queue_to_bit(adev,
							      adev->gfx.kiq.ring.me - 1,
							      adev->gfx.kiq.ring.pipe,
							      adev->gfx.kiq.ring.queue),
				  gpu_resources.queue_bitmap);

		/* According to linux/bitmap.h we shouldn't use bitmap_clear if
		 * nbits is not compile time constant
		 */
		last_valid_bit = 1 /* only first MEC can have compute queues */
				* adev->gfx.mec.num_pipe_per_mec
				* adev->gfx.mec.num_queue_per_pipe;
		for (i = last_valid_bit; i < KGD_MAX_QUEUES; ++i)
			clear_bit(i, gpu_resources.queue_bitmap);

		amdgpu_doorbell_get_kfd_info(adev,
				&gpu_resources.doorbell_physical_address,
				&gpu_resources.doorbell_aperture_size,
				&gpu_resources.doorbell_start_offset);

		/* Since SOC15, BIF starts to statically use the
		 * lower 12 bits of doorbell addresses for routing
		 * based on settings in registers like
		 * SDMA0_DOORBELL_RANGE etc..
		 * In order to route a doorbell to CP engine, the lower
		 * 12 bits of its address has to be outside the range
		 * set for SDMA, VCN, and IH blocks.
		 */
		if (adev->asic_type >= CHIP_VEGA10) {
			gpu_resources.non_cp_doorbells_start =
					adev->doorbell_index.first_non_cp;
			gpu_resources.non_cp_doorbells_end =
					adev->doorbell_index.last_non_cp;
		}

		kgd2kfd_device_init(adev->kfd.dev, &gpu_resources);
	}
}

void amdgpu_amdkfd_device_fini(struct amdgpu_device *adev)
{
	if (adev->kfd.dev) {
		kgd2kfd_device_exit(adev->kfd.dev);
		adev->kfd.dev = NULL;
	}
}

void amdgpu_amdkfd_interrupt(struct amdgpu_device *adev,
			     const void *ih_ring_entry)
{
	if (adev->kfd.dev)
		kgd2kfd_interrupt(adev->kfd.dev, ih_ring_entry);
}

void amdgpu_amdkfd_suspend(struct amdgpu_device *adev)
{
	if (adev->kfd.dev)
		kgd2kfd_suspend(adev->kfd.dev);
}

int amdgpu_amdkfd_resume(struct amdgpu_device *adev)
{
	int r = 0;

	if (adev->kfd.dev)
		r = kgd2kfd_resume(adev->kfd.dev);

	return r;
}

int amdgpu_amdkfd_pre_reset(struct amdgpu_device *adev)
{
	int r = 0;

	if (adev->kfd.dev)
		r = kgd2kfd_pre_reset(adev->kfd.dev);

	return r;
}

int amdgpu_amdkfd_post_reset(struct amdgpu_device *adev)
{
	int r = 0;

	if (adev->kfd.dev)
		r = kgd2kfd_post_reset(adev->kfd.dev);

	return r;
}

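/* Entry point for KFD to request a GPU reset. The reset is routed through
 * amdgpu's common recovery path and is skipped when recovery is disabled
 * for this device.
 */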
void amdgpu_amdkfd_gpu_reset(struct kgd_dev *kgd)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

	if (amdgpu_device_should_recover_gpu(adev))
		amdgpu_device_gpu_recover(adev, NULL);
}

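/* Allocate a kernel-mode GTT BO for KFD (e.g. for MQDs, as the mqd_gfx9
 * flag suggests): the buffer is created, pinned, bound to the GART and
 * kmapped, returning its GART address and a kernel CPU pointer.
 */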
int amdgpu_amdkfd_alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
				void **mem_obj, uint64_t *gpu_addr,
				void **cpu_ptr, bool mqd_gfx9)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
	struct amdgpu_bo *bo = NULL;
	struct amdgpu_bo_param bp;
	int r;
	void *cpu_ptr_tmp = NULL;

	memset(&bp, 0, sizeof(bp));
	bp.size = size;
	bp.byte_align = PAGE_SIZE;
	bp.domain = AMDGPU_GEM_DOMAIN_GTT;
	bp.flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC;
	bp.type = ttm_bo_type_kernel;
	bp.resv = NULL;

	if (mqd_gfx9)
		bp.flags |= AMDGPU_GEM_CREATE_MQD_GFX9;

	r = amdgpu_bo_create(adev, &bp, &bo);
	if (r) {
		dev_err(adev->dev,
			"failed to allocate BO for amdkfd (%d)\n", r);
		return r;
	}

	/* map the buffer */
	r = amdgpu_bo_reserve(bo, true);
	if (r) {
		dev_err(adev->dev, "(%d) failed to reserve bo for amdkfd\n", r);
		goto allocate_mem_reserve_bo_failed;
	}

	r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
	if (r) {
		dev_err(adev->dev, "(%d) failed to pin bo for amdkfd\n", r);
		goto allocate_mem_pin_bo_failed;
	}

	r = amdgpu_ttm_alloc_gart(&bo->tbo);
	if (r) {
		dev_err(adev->dev, "%p bind failed\n", bo);
		goto allocate_mem_kmap_bo_failed;
	}

	r = amdgpu_bo_kmap(bo, &cpu_ptr_tmp);
	if (r) {
		dev_err(adev->dev,
			"(%d) failed to map bo to kernel for amdkfd\n", r);
		goto allocate_mem_kmap_bo_failed;
	}

	*mem_obj = bo;
	*gpu_addr = amdgpu_bo_gpu_offset(bo);
	*cpu_ptr = cpu_ptr_tmp;

	amdgpu_bo_unreserve(bo);

	return 0;

allocate_mem_kmap_bo_failed:
	amdgpu_bo_unpin(bo);
allocate_mem_pin_bo_failed:
	amdgpu_bo_unreserve(bo);
allocate_mem_reserve_bo_failed:
	amdgpu_bo_unref(&bo);

	return r;
}

void amdgpu_amdkfd_free_gtt_mem(struct kgd_dev *kgd, void *mem_obj)
{
	struct amdgpu_bo *bo = (struct amdgpu_bo *) mem_obj;

	amdgpu_bo_reserve(bo, true);
	amdgpu_bo_kunmap(bo);
	amdgpu_bo_unpin(bo);
	amdgpu_bo_unreserve(bo);
	amdgpu_bo_unref(&(bo));
}

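/* Allocate from the GWS (global wave sync) domain, a small on-chip
 * resource; hence the no-CPU-access flag and byte_align of 1.
 */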
int amdgpu_amdkfd_alloc_gws(struct kgd_dev *kgd, size_t size,
			    void **mem_obj)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
	struct amdgpu_bo *bo = NULL;
	struct amdgpu_bo_param bp;
	int r;

	memset(&bp, 0, sizeof(bp));
	bp.size = size;
	bp.byte_align = 1;
	bp.domain = AMDGPU_GEM_DOMAIN_GWS;
	bp.flags = AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
	bp.type = ttm_bo_type_device;
	bp.resv = NULL;

	r = amdgpu_bo_create(adev, &bp, &bo);
	if (r) {
		dev_err(adev->dev,
			"failed to allocate gws BO for amdkfd (%d)\n", r);
		return r;
	}

	*mem_obj = bo;
	return 0;
}

void amdgpu_amdkfd_free_gws(struct kgd_dev *kgd, void *mem_obj)
{
	struct amdgpu_bo *bo = (struct amdgpu_bo *)mem_obj;

	amdgpu_bo_unref(&bo);
}

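/* Report the loaded microcode version for the engine KFD asks about. */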
uint32_t amdgpu_amdkfd_get_fw_version(struct kgd_dev *kgd,
				      enum kgd_engine_type type)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

	switch (type) {
	case KGD_ENGINE_PFP:
		return adev->gfx.pfp_fw_version;

	case KGD_ENGINE_ME:
		return adev->gfx.me_fw_version;

	case KGD_ENGINE_CE:
		return adev->gfx.ce_fw_version;

	case KGD_ENGINE_MEC1:
		return adev->gfx.mec_fw_version;

	case KGD_ENGINE_MEC2:
		return adev->gfx.mec2_fw_version;

	case KGD_ENGINE_RLC:
		return adev->gfx.rlc_fw_version;

	case KGD_ENGINE_SDMA1:
		return adev->sdma.instance[0].fw_version;

	case KGD_ENGINE_SDMA2:
		return adev->sdma.instance[1].fw_version;

	default:
		return 0;
	}

	return 0;
}

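/* Report VRAM to KFD split into "public" memory (the CPU-visible aperture,
 * provided the whole aperture is reachable through the device's DMA mask)
 * and "private" memory. mem_clk_max is in MHz; DPM reports clocks in
 * 10 kHz units, hence the division by 100.
 */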
void amdgpu_amdkfd_get_local_mem_info(struct kgd_dev *kgd,
				      struct kfd_local_mem_info *mem_info)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
	uint64_t address_mask = adev->dev->dma_mask ? ~*adev->dev->dma_mask :
					     ~((1ULL << 32) - 1);
	resource_size_t aper_limit = adev->gmc.aper_base + adev->gmc.aper_size;

	memset(mem_info, 0, sizeof(*mem_info));
	if (!(adev->gmc.aper_base & address_mask || aper_limit & address_mask)) {
		mem_info->local_mem_size_public = adev->gmc.visible_vram_size;
		mem_info->local_mem_size_private = adev->gmc.real_vram_size -
				adev->gmc.visible_vram_size;
	} else {
		mem_info->local_mem_size_public = 0;
		mem_info->local_mem_size_private = adev->gmc.real_vram_size;
	}
	mem_info->vram_width = adev->gmc.vram_width;

	pr_debug("Address base: %pap limit %pap public 0x%llx private 0x%llx\n",
			&adev->gmc.aper_base, &aper_limit,
			mem_info->local_mem_size_public,
			mem_info->local_mem_size_private);

	if (amdgpu_sriov_vf(adev))
		mem_info->mem_clk_max = adev->clock.default_mclk / 100;
	else if (adev->powerplay.pp_funcs) {
		if (amdgpu_emu_mode == 1)
			mem_info->mem_clk_max = 0;
		else
			mem_info->mem_clk_max = amdgpu_dpm_get_mclk(adev, false) / 100;
	} else
		mem_info->mem_clk_max = 100;
}

uint64_t amdgpu_amdkfd_get_gpu_clock_counter(struct kgd_dev *kgd)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

	if (adev->gfx.funcs->get_gpu_clock_counter)
		return adev->gfx.funcs->get_gpu_clock_counter(adev);
	return 0;
}

uint32_t amdgpu_amdkfd_get_max_engine_clock_in_mhz(struct kgd_dev *kgd)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

	/* the sclk is in quantas of 10kHz */
	if (amdgpu_sriov_vf(adev))
		return adev->clock.default_sclk / 100;
	else if (adev->powerplay.pp_funcs)
		return amdgpu_dpm_get_sclk(adev, false) / 100;
	else
		return 100;
}

void amdgpu_amdkfd_get_cu_info(struct kgd_dev *kgd, struct kfd_cu_info *cu_info)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
	struct amdgpu_cu_info acu_info = adev->gfx.cu_info;

	memset(cu_info, 0, sizeof(*cu_info));
	if (sizeof(cu_info->cu_bitmap) != sizeof(acu_info.bitmap))
		return;

	cu_info->cu_active_number = acu_info.number;
	cu_info->cu_ao_mask = acu_info.ao_cu_mask;
	memcpy(&cu_info->cu_bitmap[0], &acu_info.bitmap[0],
	       sizeof(acu_info.bitmap));
	cu_info->num_shader_engines = adev->gfx.config.max_shader_engines;
	cu_info->num_shader_arrays_per_engine = adev->gfx.config.max_sh_per_se;
	cu_info->num_cu_per_sh = adev->gfx.config.max_cu_per_sh;
	cu_info->simd_per_cu = acu_info.simd_per_cu;
	cu_info->max_waves_per_simd = acu_info.max_waves_per_simd;
	cu_info->wave_front_size = acu_info.wave_front_size;
	cu_info->max_scratch_slots_per_cu = acu_info.max_scratch_slots_per_cu;
	cu_info->lds_size = acu_info.lds_size;
}

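/* Look up a dma-buf fd for KFD import. Only buffers exported by an amdgpu
 * device and preferring VRAM or GTT placement are accepted; each output
 * pointer may be NULL if the caller doesn't need that piece of information.
 */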
int amdgpu_amdkfd_get_dmabuf_info(struct kgd_dev *kgd, int dma_buf_fd,
				  struct kgd_dev **dma_buf_kgd,
				  uint64_t *bo_size, void *metadata_buffer,
				  size_t buffer_size, uint32_t *metadata_size,
				  uint32_t *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
	struct dma_buf *dma_buf;
	struct drm_gem_object *obj;
	struct amdgpu_bo *bo;
	uint64_t metadata_flags;
	int r = -EINVAL;

	dma_buf = dma_buf_get(dma_buf_fd);
	if (IS_ERR(dma_buf))
		return PTR_ERR(dma_buf);

	if (dma_buf->ops != &amdgpu_dmabuf_ops)
		/* Can't handle non-graphics buffers */
		goto out_put;

	obj = dma_buf->priv;
	if (obj->dev->driver != adev->ddev->driver)
		/* Can't handle buffers from different drivers */
		goto out_put;

	adev = obj->dev->dev_private;
	bo = gem_to_amdgpu_bo(obj);
	if (!(bo->preferred_domains & (AMDGPU_GEM_DOMAIN_VRAM |
				       AMDGPU_GEM_DOMAIN_GTT)))
		/* Only VRAM and GTT BOs are supported */
		goto out_put;

	r = 0;
	if (dma_buf_kgd)
		*dma_buf_kgd = (struct kgd_dev *)adev;
	if (bo_size)
		*bo_size = amdgpu_bo_size(bo);
	if (metadata_size)
		*metadata_size = bo->metadata_size;
	if (metadata_buffer)
		r = amdgpu_bo_get_metadata(bo, metadata_buffer, buffer_size,
					   metadata_size, &metadata_flags);
	if (flags) {
		*flags = (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ?
			ALLOC_MEM_FLAGS_VRAM : ALLOC_MEM_FLAGS_GTT;

		if (bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
			*flags |= ALLOC_MEM_FLAGS_PUBLIC;
	}

out_put:
	dma_buf_put(dma_buf);
	return r;
}

uint64_t amdgpu_amdkfd_get_vram_usage(struct kgd_dev *kgd)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

	return amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
}

uint64_t amdgpu_amdkfd_get_hive_id(struct kgd_dev *kgd)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

	return adev->gmc.xgmi.hive_id;
}

uint8_t amdgpu_amdkfd_get_xgmi_hops_count(struct kgd_dev *dst, struct kgd_dev *src)
{
	struct amdgpu_device *peer_adev = (struct amdgpu_device *)src;
	struct amdgpu_device *adev = (struct amdgpu_device *)dst;
	int ret = amdgpu_xgmi_get_hops_count(adev, peer_adev);

	if (ret < 0) {
		DRM_ERROR("amdgpu: failed to get xgmi hops count between node %d and %d. ret = %d\n",
			  adev->gmc.xgmi.physical_node_id,
			  peer_adev->gmc.xgmi.physical_node_id, ret);
		ret = 0;
	}
	return (uint8_t)ret;
}

uint64_t amdgpu_amdkfd_get_mmio_remap_phys_addr(struct kgd_dev *kgd)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

	return adev->rmmio_remap.bus_addr;
}

uint32_t amdgpu_amdkfd_get_num_gws(struct kgd_dev *kgd)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

	return adev->gds.gws_size;
}

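/* Build a one-IB job, submit it directly to the chosen engine's ring and
 * wait synchronously for the fence. Used on the no-HWS path, where the
 * caller supplies the VMID itself.
 */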
int amdgpu_amdkfd_submit_ib(struct kgd_dev *kgd, enum kgd_engine_type engine,
			    uint32_t vmid, uint64_t gpu_addr,
			    uint32_t *ib_cmd, uint32_t ib_len)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct amdgpu_ring *ring;
	struct dma_fence *f = NULL;
	int ret;

	switch (engine) {
	case KGD_ENGINE_MEC1:
		ring = &adev->gfx.compute_ring[0];
		break;
	case KGD_ENGINE_SDMA1:
		ring = &adev->sdma.instance[0].ring;
		break;
	case KGD_ENGINE_SDMA2:
		ring = &adev->sdma.instance[1].ring;
		break;
	default:
		pr_err("Invalid engine in IB submission: %d\n", engine);
		ret = -EINVAL;
		goto err;
	}

	ret = amdgpu_job_alloc(adev, 1, &job, NULL);
	if (ret)
		goto err;

	ib = &job->ibs[0];
	memset(ib, 0, sizeof(struct amdgpu_ib));

	ib->gpu_addr = gpu_addr;
	ib->ptr = ib_cmd;
	ib->length_dw = ib_len;
	/* This works for NO_HWS. TODO: need to handle without knowing VMID */
	job->vmid = vmid;

	ret = amdgpu_ib_schedule(ring, 1, ib, job, &f);
	if (ret) {
		DRM_ERROR("amdgpu: failed to schedule IB.\n");
		goto err_ib_sched;
	}

	ret = dma_fence_wait(f, false);

err_ib_sched:
	dma_fence_put(f);
	amdgpu_job_free(job);
err:
	return ret;
}

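/* Toggle the COMPUTE power profile: the profile is entered while compute
 * work is active (idle == false) and left again when the device goes idle,
 * so DPM can bias clocks and power for the workload.
 */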
void amdgpu_amdkfd_set_compute_idle(struct kgd_dev *kgd, bool idle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

	if (adev->powerplay.pp_funcs &&
	    adev->powerplay.pp_funcs->switch_power_profile)
		amdgpu_dpm_switch_power_profile(adev,
						PP_SMC_POWER_PROFILE_COMPUTE,
						!idle);
}

bool amdgpu_amdkfd_is_kfd_vmid(struct amdgpu_device *adev, u32 vmid)
{
	if (adev->kfd.dev) {
		if ((1 << vmid) & compute_vmid_bitmap)
			return true;
	}

	return false;
}

bool amdgpu_amdkfd_have_atomics_support(struct kgd_dev *kgd)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

	return adev->have_atomics_support;
}

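/* Stubs for kernels built without CONFIG_HSA_AMD, letting the rest of
 * amdgpu call the KFD glue unconditionally and get harmless no-ops.
 */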
#ifndef CONFIG_HSA_AMD
bool amdkfd_fence_check_mm(struct dma_fence *f, struct mm_struct *mm)
{
	return false;
}

void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo)
{
}

void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
				    struct amdgpu_vm *vm)
{
}

struct amdgpu_amdkfd_fence *to_amdgpu_amdkfd_fence(struct dma_fence *f)
{
	return NULL;
}

int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem, struct mm_struct *mm)
{
	return 0;
}

struct kfd2kgd_calls *amdgpu_amdkfd_gfx_7_get_functions(void)
{
	return NULL;
}

struct kfd2kgd_calls *amdgpu_amdkfd_gfx_8_0_get_functions(void)
{
	return NULL;
}

struct kfd2kgd_calls *amdgpu_amdkfd_gfx_9_0_get_functions(void)
{
	return NULL;
}

struct kfd2kgd_calls *amdgpu_amdkfd_gfx_10_0_get_functions(void)
{
	return NULL;
}

struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd, struct pci_dev *pdev,
			      const struct kfd2kgd_calls *f2g)
{
	return NULL;
}

bool kgd2kfd_device_init(struct kfd_dev *kfd,
			 const struct kgd2kfd_shared_resources *gpu_resources)
{
	return false;
}

void kgd2kfd_device_exit(struct kfd_dev *kfd)
{
}

void kgd2kfd_exit(void)
{
}

void kgd2kfd_suspend(struct kfd_dev *kfd)
{
}

int kgd2kfd_resume(struct kfd_dev *kfd)
{
	return 0;
}

int kgd2kfd_pre_reset(struct kfd_dev *kfd)
{
	return 0;
}

int kgd2kfd_post_reset(struct kfd_dev *kfd)
{
	return 0;
}

void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
{
}

void kgd2kfd_set_sram_ecc_flag(struct kfd_dev *kfd)
{
}
#endif