#include "amdgpu_amdkfd.h"
#include "amd_shared.h"

#include "amdgpu.h"
#include "amdgpu_gfx.h"
#include "amdgpu_dma_buf.h"
#include <linux/module.h>
#include <linux/dma-buf.h>
#include "amdgpu_xgmi.h"
#include <uapi/linux/kfd_ioctl.h>

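/* Total memory size in system memory and all GPU VRAM. Used to
 * estimate worst case amount of memory to reserve for page tables.
 */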
uint64_t amdgpu_amdkfd_total_mem_size;

static bool kfd_initialized;

int amdgpu_amdkfd_init(void)
{
	struct sysinfo si;
	int ret;

	si_meminfo(&si);
	amdgpu_amdkfd_total_mem_size = si.totalram - si.totalhigh;
	amdgpu_amdkfd_total_mem_size *= si.mem_unit;

#ifdef CONFIG_HSA_AMD
	ret = kgd2kfd_init();
	amdgpu_amdkfd_gpuvm_init_mem_limits();
#else
	ret = -ENOENT;
#endif
	kfd_initialized = !ret;

	return ret;
}

void amdgpu_amdkfd_fini(void)
{
	if (kfd_initialized) {
		kgd2kfd_exit();
		kfd_initialized = false;
	}
}

void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev)
{
	bool vf = amdgpu_sriov_vf(adev);

	if (!kfd_initialized)
		return;

	adev->kfd.dev = kgd2kfd_probe((struct kgd_dev *)adev,
				      adev->pdev, adev->asic_type, vf);

	if (adev->kfd.dev)
		amdgpu_amdkfd_total_mem_size += adev->gmc.real_vram_size;
}

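/**
 * amdgpu_doorbell_get_kfd_info - Report doorbell configuration required to
 *				  setup amdkfd
 *
 * @adev: amdgpu_device pointer
 * @aperture_base: output returning doorbell aperture base physical address
 * @aperture_size: output returning doorbell aperture size in bytes
 * @start_offset: output returning # of doorbell bytes reserved for amdgpu.
 *
 * amdgpu and amdkfd share the doorbell aperture. amdgpu sets it up,
 * takes doorbells required for its own rings and reports the setup to amdkfd.
 * amdgpu reserved doorbells are at the start of the doorbell aperture.
 */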
static void amdgpu_doorbell_get_kfd_info(struct amdgpu_device *adev,
					 phys_addr_t *aperture_base,
					 size_t *aperture_size,
					 size_t *start_offset)
{
	/*
	 * The first num_doorbells are used by amdgpu.
	 * amdkfd takes whatever's left in the aperture.
	 */
	if (adev->doorbell.size > adev->doorbell.num_doorbells * sizeof(u32)) {
		*aperture_base = adev->doorbell.base;
		*aperture_size = adev->doorbell.size;
		*start_offset = adev->doorbell.num_doorbells * sizeof(u32);
	} else {
		*aperture_base = 0;
		*aperture_size = 0;
		*start_offset = 0;
	}
}

void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
{
	int i;
	int last_valid_bit;

	if (adev->kfd.dev) {
		struct kgd2kfd_shared_resources gpu_resources = {
			.compute_vmid_bitmap =
				((1 << AMDGPU_NUM_VMID) - 1) -
				((1 << adev->vm_manager.first_kfd_vmid) - 1),
			.num_pipe_per_mec = adev->gfx.mec.num_pipe_per_mec,
			.num_queue_per_pipe = adev->gfx.mec.num_queue_per_pipe,
			.gpuvm_size = min(adev->vm_manager.max_pfn
					  << AMDGPU_GPU_PAGE_SHIFT,
					  AMDGPU_GMC_HOLE_START),
			.drm_render_minor = adev_to_drm(adev)->render->index,
			.sdma_doorbell_idx = adev->doorbell_index.sdma_engine,
		};

		/* this is going to have a few of the MSBs set that we need to
		 * clear
		 */
		bitmap_complement(gpu_resources.cp_queue_bitmap,
				  adev->gfx.mec.queue_bitmap,
				  KGD_MAX_QUEUES);

		/* According to linux/bitmap.h we shouldn't use bitmap_clear if
		 * nbits is not compile time constant
		 */
		last_valid_bit = 1 /* only first MEC can have compute queues */
				* adev->gfx.mec.num_pipe_per_mec
				* adev->gfx.mec.num_queue_per_pipe;
		for (i = last_valid_bit; i < KGD_MAX_QUEUES; ++i)
			clear_bit(i, gpu_resources.cp_queue_bitmap);

		amdgpu_doorbell_get_kfd_info(adev,
				&gpu_resources.doorbell_physical_address,
				&gpu_resources.doorbell_aperture_size,
				&gpu_resources.doorbell_start_offset);

		/* Since SOC15, BIF starts to statically use the
		 * lower 12 bits of doorbell addresses for routing
		 * based on settings in registers like
		 * SDMA0_DOORBELL_RANGE etc..
		 * In order to route a doorbell to CP engine, the lower
		 * 12 bits of its address has to be outside the range
		 * set for SDMA, VCN, and IH blocks.
		 */
		if (adev->asic_type >= CHIP_VEGA10) {
			gpu_resources.non_cp_doorbells_start =
					adev->doorbell_index.first_non_cp;
			gpu_resources.non_cp_doorbells_end =
					adev->doorbell_index.last_non_cp;
		}

		kgd2kfd_device_init(adev->kfd.dev, adev_to_drm(adev),
				    &gpu_resources);
	}
}

void amdgpu_amdkfd_device_fini(struct amdgpu_device *adev)
{
	if (adev->kfd.dev) {
		kgd2kfd_device_exit(adev->kfd.dev);
		adev->kfd.dev = NULL;
	}
}

void amdgpu_amdkfd_interrupt(struct amdgpu_device *adev,
			     const void *ih_ring_entry)
{
	if (adev->kfd.dev)
		kgd2kfd_interrupt(adev->kfd.dev, ih_ring_entry);
}

void amdgpu_amdkfd_suspend(struct amdgpu_device *adev, bool run_pm)
{
	if (adev->kfd.dev)
		kgd2kfd_suspend(adev->kfd.dev, run_pm);
}

int amdgpu_amdkfd_resume(struct amdgpu_device *adev, bool run_pm)
{
	int r = 0;

	if (adev->kfd.dev)
		r = kgd2kfd_resume(adev->kfd.dev, run_pm);

	return r;
}

int amdgpu_amdkfd_pre_reset(struct amdgpu_device *adev)
{
	int r = 0;

	if (adev->kfd.dev)
		r = kgd2kfd_pre_reset(adev->kfd.dev);

	return r;
}

int amdgpu_amdkfd_post_reset(struct amdgpu_device *adev)
{
	int r = 0;

	if (adev->kfd.dev)
		r = kgd2kfd_post_reset(adev->kfd.dev);

	return r;
}

void amdgpu_amdkfd_gpu_reset(struct kgd_dev *kgd)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

	if (amdgpu_device_should_recover_gpu(adev))
		amdgpu_device_gpu_recover(adev, NULL);
}

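/* Allocate a pinned, GART-bound kernel BO in GTT for amdkfd and return
 * both its GPU and kernel CPU address. cp_mqd_gfx9 adds the
 * AMDGPU_GEM_CREATE_CP_MQD_GFX9 flag, which requests the special
 * placement that GFX9 CP MQDs need.
 */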
int amdgpu_amdkfd_alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
				void **mem_obj, uint64_t *gpu_addr,
				void **cpu_ptr, bool cp_mqd_gfx9)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
	struct amdgpu_bo *bo = NULL;
	struct amdgpu_bo_param bp;
	int r;
	void *cpu_ptr_tmp = NULL;

	memset(&bp, 0, sizeof(bp));
	bp.size = size;
	bp.byte_align = PAGE_SIZE;
	bp.domain = AMDGPU_GEM_DOMAIN_GTT;
	bp.flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC;
	bp.type = ttm_bo_type_kernel;
	bp.resv = NULL;

	if (cp_mqd_gfx9)
		bp.flags |= AMDGPU_GEM_CREATE_CP_MQD_GFX9;

	r = amdgpu_bo_create(adev, &bp, &bo);
	if (r) {
		dev_err(adev->dev,
			"failed to allocate BO for amdkfd (%d)\n", r);
		return r;
	}

	/* map the buffer */
	r = amdgpu_bo_reserve(bo, true);
	if (r) {
		dev_err(adev->dev, "(%d) failed to reserve bo for amdkfd\n", r);
		goto allocate_mem_reserve_bo_failed;
	}

	r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
	if (r) {
		dev_err(adev->dev, "(%d) failed to pin bo for amdkfd\n", r);
		goto allocate_mem_pin_bo_failed;
	}

	r = amdgpu_ttm_alloc_gart(&bo->tbo);
	if (r) {
		dev_err(adev->dev, "%p bind failed\n", bo);
		goto allocate_mem_kmap_bo_failed;
	}

	r = amdgpu_bo_kmap(bo, &cpu_ptr_tmp);
	if (r) {
		dev_err(adev->dev,
			"(%d) failed to map bo to kernel for amdkfd\n", r);
		goto allocate_mem_kmap_bo_failed;
	}

	*mem_obj = bo;
	*gpu_addr = amdgpu_bo_gpu_offset(bo);
	*cpu_ptr = cpu_ptr_tmp;

	amdgpu_bo_unreserve(bo);

	return 0;

allocate_mem_kmap_bo_failed:
	amdgpu_bo_unpin(bo);
allocate_mem_pin_bo_failed:
	amdgpu_bo_unreserve(bo);
allocate_mem_reserve_bo_failed:
	amdgpu_bo_unref(&bo);

	return r;
}

void amdgpu_amdkfd_free_gtt_mem(struct kgd_dev *kgd, void *mem_obj)
{
	struct amdgpu_bo *bo = (struct amdgpu_bo *)mem_obj;

	amdgpu_bo_reserve(bo, true);
	amdgpu_bo_kunmap(bo);
	amdgpu_bo_unpin(bo);
	amdgpu_bo_unreserve(bo);
	amdgpu_bo_unref(&bo);
}

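/* GWS (Global Wave Sync) BOs only reserve entries in the dedicated GWS
 * hardware block; they are never CPU-accessible and need no pin or kmap.
 */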
int amdgpu_amdkfd_alloc_gws(struct kgd_dev *kgd, size_t size,
			    void **mem_obj)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
	struct amdgpu_bo *bo = NULL;
	struct amdgpu_bo_param bp;
	int r;

	memset(&bp, 0, sizeof(bp));
	bp.size = size;
	bp.byte_align = 1;
	bp.domain = AMDGPU_GEM_DOMAIN_GWS;
	bp.flags = AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
	bp.type = ttm_bo_type_device;
	bp.resv = NULL;

	r = amdgpu_bo_create(adev, &bp, &bo);
	if (r) {
		dev_err(adev->dev,
			"failed to allocate gws BO for amdkfd (%d)\n", r);
		return r;
	}

	*mem_obj = bo;
	return 0;
}

void amdgpu_amdkfd_free_gws(struct kgd_dev *kgd, void *mem_obj)
{
	struct amdgpu_bo *bo = (struct amdgpu_bo *)mem_obj;

	amdgpu_bo_unref(&bo);
}

uint32_t amdgpu_amdkfd_get_fw_version(struct kgd_dev *kgd,
				      enum kgd_engine_type type)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

	switch (type) {
	case KGD_ENGINE_PFP:
		return adev->gfx.pfp_fw_version;

	case KGD_ENGINE_ME:
		return adev->gfx.me_fw_version;

	case KGD_ENGINE_CE:
		return adev->gfx.ce_fw_version;

	case KGD_ENGINE_MEC1:
		return adev->gfx.mec_fw_version;

	case KGD_ENGINE_MEC2:
		return adev->gfx.mec2_fw_version;

	case KGD_ENGINE_RLC:
		return adev->gfx.rlc_fw_version;

	case KGD_ENGINE_SDMA1:
		return adev->sdma.instance[0].fw_version;

	case KGD_ENGINE_SDMA2:
		return adev->sdma.instance[1].fw_version;

	default:
		return 0;
	}
}

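/* VRAM is only reported as public (CPU-visible) when the whole BAR
 * aperture is addressable within the device's DMA mask; otherwise all
 * of it is reported as private.
 */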
void amdgpu_amdkfd_get_local_mem_info(struct kgd_dev *kgd,
				      struct kfd_local_mem_info *mem_info)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
	uint64_t address_mask = adev->dev->dma_mask ? ~*adev->dev->dma_mask :
					     ~((1ULL << 32) - 1);
	resource_size_t aper_limit = adev->gmc.aper_base + adev->gmc.aper_size;

	memset(mem_info, 0, sizeof(*mem_info));
	if (!(adev->gmc.aper_base & address_mask || aper_limit & address_mask)) {
		mem_info->local_mem_size_public = adev->gmc.visible_vram_size;
		mem_info->local_mem_size_private = adev->gmc.real_vram_size -
				adev->gmc.visible_vram_size;
	} else {
		mem_info->local_mem_size_public = 0;
		mem_info->local_mem_size_private = adev->gmc.real_vram_size;
	}
	mem_info->vram_width = adev->gmc.vram_width;

	pr_debug("Address base: %pap limit %pap public 0x%llx private 0x%llx\n",
			&adev->gmc.aper_base, &aper_limit,
			mem_info->local_mem_size_public,
			mem_info->local_mem_size_private);

	if (amdgpu_sriov_vf(adev)) {
		mem_info->mem_clk_max = adev->clock.default_mclk / 100;
	} else if (adev->pm.dpm_enabled) {
		if (amdgpu_emu_mode == 1)
			mem_info->mem_clk_max = 0;
		else
			mem_info->mem_clk_max = amdgpu_dpm_get_mclk(adev, false) / 100;
	} else {
		mem_info->mem_clk_max = 100;
	}
}

uint64_t amdgpu_amdkfd_get_gpu_clock_counter(struct kgd_dev *kgd)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

	if (adev->gfx.funcs->get_gpu_clock_counter)
		return adev->gfx.funcs->get_gpu_clock_counter(adev);
	return 0;
}

uint32_t amdgpu_amdkfd_get_max_engine_clock_in_mhz(struct kgd_dev *kgd)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

	/* the sclk is in quantas of 10kHz */
	if (amdgpu_sriov_vf(adev))
		return adev->clock.default_sclk / 100;
	else if (adev->pm.dpm_enabled)
		return amdgpu_dpm_get_sclk(adev, false) / 100;
	else
		return 100;
}

void amdgpu_amdkfd_get_cu_info(struct kgd_dev *kgd, struct kfd_cu_info *cu_info)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
	struct amdgpu_cu_info acu_info = adev->gfx.cu_info;

	memset(cu_info, 0, sizeof(*cu_info));
	if (sizeof(cu_info->cu_bitmap) != sizeof(acu_info.bitmap))
		return;

	cu_info->cu_active_number = acu_info.number;
	cu_info->cu_ao_mask = acu_info.ao_cu_mask;
	memcpy(&cu_info->cu_bitmap[0], &acu_info.bitmap[0],
	       sizeof(acu_info.bitmap));
	cu_info->num_shader_engines = adev->gfx.config.max_shader_engines;
	cu_info->num_shader_arrays_per_engine = adev->gfx.config.max_sh_per_se;
	cu_info->num_cu_per_sh = adev->gfx.config.max_cu_per_sh;
	cu_info->simd_per_cu = acu_info.simd_per_cu;
	cu_info->max_waves_per_simd = acu_info.max_waves_per_simd;
	cu_info->wave_front_size = acu_info.wave_front_size;
	cu_info->max_scratch_slots_per_cu = acu_info.max_scratch_slots_per_cu;
	cu_info->lds_size = acu_info.lds_size;
}

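/* Look up a dma-buf fd for KFD import. Only buffers exported by the same
 * amdgpu driver and backed by VRAM or GTT are accepted.
 */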
int amdgpu_amdkfd_get_dmabuf_info(struct kgd_dev *kgd, int dma_buf_fd,
				  struct kgd_dev **dma_buf_kgd,
				  uint64_t *bo_size, void *metadata_buffer,
				  size_t buffer_size, uint32_t *metadata_size,
				  uint32_t *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
	struct dma_buf *dma_buf;
	struct drm_gem_object *obj;
	struct amdgpu_bo *bo;
	uint64_t metadata_flags;
	int r = -EINVAL;

	dma_buf = dma_buf_get(dma_buf_fd);
	if (IS_ERR(dma_buf))
		return PTR_ERR(dma_buf);

	if (dma_buf->ops != &amdgpu_dmabuf_ops)
		/* Can't handle non-graphics buffers */
		goto out_put;

	obj = dma_buf->priv;
	if (obj->dev->driver != adev_to_drm(adev)->driver)
		/* Can't handle buffers from different drivers */
		goto out_put;

	adev = drm_to_adev(obj->dev);
	bo = gem_to_amdgpu_bo(obj);
	if (!(bo->preferred_domains & (AMDGPU_GEM_DOMAIN_VRAM |
				    AMDGPU_GEM_DOMAIN_GTT)))
		/* Only VRAM and GTT BOs are supported */
		goto out_put;

	r = 0;
	if (dma_buf_kgd)
		*dma_buf_kgd = (struct kgd_dev *)adev;
	if (bo_size)
		*bo_size = amdgpu_bo_size(bo);
	if (metadata_size)
		*metadata_size = bo->metadata_size;
	if (metadata_buffer)
		r = amdgpu_bo_get_metadata(bo, metadata_buffer, buffer_size,
					   metadata_size, &metadata_flags);
	if (flags) {
		*flags = (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ?
				KFD_IOC_ALLOC_MEM_FLAGS_VRAM
				: KFD_IOC_ALLOC_MEM_FLAGS_GTT;

		if (bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
			*flags |= KFD_IOC_ALLOC_MEM_FLAGS_PUBLIC;
	}

out_put:
	dma_buf_put(dma_buf);
	return r;
}

uint64_t amdgpu_amdkfd_get_vram_usage(struct kgd_dev *kgd)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
	struct ttm_resource_manager *vram_man =
		ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);

	return amdgpu_vram_mgr_usage(vram_man);
}

uint64_t amdgpu_amdkfd_get_hive_id(struct kgd_dev *kgd)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

	return adev->gmc.xgmi.hive_id;
}

uint64_t amdgpu_amdkfd_get_unique_id(struct kgd_dev *kgd)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

	return adev->unique_id;
}

uint8_t amdgpu_amdkfd_get_xgmi_hops_count(struct kgd_dev *dst, struct kgd_dev *src)
{
	struct amdgpu_device *peer_adev = (struct amdgpu_device *)src;
	struct amdgpu_device *adev = (struct amdgpu_device *)dst;
	int ret = amdgpu_xgmi_get_hops_count(adev, peer_adev);

	if (ret < 0) {
		DRM_ERROR("amdgpu: failed to get xgmi hops count between node %d and %d. ret = %d\n",
			  adev->gmc.xgmi.physical_node_id,
			  peer_adev->gmc.xgmi.physical_node_id, ret);
		ret = 0;
	}
	return (uint8_t)ret;
}

uint64_t amdgpu_amdkfd_get_mmio_remap_phys_addr(struct kgd_dev *kgd)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

	return adev->rmmio_remap.bus_addr;
}

uint32_t amdgpu_amdkfd_get_num_gws(struct kgd_dev *kgd)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

	return adev->gds.gws_size;
}

uint32_t amdgpu_amdkfd_get_asic_rev_id(struct kgd_dev *kgd)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

	return adev->rev_id;
}

int amdgpu_amdkfd_get_noretry(struct kgd_dev *kgd)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

	return adev->gmc.noretry;
}

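/* Submit an IB directly on a kernel ring: wrap the command stream in a
 * single-IB job, schedule it with the caller's VMID and wait for the
 * fence before returning.
 */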
int amdgpu_amdkfd_submit_ib(struct kgd_dev *kgd, enum kgd_engine_type engine,
			    uint32_t vmid, uint64_t gpu_addr,
			    uint32_t *ib_cmd, uint32_t ib_len)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct amdgpu_ring *ring;
	struct dma_fence *f = NULL;
	int ret;

	switch (engine) {
	case KGD_ENGINE_MEC1:
		ring = &adev->gfx.compute_ring[0];
		break;
	case KGD_ENGINE_SDMA1:
		ring = &adev->sdma.instance[0].ring;
		break;
	case KGD_ENGINE_SDMA2:
		ring = &adev->sdma.instance[1].ring;
		break;
	default:
		pr_err("Invalid engine in IB submission: %d\n", engine);
		ret = -EINVAL;
		goto err;
	}

	ret = amdgpu_job_alloc(adev, 1, &job, NULL);
	if (ret)
		goto err;

	ib = &job->ibs[0];
	memset(ib, 0, sizeof(struct amdgpu_ib));

	ib->gpu_addr = gpu_addr;
	ib->ptr = ib_cmd;
	ib->length_dw = ib_len;
	/* This works for NO_HWS. TODO: need to handle without knowing VMID */
	job->vmid = vmid;

	ret = amdgpu_ib_schedule(ring, 1, ib, job, &f);
	if (ret) {
		DRM_ERROR("amdgpu: failed to schedule IB.\n");
		goto err_ib_sched;
	}

	ret = dma_fence_wait(f, false);

err_ib_sched:
	dma_fence_put(f);
	amdgpu_job_free(job);
err:
	return ret;
}

void amdgpu_amdkfd_set_compute_idle(struct kgd_dev *kgd, bool idle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

	amdgpu_dpm_switch_power_profile(adev,
					PP_SMC_POWER_PROFILE_COMPUTE,
					!idle);
}

bool amdgpu_amdkfd_is_kfd_vmid(struct amdgpu_device *adev, u32 vmid)
{
	if (adev->kfd.dev)
		return vmid >= adev->vm_manager.first_kfd_vmid;

	return false;
}

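/* On GFX9 (FAMILY_AI) parts compute work can translate through either
 * the GFX or the MM hub (SDMA, for one, sits behind the MM hub), so the
 * VMID has to be flushed on every VM hub; other families only use
 * GFXHUB 0 here.
 */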
int amdgpu_amdkfd_flush_gpu_tlb_vmid(struct kgd_dev *kgd, uint16_t vmid)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

	if (adev->family == AMDGPU_FAMILY_AI) {
		int i;

		for (i = 0; i < adev->num_vmhubs; i++)
			amdgpu_gmc_flush_gpu_tlb(adev, vmid, i, 0);
	} else {
		amdgpu_gmc_flush_gpu_tlb(adev, vmid, AMDGPU_GFXHUB_0, 0);
	}

	return 0;
}

int amdgpu_amdkfd_flush_gpu_tlb_pasid(struct kgd_dev *kgd, uint16_t pasid)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
	const uint32_t flush_type = 0;
	bool all_hub = false;

	if (adev->family == AMDGPU_FAMILY_AI)
		all_hub = true;

	return amdgpu_gmc_flush_gpu_tlb_pasid(adev, pasid, flush_type, all_hub);
}

bool amdgpu_amdkfd_have_atomics_support(struct kgd_dev *kgd)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

	return adev->have_atomics_support;
}

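/* Stubs for the kgd2kfd interface when KFD is not built in
 * (CONFIG_HSA_AMD=n), so the rest of amdgpu can call it unconditionally.
 */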
#ifndef CONFIG_HSA_AMD
bool amdkfd_fence_check_mm(struct dma_fence *f, struct mm_struct *mm)
{
	return false;
}

void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo)
{
}

int amdgpu_amdkfd_remove_fence_on_pt_pd_bos(struct amdgpu_bo *bo)
{
	return 0;
}

void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
				    struct amdgpu_vm *vm)
{
}

struct amdgpu_amdkfd_fence *to_amdgpu_amdkfd_fence(struct dma_fence *f)
{
	return NULL;
}

int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem, struct mm_struct *mm)
{
	return 0;
}

struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd, struct pci_dev *pdev,
			      unsigned int asic_type, bool vf)
{
	return NULL;
}

bool kgd2kfd_device_init(struct kfd_dev *kfd,
			 struct drm_device *ddev,
			 const struct kgd2kfd_shared_resources *gpu_resources)
{
	return false;
}

void kgd2kfd_device_exit(struct kfd_dev *kfd)
{
}

void kgd2kfd_exit(void)
{
}

void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm)
{
}

int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm)
{
	return 0;
}

int kgd2kfd_pre_reset(struct kfd_dev *kfd)
{
	return 0;
}

int kgd2kfd_post_reset(struct kfd_dev *kfd)
{
	return 0;
}

void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
{
}

void kgd2kfd_set_sram_ecc_flag(struct kfd_dev *kfd)
{
}

void kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint32_t throttle_bitmask)
{
}
#endif