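/* amdgpu_amdkfd.h defines the private interface between amdgpu and amdkfd. */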
#ifndef AMDGPU_AMDKFD_H_INCLUDED
#define AMDGPU_AMDKFD_H_INCLUDED

#include <linux/types.h>
#include <linux/mm.h>
#include <linux/kthread.h>
#include <linux/workqueue.h>
#include <kgd_kfd_interface.h>
#include <drm/ttm/ttm_execbuf_util.h>
#include "amdgpu_sync.h"
#include "amdgpu_vm.h"

extern uint64_t amdgpu_amdkfd_total_mem_size;

enum TLB_FLUSH_TYPE {
	TLB_FLUSH_LEGACY = 0,
	TLB_FLUSH_LIGHTWEIGHT,
	TLB_FLUSH_HEAVYWEIGHT
};
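/* Usage sketch (illustrative; "pasid" is a placeholder): after a
 * process's page tables change, stale GPU translations can be dropped
 * per PASID via amdgpu_amdkfd_flush_gpu_tlb_pasid() below, with the
 * flush type selecting how thorough (and how expensive) the
 * invalidation is.
 *
 *	if (amdgpu_amdkfd_flush_gpu_tlb_pasid(kgd, pasid,
 *					      TLB_FLUSH_HEAVYWEIGHT))
 *		pr_err("TLB flush failed for pasid 0x%x\n", pasid);
 */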

struct amdgpu_device;

enum kfd_mem_attachment_type {
	KFD_MEM_ATT_SHARED,	/* Share kgd_mem->bo or another attachment's */
	KFD_MEM_ATT_USERPTR,	/* SG bo to DMA map pages from a userptr bo */
	KFD_MEM_ATT_DMABUF,	/* DMAbuf to DMA map TTM BOs */
};

/* One attachment of a kgd_mem BO to a GPU VM; a BO shared between
 * GPUs has one attachment per GPU (see kgd_mem.attachments).
 */
struct kfd_mem_attachment {
	struct list_head list;		/* links kgd_mem.attachments */
	enum kfd_mem_attachment_type type;
	bool is_mapped;
	struct amdgpu_bo_va *bo_va;
	struct amdgpu_device *adev;	/* GPU this attachment belongs to */
	uint64_t va;			/* virtual address of the mapping */
	uint64_t pte_flags;
};

struct kgd_mem {
	struct mutex lock;
	struct amdgpu_bo *bo;
	struct dma_buf *dmabuf;
	struct list_head attachments;
	/* protected by amdkfd_process_info.lock */
	struct ttm_validate_buffer validate_list;
	struct ttm_validate_buffer resv_list;
	uint32_t domain;
	unsigned int mapped_to_gpu_memory;
	uint64_t va;

	uint32_t alloc_flags;

	atomic_t invalid;
	struct amdkfd_process_info *process_info;

	struct amdgpu_sync sync;

	bool aql_queue;
	bool is_imported;
};

/* KFD eviction fence: attaching it to a BO's reservation makes eviction
 * of that BO trigger the KFD eviction/restore machinery for the owning
 * process (identified by mm).
 */
struct amdgpu_amdkfd_fence {
	struct dma_fence base;
	struct mm_struct *mm;
	spinlock_t lock;
	char timeline_name[TASK_COMM_LEN];
	struct svm_range_bo *svm_bo;
};
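/* Creation sketch (illustrative): eviction fences get a dedicated fence
 * context; passing a NULL svm_bo to amdgpu_amdkfd_fence_create() (declared
 * below) makes this a process-wide eviction fence rather than an SVM
 * range fence.
 *
 *	struct amdgpu_amdkfd_fence *fence =
 *		amdgpu_amdkfd_fence_create(dma_fence_context_alloc(1),
 *					   current->mm, NULL);
 */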

struct amdgpu_kfd_dev {
	struct kfd_dev *dev;
	uint64_t vram_used;
	bool init_complete;
};

enum kgd_engine_type {
	KGD_ENGINE_PFP = 1,
	KGD_ENGINE_ME,
	KGD_ENGINE_CE,
	KGD_ENGINE_MEC1,
	KGD_ENGINE_MEC2,
	KGD_ENGINE_RLC,
	KGD_ENGINE_SDMA1,
	KGD_ENGINE_SDMA2,
	KGD_ENGINE_MAX
};


struct amdkfd_process_info {
	/* List head of all VMs that belong to a KFD process */
	struct list_head vm_list_head;
	/* List head for all KFD BOs that belong to a KFD process. */
	struct list_head kfd_bo_list;
	/* List of userptr BOs that are valid or invalid */
	struct list_head userptr_valid_list;
	struct list_head userptr_inval_list;
	/* Lock to protect kfd_bo_list */
	struct mutex lock;

	/* Number of VMs */
	unsigned int n_vms;
	/* Eviction Fence */
	struct amdgpu_amdkfd_fence *eviction_fence;

	/* MMU-notifier related fields */
	atomic_t evicted_bos;
	struct delayed_work restore_userptr_work;
	struct pid *pid;
};
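/* Eviction/restore flow sketch (descriptive, based on the fields above):
 * an MMU-notifier invalidation bumps evicted_bos and moves the affected
 * BOs to userptr_inval_list; restore_userptr_work later revalidates them
 * and resumes the process's queues once evicted_bos drops back to zero.
 */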

int amdgpu_amdkfd_init(void);
void amdgpu_amdkfd_fini(void);

void amdgpu_amdkfd_suspend(struct amdgpu_device *adev, bool run_pm);
int amdgpu_amdkfd_resume_iommu(struct amdgpu_device *adev);
int amdgpu_amdkfd_resume(struct amdgpu_device *adev, bool run_pm);
void amdgpu_amdkfd_interrupt(struct amdgpu_device *adev,
			const void *ih_ring_entry);
void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev);
void amdgpu_amdkfd_device_init(struct amdgpu_device *adev);
void amdgpu_amdkfd_device_fini_sw(struct amdgpu_device *adev);
int amdgpu_amdkfd_submit_ib(struct kgd_dev *kgd, enum kgd_engine_type engine,
				uint32_t vmid, uint64_t gpu_addr,
				uint32_t *ib_cmd, uint32_t ib_len);
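/* Usage sketch (illustrative; "vmid", "ib_gpu_addr", "packets" and "ndw"
 * are placeholders): submit an indirect buffer of "ndw" dwords on the
 * first compute micro-engine on behalf of the given VMID.
 *
 *	int r = amdgpu_amdkfd_submit_ib(kgd, KGD_ENGINE_MEC1, vmid,
 *					ib_gpu_addr, packets, ndw);
 *	if (r)
 *		pr_err("IB submission failed: %d\n", r);
 */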
void amdgpu_amdkfd_set_compute_idle(struct kgd_dev *kgd, bool idle);
bool amdgpu_amdkfd_have_atomics_support(struct kgd_dev *kgd);
int amdgpu_amdkfd_flush_gpu_tlb_vmid(struct kgd_dev *kgd, uint16_t vmid);
int amdgpu_amdkfd_flush_gpu_tlb_pasid(struct kgd_dev *kgd, uint16_t pasid,
				enum TLB_FLUSH_TYPE flush_type);

bool amdgpu_amdkfd_is_kfd_vmid(struct amdgpu_device *adev, u32 vmid);

int amdgpu_amdkfd_pre_reset(struct amdgpu_device *adev);

int amdgpu_amdkfd_post_reset(struct amdgpu_device *adev);

void amdgpu_amdkfd_gpu_reset(struct kgd_dev *kgd);

int amdgpu_queue_mask_bit_to_set_resource_bit(struct amdgpu_device *adev,
					int queue_bit);

struct amdgpu_amdkfd_fence *amdgpu_amdkfd_fence_create(u64 context,
				struct mm_struct *mm,
				struct svm_range_bo *svm_bo);
#if IS_ENABLED(CONFIG_HSA_AMD)
bool amdkfd_fence_check_mm(struct dma_fence *f, struct mm_struct *mm);
struct amdgpu_amdkfd_fence *to_amdgpu_amdkfd_fence(struct dma_fence *f);
int amdgpu_amdkfd_remove_fence_on_pt_pd_bos(struct amdgpu_bo *bo);
int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem, struct mm_struct *mm);
#else
static inline
bool amdkfd_fence_check_mm(struct dma_fence *f, struct mm_struct *mm)
{
	return false;
}

static inline
struct amdgpu_amdkfd_fence *to_amdgpu_amdkfd_fence(struct dma_fence *f)
{
	return NULL;
}

static inline
int amdgpu_amdkfd_remove_fence_on_pt_pd_bos(struct amdgpu_bo *bo)
{
	return 0;
}

static inline
int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem, struct mm_struct *mm)
{
	return 0;
}
#endif
/* Shared API */
int amdgpu_amdkfd_alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
				void **mem_obj, uint64_t *gpu_addr,
				void **cpu_ptr, bool mqd_gfx9);
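/* Usage sketch (illustrative; error handling abbreviated): allocate a
 * pinned, CPU-visible GTT buffer (e.g. for an MQD) and release it with
 * amdgpu_amdkfd_free_gtt_mem() when done; a return of 0 means success.
 *
 *	void *obj;
 *	uint64_t gpu_addr;
 *	void *cpu_ptr;
 *
 *	if (!amdgpu_amdkfd_alloc_gtt_mem(kgd, PAGE_SIZE, &obj, &gpu_addr,
 *					 &cpu_ptr, false)) {
 *		memset(cpu_ptr, 0, PAGE_SIZE);
 *		amdgpu_amdkfd_free_gtt_mem(kgd, obj);
 *	}
 */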
void amdgpu_amdkfd_free_gtt_mem(struct kgd_dev *kgd, void *mem_obj);
int amdgpu_amdkfd_alloc_gws(struct kgd_dev *kgd, size_t size, void **mem_obj);
void amdgpu_amdkfd_free_gws(struct kgd_dev *kgd, void *mem_obj);
int amdgpu_amdkfd_add_gws_to_process(void *info, void *gws, struct kgd_mem **mem);
int amdgpu_amdkfd_remove_gws_from_process(void *info, void *mem);
uint32_t amdgpu_amdkfd_get_fw_version(struct kgd_dev *kgd,
				      enum kgd_engine_type type);
void amdgpu_amdkfd_get_local_mem_info(struct kgd_dev *kgd,
				      struct kfd_local_mem_info *mem_info);
uint64_t amdgpu_amdkfd_get_gpu_clock_counter(struct kgd_dev *kgd);

uint32_t amdgpu_amdkfd_get_max_engine_clock_in_mhz(struct kgd_dev *kgd);
void amdgpu_amdkfd_get_cu_info(struct kgd_dev *kgd, struct kfd_cu_info *cu_info);
int amdgpu_amdkfd_get_dmabuf_info(struct kgd_dev *kgd, int dma_buf_fd,
				  struct kgd_dev **dmabuf_kgd,
				  uint64_t *bo_size, void *metadata_buffer,
				  size_t buffer_size, uint32_t *metadata_size,
				  uint32_t *flags);
uint64_t amdgpu_amdkfd_get_vram_usage(struct kgd_dev *kgd);
uint64_t amdgpu_amdkfd_get_hive_id(struct kgd_dev *kgd);
uint64_t amdgpu_amdkfd_get_unique_id(struct kgd_dev *kgd);
uint64_t amdgpu_amdkfd_get_mmio_remap_phys_addr(struct kgd_dev *kgd);
uint32_t amdgpu_amdkfd_get_num_gws(struct kgd_dev *kgd);
uint32_t amdgpu_amdkfd_get_asic_rev_id(struct kgd_dev *kgd);
int amdgpu_amdkfd_get_noretry(struct kgd_dev *kgd);
uint8_t amdgpu_amdkfd_get_xgmi_hops_count(struct kgd_dev *dst, struct kgd_dev *src);
int amdgpu_amdkfd_get_xgmi_bandwidth_mbytes(struct kgd_dev *dst, struct kgd_dev *src, bool is_min);
int amdgpu_amdkfd_get_pcie_bandwidth_mbytes(struct kgd_dev *dev, bool is_min);

/* Read user wptr from a specified user address space with page fault
 * disabled. The memory must be pinned and mapped to the hardware when
 * this is called in hqd_load functions, so it should never fault in
 * the first place. This resolves a circular lock dependency involving
 * four locks, including the DQM lock and mmap_lock.
 */
#define read_user_wptr(mmptr, wptr, dst)			\
	({							\
		bool valid = false;				\
		if ((mmptr) && (wptr)) {			\
			pagefault_disable();			\
			if ((mmptr) == current->mm) {		\
				valid = !get_user((dst), (wptr)); \
			} else if (current->flags & PF_KTHREAD) { \
				kthread_use_mm(mmptr);		\
				valid = !get_user((dst), (wptr)); \
				kthread_unuse_mm(mmptr);	\
			}					\
			pagefault_enable();			\
		}						\
		valid;						\
	})
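/* Usage sketch (illustrative; "mm", "wptr" and the consumer
 * program_hqd_wptr() are placeholders): snapshot a user-mode write
 * pointer before programming HQD registers, skipping it if the value
 * could not be read without faulting.
 *
 *	uint32_t wptr_val;
 *
 *	if (read_user_wptr(mm, wptr, wptr_val))
 *		program_hqd_wptr(wptr_val);
 */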

/* GPUVM API */
#define drm_priv_to_vm(drm_priv)					\
	(&((struct amdgpu_fpriv *)					\
		((struct drm_file *)(drm_priv))->driver_priv)->vm)
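/* Usage sketch (illustrative): drm_priv is the opaque per-process cookie
 * passed through the gpuvm API below; the casts show it is really a
 * struct drm_file pointer, from which the per-process GPU VM is recovered.
 *
 *	struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
 */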

int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct kgd_dev *kgd,
					struct file *filp, u32 pasid,
					void **process_info,
					struct dma_fence **ef);
void amdgpu_amdkfd_gpuvm_release_process_vm(struct kgd_dev *kgd, void *drm_priv);
uint64_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *drm_priv);
int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
		struct kgd_dev *kgd, uint64_t va, uint64_t size,
		void *drm_priv, struct kgd_mem **mem,
		uint64_t *offset, uint32_t flags);
int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
		struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv,
		uint64_t *size);
int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
		struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv,
		bool *table_freed);
int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
		struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv);
int amdgpu_amdkfd_gpuvm_sync_memory(
		struct kgd_dev *kgd, struct kgd_mem *mem, bool intr);
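/* Usage sketch (illustrative; error handling elided, "va", "size" and
 * "flags" are placeholders): the usual BO lifecycle is allocate, map,
 * sync, and later unmap and free, all against the same drm_priv/VM.
 *
 *	struct kgd_mem *mem;
 *	uint64_t offset;
 *
 *	amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(kgd, va, size, drm_priv,
 *						&mem, &offset, flags);
 *	amdgpu_amdkfd_gpuvm_map_memory_to_gpu(kgd, mem, drm_priv, NULL);
 *	amdgpu_amdkfd_gpuvm_sync_memory(kgd, mem, true);
 *
 *	amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(kgd, mem, drm_priv);
 *	amdgpu_amdkfd_gpuvm_free_memory_of_gpu(kgd, mem, drm_priv, NULL);
 */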
int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct kgd_dev *kgd,
		struct kgd_mem *mem, void **kptr, uint64_t *size);
int amdgpu_amdkfd_gpuvm_restore_process_bos(void *process_info,
					    struct dma_fence **ef);
int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct kgd_dev *kgd,
					  struct kfd_vm_fault_info *info);
int amdgpu_amdkfd_gpuvm_import_dmabuf(struct kgd_dev *kgd,
				      struct dma_buf *dmabuf,
				      uint64_t va, void *drm_priv,
				      struct kgd_mem **mem, uint64_t *size,
				      uint64_t *mmap_offset);
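/* Usage sketch (illustrative; "fd" and "va" are placeholders): wrap a
 * foreign DMA-buf in a kgd_mem so it can then be mapped with
 * amdgpu_amdkfd_gpuvm_map_memory_to_gpu() like any other BO.
 *
 *	struct dma_buf *dmabuf = dma_buf_get(fd);
 *	struct kgd_mem *mem;
 *	uint64_t size;
 *
 *	if (!IS_ERR(dmabuf)) {
 *		amdgpu_amdkfd_gpuvm_import_dmabuf(kgd, dmabuf, va, drm_priv,
 *						  &mem, &size, NULL);
 *		dma_buf_put(dmabuf);
 *	}
 */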
int amdgpu_amdkfd_get_tile_config(struct kgd_dev *kgd,
				  struct tile_config *config);
#if IS_ENABLED(CONFIG_HSA_AMD)
void amdgpu_amdkfd_gpuvm_init_mem_limits(void);
void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
				struct amdgpu_vm *vm);
void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo);
void amdgpu_amdkfd_reserve_system_mem(uint64_t size);
#else
static inline
void amdgpu_amdkfd_gpuvm_init_mem_limits(void)
{
}

static inline
void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
					struct amdgpu_vm *vm)
{
}

static inline
void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo)
{
}

static inline
void amdgpu_amdkfd_reserve_system_mem(uint64_t size)
{
}
#endif
/* KGD2KFD callbacks */
int kgd2kfd_quiesce_mm(struct mm_struct *mm);
int kgd2kfd_resume_mm(struct mm_struct *mm);
int kgd2kfd_schedule_evict_and_restore_process(struct mm_struct *mm,
						struct dma_fence *fence);
#if IS_ENABLED(CONFIG_HSA_AMD)
int kgd2kfd_init(void);
void kgd2kfd_exit(void);
struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd, struct pci_dev *pdev,
			      unsigned int asic_type, bool vf);
bool kgd2kfd_device_init(struct kfd_dev *kfd,
			 struct drm_device *ddev,
			 const struct kgd2kfd_shared_resources *gpu_resources);
void kgd2kfd_device_exit(struct kfd_dev *kfd);
void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm);
int kgd2kfd_resume_iommu(struct kfd_dev *kfd);
int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm);
int kgd2kfd_pre_reset(struct kfd_dev *kfd);
int kgd2kfd_post_reset(struct kfd_dev *kfd);
void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry);
void kgd2kfd_set_sram_ecc_flag(struct kfd_dev *kfd);
void kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint64_t throttle_bitmask);
#else
static inline int kgd2kfd_init(void)
{
	return -ENOENT;
}

static inline void kgd2kfd_exit(void)
{
}

static inline
struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd, struct pci_dev *pdev,
			      unsigned int asic_type, bool vf)
{
	return NULL;
}

static inline
bool kgd2kfd_device_init(struct kfd_dev *kfd, struct drm_device *ddev,
			 const struct kgd2kfd_shared_resources *gpu_resources)
{
	return false;
}

static inline void kgd2kfd_device_exit(struct kfd_dev *kfd)
{
}

static inline void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm)
{
}

static inline int kgd2kfd_resume_iommu(struct kfd_dev *kfd)
{
	return 0;
}

static inline int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm)
{
	return 0;
}

static inline int kgd2kfd_pre_reset(struct kfd_dev *kfd)
{
	return 0;
}

static inline int kgd2kfd_post_reset(struct kfd_dev *kfd)
{
	return 0;
}

static inline
void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
{
}

static inline
void kgd2kfd_set_sram_ecc_flag(struct kfd_dev *kfd)
{
}

static inline
void kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint64_t throttle_bitmask)
{
}
#endif
#endif /* AMDGPU_AMDKFD_H_INCLUDED */