#ifndef __AMDGPU_VM_H__
#define __AMDGPU_VM_H__

#include <linux/idr.h>
#include <linux/kfifo.h>
#include <linux/rbtree.h>
#include <drm/gpu_scheduler.h>
#include <drm/drm_file.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <linux/sched/mm.h>

#include "amdgpu_sync.h"
#include "amdgpu_ring.h"
#include "amdgpu_ids.h"

struct amdgpu_bo_va;
struct amdgpu_job;
struct amdgpu_bo_list_entry;

/*
 * GPUVM handling
 */

/* Maximum number of PTEs the hardware can write with one command */
#define AMDGPU_VM_MAX_UPDATE_SIZE	0x3FFFF

/* number of entries in page table */
#define AMDGPU_VM_PTE_COUNT(adev) (1 << (adev)->vm_manager.block_size)

#define AMDGPU_PTE_VALID	(1ULL << 0)
#define AMDGPU_PTE_SYSTEM	(1ULL << 1)
#define AMDGPU_PTE_SNOOPED	(1ULL << 2)

/* VI only */
#define AMDGPU_PTE_EXECUTABLE	(1ULL << 4)

#define AMDGPU_PTE_READABLE	(1ULL << 5)
#define AMDGPU_PTE_WRITEABLE	(1ULL << 6)

#define AMDGPU_PTE_FRAG(x)	(((x) & 0x1fULL) << 7)

/* TILED for VEGA10, reserved for older ASICs */
#define AMDGPU_PTE_PRT		(1ULL << 51)

/* PDE is handled as PTE for VEGA10 */
#define AMDGPU_PDE_PTE		(1ULL << 54)

#define AMDGPU_PTE_LOG		(1ULL << 55)

/* PTE is handled as PDE for VEGA10 (Translate Further) */
#define AMDGPU_PTE_TF		(1ULL << 56)

/* PDE Block Fragment Size for VEGA10 */
#define AMDGPU_PDE_BFS(a)	((uint64_t)(a) << 59)

/* For GFX9 */
#define AMDGPU_PTE_MTYPE_VG10(a)	((uint64_t)(a) << 57)
#define AMDGPU_PTE_MTYPE_VG10_MASK	AMDGPU_PTE_MTYPE_VG10(3ULL)

#define AMDGPU_MTYPE_NC		0
#define AMDGPU_MTYPE_CC		2

#define AMDGPU_PTE_DEFAULT_ATC	(AMDGPU_PTE_SYSTEM	\
				| AMDGPU_PTE_SNOOPED	\
				| AMDGPU_PTE_EXECUTABLE	\
				| AMDGPU_PTE_READABLE	\
				| AMDGPU_PTE_WRITEABLE	\
				| AMDGPU_PTE_MTYPE_VG10(AMDGPU_MTYPE_CC))
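
/*
 * Illustrative only, not part of the driver: the PTE bits above are
 * OR'ed together to form the flags of a mapping. A valid, snooped
 * system page that is readable and writeable on a GFX9 part could be
 * encoded as:
 *
 *	uint64_t flags = AMDGPU_PTE_VALID | AMDGPU_PTE_SYSTEM |
 *			 AMDGPU_PTE_SNOOPED | AMDGPU_PTE_READABLE |
 *			 AMDGPU_PTE_WRITEABLE |
 *			 AMDGPU_PTE_MTYPE_VG10(AMDGPU_MTYPE_NC);
 *
 * AMDGPU_PTE_MTYPE_VG10_MASK covers the whole MTYPE field, so a new
 * memory type is set with:
 *
 *	flags = (flags & ~AMDGPU_PTE_MTYPE_VG10_MASK) |
 *		AMDGPU_PTE_MTYPE_VG10(AMDGPU_MTYPE_CC);
 */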

/* gfx10 */
#define AMDGPU_PTE_MTYPE_NV10(a)	((uint64_t)(a) << 48)
#define AMDGPU_PTE_MTYPE_NV10_MASK	AMDGPU_PTE_MTYPE_NV10(7ULL)

/* How to program VM fault handling */
#define AMDGPU_VM_FAULT_STOP_NEVER	0
#define AMDGPU_VM_FAULT_STOP_FIRST	1
#define AMDGPU_VM_FAULT_STOP_ALWAYS	2

/* Reserve 4MB VRAM for page tables */
#define AMDGPU_VM_RESERVED_VRAM		(4ULL << 20)

/* max number of VMHUB */
#define AMDGPU_MAX_VMHUBS		3
#define AMDGPU_GFXHUB_0			0
#define AMDGPU_MMHUB_0			1
#define AMDGPU_MMHUB_1			2

/* hardcode that limit for now */
#define AMDGPU_VA_RESERVED_SIZE		(1ULL << 20)

/* max vmids dedicated for process */
#define AMDGPU_VM_MAX_RESERVED_VMID	1

#define AMDGPU_VM_CONTEXT_GFX		0
#define AMDGPU_VM_CONTEXT_COMPUTE	1

/* See vm_update_mode */
#define AMDGPU_VM_USE_CPU_FOR_GFX	(1 << 0)
#define AMDGPU_VM_USE_CPU_FOR_COMPUTE	(1 << 1)

/* VMPT level enumerate, and the hierarchy is:
 * PDB2->PDB1->PDB0->PTB
 */
enum amdgpu_vm_level {
	AMDGPU_VM_PDB2,
	AMDGPU_VM_PDB1,
	AMDGPU_VM_PDB0,
	AMDGPU_VM_PTB
};

/* base structure for tracking BO usage in a VM */
struct amdgpu_vm_bo_base {
	/* constant after initialization */
	struct amdgpu_vm		*vm;
	struct amdgpu_bo		*bo;

	/* protected by bo being reserved */
	struct amdgpu_vm_bo_base	*next;

	/* protected by spinlock */
	struct list_head		vm_status;

	/* protected by the BO being reserved */
	bool				moved;
};

struct amdgpu_vm_pt {
	struct amdgpu_vm_bo_base	base;

	/* array of page tables, one for each directory entry */
	struct amdgpu_vm_pt		*entries;
};

/* provided by hw blocks that can write ptes, e.g., sdma */
struct amdgpu_vm_pte_funcs {
	/* number of dw to reserve per operation */
	unsigned	copy_pte_num_dw;

	/* copy pte entries from GART */
	void (*copy_pte)(struct amdgpu_ib *ib,
			 uint64_t pe, uint64_t src,
			 unsigned count);

	/* write pte one entry at a time with addr mapping */
	void (*write_pte)(struct amdgpu_ib *ib, uint64_t pe,
			  uint64_t value, unsigned count,
			  uint32_t incr);

	/* for linear pte/pde updates without addr mapping */
	void (*set_pte_pde)(struct amdgpu_ib *ib,
			    uint64_t pe,
			    uint64_t addr, unsigned count,
			    uint32_t incr, uint64_t flags);
};

struct amdgpu_task_info {
	char	process_name[TASK_COMM_LEN];
	char	task_name[TASK_COMM_LEN];
	pid_t	pid;
	pid_t	tgid;
};

/**
 * struct amdgpu_vm_update_params
 *
 * Encapsulate some VM table update parameters to reduce
 * the number of function parameters
 */
struct amdgpu_vm_update_params {

	/**
	 * @adev: amdgpu device we do this update for
	 */
	struct amdgpu_device *adev;

	/**
	 * @vm: optional amdgpu_vm we do this update for
	 */
	struct amdgpu_vm *vm;

	/**
	 * @direct: if changes should be made directly
	 */
	bool direct;

	/**
	 * @pages_addr:
	 *
	 * DMA addresses to use for mapping
	 */
	dma_addr_t *pages_addr;

	/**
	 * @job: job used for the hw submission
	 */
	struct amdgpu_job *job;

	/**
	 * @num_dw_left: number of dw left for the IB
	 */
	unsigned int num_dw_left;
};

struct amdgpu_vm_update_funcs {
	int (*map_table)(struct amdgpu_bo *bo);
	int (*prepare)(struct amdgpu_vm_update_params *p, void *owner,
		       struct dma_fence *exclusive);
	int (*update)(struct amdgpu_vm_update_params *p,
		      struct amdgpu_bo *bo, uint64_t pe, uint64_t addr,
		      unsigned count, uint32_t incr, uint64_t flags);
	int (*commit)(struct amdgpu_vm_update_params *p,
		      struct dma_fence **fence);
};
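
/*
 * Sketch of how a VM update backend is driven (simplified; error
 * handling, locking and the local names are illustrative, see the
 * callers in amdgpu_vm.c for the real sequence):
 *
 *	struct amdgpu_vm_update_params params = {
 *		.adev = adev,
 *		.vm = vm,
 *		.direct = direct,
 *	};
 *	struct dma_fence *fence = NULL;
 *	int r;
 *
 *	r = vm->update_funcs->prepare(&params, owner, exclusive);
 *	if (!r)
 *		r = vm->update_funcs->update(&params, bo, pe, addr,
 *					     count, incr, flags);
 *	if (!r)
 *		r = vm->update_funcs->commit(&params, &fence);
 *
 * amdgpu_vm_cpu_funcs implements these hooks with direct CPU writes,
 * amdgpu_vm_sdma_funcs by building an IB that is submitted to SDMA.
 */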

struct amdgpu_vm {
	/* tree of virtual addresses mapped */
	struct rb_root_cached		va;

	/* Lock to prevent eviction while we are updating page tables
	 * use vm_eviction_lock/unlock(vm)
	 */
	struct mutex			eviction_lock;
	bool				evicting;
	unsigned int			saved_flags;

	/* BOs which need a validation */
	struct list_head		evicted;

	/* PT BOs which relocated and their parent need an update */
	struct list_head		relocated;

	/* per VM BOs moved, but not yet updated in the PT */
	struct list_head		moved;

	/* All BOs of this VM not currently in the state machine */
	struct list_head		idle;

	/* regular invalidated BOs, but not yet updated in the PT */
	struct list_head		invalidated;
	spinlock_t			invalidated_lock;

	/* BO mappings freed, but not yet updated in the PT */
	struct list_head		freed;

	/* contains the page directory */
	struct amdgpu_vm_pt		root;
	struct dma_fence		*last_update;

	/* Scheduler entities for page table updates */
	struct drm_sched_entity		direct;
	struct drm_sched_entity		delayed;

	/* Last submission to the scheduler entities */
	struct dma_fence		*last_direct;
	struct dma_fence		*last_delayed;

	unsigned int			pasid;
	/* dedicated to vm */
	struct amdgpu_vmid		*reserved_vmid[AMDGPU_MAX_VMHUBS];

	/* Flag to indicate if VM tables are updated by CPU or GPU (SDMA) */
	bool				use_cpu_for_update;

	/* Functions to use for VM table updates */
	const struct amdgpu_vm_update_funcs *update_funcs;

	/* Flag to indicate ATS support from PTE for GFX9 */
	bool				pte_support_ats;

	/* Up to 128 pending retry page faults */
	DECLARE_KFIFO(faults, u64, 128);

	/* Points to the KFD process VM info */
	struct amdkfd_process_info	*process_info;

	/* List node in amdkfd_process_info.vm_list_head */
	struct list_head		vm_list_node;

	/* Valid while the PD is reserved or fenced */
	uint64_t			pd_phys_addr;

	/* Some basic info about the task running this VM */
	struct amdgpu_task_info		task_info;

	/* Store positions of group of BOs */
	struct ttm_lru_bulk_move	lru_bulk_move;
	/* mark whether the bulk move can be done */
	bool				bulk_moveable;
	/* Flag to indicate if VM is used for compute */
	bool				is_compute_context;
};

struct amdgpu_vm_manager {
	/* Handling of VMIDs */
	struct amdgpu_vmid_mgr			id_mgr[AMDGPU_MAX_VMHUBS];

	/* Handling of VM fences */
	u64					fence_context;
	unsigned				seqno[AMDGPU_MAX_RINGS];

	uint64_t				max_pfn;
	uint32_t				num_level;
	uint32_t				block_size;
	uint32_t				fragment_size;
	enum amdgpu_vm_level			root_level;
	/* vram base address for page table entry */
	u64					vram_base_offset;
	/* vm pte handling */
	const struct amdgpu_vm_pte_funcs	*vm_pte_funcs;
	struct drm_gpu_scheduler		*vm_pte_scheds[AMDGPU_MAX_RINGS];
	unsigned				vm_pte_num_scheds;
	struct amdgpu_ring			*page_fault;

	/* partial resident texture handling */
	spinlock_t				prt_lock;
	atomic_t				num_prt_users;

	/* controls how VM page tables are updated for Graphics and Compute.
	 * BIT0[= 0] Graphics updated by SDMA [= 1] by CPU
	 * BIT1[= 0] Compute updated by SDMA [= 1] by CPU
	 */
	int					vm_update_mode;

	/* PASID to VM mapping, will be used in interrupt context to
	 * look up the VM of a page fault
	 */
	struct idr				pasid_idr;
	spinlock_t				pasid_lock;

	/* counter of mapped memory through xgmi */
	uint32_t				xgmi_map_counter;
	struct mutex				lock_pstate;
};
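
/*
 * Sketch of the PASID -> VM lookup done on the fault path (simplified
 * from amdgpu_vm_get_task_info(); "task_info" is an output pointer and
 * the local names are illustrative):
 *
 *	unsigned long flags;
 *	struct amdgpu_vm *vm;
 *
 *	spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
 *	vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
 *	if (vm)
 *		*task_info = vm->task_info;
 *	spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
 */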

#define amdgpu_vm_copy_pte(adev, ib, pe, src, count) ((adev)->vm_manager.vm_pte_funcs->copy_pte((ib), (pe), (src), (count)))
#define amdgpu_vm_write_pte(adev, ib, pe, value, count, incr) ((adev)->vm_manager.vm_pte_funcs->write_pte((ib), (pe), (value), (count), (incr)))
#define amdgpu_vm_set_pte_pde(adev, ib, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->set_pte_pde((ib), (pe), (addr), (count), (incr), (flags)))
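
/*
 * Illustrative use of the wrappers above, assuming a prepared IB with
 * enough space reserved and AMDGPU_GPU_PAGE_SIZE (4096) from amdgpu.h:
 * write "count" PTEs starting at GPU address "pe", mapping "count"
 * consecutive 4K pages beginning at "addr":
 *
 *	amdgpu_vm_set_pte_pde(adev, &ib, pe, addr, count,
 *			      AMDGPU_GPU_PAGE_SIZE,
 *			      AMDGPU_PTE_VALID | AMDGPU_PTE_SYSTEM |
 *			      AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE);
 */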

extern const struct amdgpu_vm_update_funcs amdgpu_vm_cpu_funcs;
extern const struct amdgpu_vm_update_funcs amdgpu_vm_sdma_funcs;

void amdgpu_vm_manager_init(struct amdgpu_device *adev);
void amdgpu_vm_manager_fini(struct amdgpu_device *adev);

long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout);
int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
		   int vm_context, unsigned int pasid);
int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm,
			   unsigned int pasid);
void amdgpu_vm_release_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm);
void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm);
void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
			 struct list_head *validated,
			 struct amdgpu_bo_list_entry *entry);
bool amdgpu_vm_ready(struct amdgpu_vm *vm);
int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
			      int (*callback)(void *p, struct amdgpu_bo *bo),
			      void *param);
int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job,
		    bool need_pipe_sync);
int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
			  struct amdgpu_vm *vm, bool direct);
int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
			  struct amdgpu_vm *vm,
			  struct dma_fence **fence);
int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
			   struct amdgpu_vm *vm);
int amdgpu_vm_bo_update(struct amdgpu_device *adev,
			struct amdgpu_bo_va *bo_va,
			bool clear);
bool amdgpu_vm_evictable(struct amdgpu_bo *bo);
void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
			     struct amdgpu_bo *bo, bool evicted);
uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr);
struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
				       struct amdgpu_bo *bo);
struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
				      struct amdgpu_vm *vm,
				      struct amdgpu_bo *bo);
int amdgpu_vm_bo_map(struct amdgpu_device *adev,
		     struct amdgpu_bo_va *bo_va,
		     uint64_t addr, uint64_t offset,
		     uint64_t size, uint64_t flags);
int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
			     struct amdgpu_bo_va *bo_va,
			     uint64_t addr, uint64_t offset,
			     uint64_t size, uint64_t flags);
int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
		       struct amdgpu_bo_va *bo_va,
		       uint64_t addr);
int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
				struct amdgpu_vm *vm,
				uint64_t saddr, uint64_t size);
struct amdgpu_bo_va_mapping *amdgpu_vm_bo_lookup_mapping(struct amdgpu_vm *vm,
							 uint64_t addr);
void amdgpu_vm_bo_trace_cs(struct amdgpu_vm *vm, struct ww_acquire_ctx *ticket);
void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
		      struct amdgpu_bo_va *bo_va);
void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size,
			   uint32_t fragment_size_default, unsigned max_level,
			   unsigned max_bits);
int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring,
				  struct amdgpu_job *job);
void amdgpu_vm_check_compute_bug(struct amdgpu_device *adev);

void amdgpu_vm_get_task_info(struct amdgpu_device *adev, unsigned int pasid,
			     struct amdgpu_task_info *task_info);
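
/*
 * Illustrative caller, not from this file: a GMC fault handler can
 * resolve the offending process from the PASID reported by the hub
 * (mirrors the usage in the gmc_v9_0 fault path):
 *
 *	struct amdgpu_task_info task_info;
 *
 *	memset(&task_info, 0, sizeof(task_info));
 *	amdgpu_vm_get_task_info(adev, pasid, &task_info);
 *	dev_err(adev->dev, "fault from process %s pid %d\n",
 *		task_info.process_name, task_info.pid);
 */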
bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, unsigned int pasid,
			    uint64_t addr);

void amdgpu_vm_set_task_info(struct amdgpu_vm *vm);

void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev,
				struct amdgpu_vm *vm);
void amdgpu_vm_del_from_lru_notify(struct ttm_buffer_object *bo);

#endif