#ifndef __AMDGPU_VM_H__
#define __AMDGPU_VM_H__

#include <linux/idr.h>
#include <linux/kfifo.h>
#include <linux/rbtree.h>
#include <drm/gpu_scheduler.h>
#include <drm/drm_file.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <linux/sched/mm.h>

#include "amdgpu_sync.h"
#include "amdgpu_ring.h"
#include "amdgpu_ids.h"

struct amdgpu_bo_va;
struct amdgpu_job;
struct amdgpu_bo_list_entry;
struct amdgpu_bo_vm;

/* Maximum number of PTEs the hardware can write with one command */
#define AMDGPU_VM_MAX_UPDATE_SIZE	0x3FFFF

/* number of entries in page table */
#define AMDGPU_VM_PTE_COUNT(adev) (1 << (adev)->vm_manager.block_size)
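
/*
 * Illustrative example, not part of the original header: with a typical
 * block_size of 9 and the 4KB AMDGPU_GPU_PAGE_SIZE from amdgpu.h, one
 * page table block holds 512 entries, i.e. 2MB of address space:
 *
 *	unsigned int num_ptes = AMDGPU_VM_PTE_COUNT(adev);	// 1 << 9 = 512
 *	uint64_t span = (uint64_t)num_ptes * AMDGPU_GPU_PAGE_SIZE;	// 2MB
 */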

#define AMDGPU_PTE_VALID	(1ULL << 0)
#define AMDGPU_PTE_SYSTEM	(1ULL << 1)
#define AMDGPU_PTE_SNOOPED	(1ULL << 2)

/* RV+ */
#define AMDGPU_PTE_TMZ		(1ULL << 3)

/* VI only */
#define AMDGPU_PTE_EXECUTABLE	(1ULL << 4)

#define AMDGPU_PTE_READABLE	(1ULL << 5)
#define AMDGPU_PTE_WRITEABLE	(1ULL << 6)

#define AMDGPU_PTE_FRAG(x)	((x & 0x1fULL) << 7)

/* TILED for VEGA10, reserved for older ASICs */
#define AMDGPU_PTE_PRT		(1ULL << 51)

/* PDE is handled as PTE for VEGA10 */
#define AMDGPU_PDE_PTE		(1ULL << 54)

#define AMDGPU_PTE_LOG		(1ULL << 55)

/* PTE is handled as TF (translate further) */
#define AMDGPU_PTE_TF		(1ULL << 56)

/* MALL no-alloc */
#define AMDGPU_PTE_NOALLOC	(1ULL << 58)

/* PDE Block Fragment Size for VEGA10 */
#define AMDGPU_PDE_BFS(a)	((uint64_t)a << 59)

/* For GFX9 */
#define AMDGPU_PTE_MTYPE_VG10(a)	((uint64_t)(a) << 57)
#define AMDGPU_PTE_MTYPE_VG10_MASK	AMDGPU_PTE_MTYPE_VG10(3ULL)

#define AMDGPU_MTYPE_NC	0
#define AMDGPU_MTYPE_CC	2

#define AMDGPU_PTE_DEFAULT_ATC	(AMDGPU_PTE_SYSTEM	\
				| AMDGPU_PTE_SNOOPED	\
				| AMDGPU_PTE_EXECUTABLE	\
				| AMDGPU_PTE_READABLE	\
				| AMDGPU_PTE_WRITEABLE	\
				| AMDGPU_PTE_MTYPE_VG10(AMDGPU_MTYPE_CC))

/* gfx10 */
#define AMDGPU_PTE_MTYPE_NV10(a)	((uint64_t)(a) << 48)
#define AMDGPU_PTE_MTYPE_NV10_MASK	AMDGPU_PTE_MTYPE_NV10(7ULL)
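
/*
 * Illustrative sketch, not from the original header: PTE flags are OR'ed
 * together to describe one mapping. A snooped, read/write system page
 * could be encoded roughly as:
 *
 *	uint64_t flags = AMDGPU_PTE_VALID | AMDGPU_PTE_SYSTEM |
 *			 AMDGPU_PTE_SNOOPED | AMDGPU_PTE_READABLE |
 *			 AMDGPU_PTE_WRITEABLE;
 *	flags |= AMDGPU_PTE_FRAG(4);	// optional 64KB fragment hint
 *
 * AMDGPU_PTE_DEFAULT_ATC above is such a combination, adding EXECUTABLE
 * and the GFX9 cache-coherent memory type.
 */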

/* How to program VM fault handling */
#define AMDGPU_VM_FAULT_STOP_NEVER	0
#define AMDGPU_VM_FAULT_STOP_FIRST	1
#define AMDGPU_VM_FAULT_STOP_ALWAYS	2

/* VRAM reserved for page tables (8MB) */
#define AMDGPU_VM_RESERVED_VRAM		(8ULL << 20)

/* maximum number of VMHUBs */
#define AMDGPU_MAX_VMHUBS	3
#define AMDGPU_GFXHUB_0		0
#define AMDGPU_MMHUB_0		1
#define AMDGPU_MMHUB_1		2

/* Reserved virtual address space (2MB) */
#define AMDGPU_VA_RESERVED_SIZE		(2ULL << 20)

/* max VMIDs dedicated to a process */
#define AMDGPU_VM_MAX_RESERVED_VMID	1

/* See vm_update_mode in struct amdgpu_vm_manager */
#define AMDGPU_VM_USE_CPU_FOR_GFX	(1 << 0)
#define AMDGPU_VM_USE_CPU_FOR_COMPUTE	(1 << 1)

/* Page table hierarchy levels, from root to leaf:
 * PDB2 -> PDB1 -> PDB0 -> PTB
 */
enum amdgpu_vm_level {
	AMDGPU_VM_PDB2,
	AMDGPU_VM_PDB1,
	AMDGPU_VM_PDB0,
	AMDGPU_VM_PTB
};

/* base structure for tracking BO usage in a VM */
struct amdgpu_vm_bo_base {
	/* constant after initialization */
	struct amdgpu_vm		*vm;
	struct amdgpu_bo		*bo;

	/* protected by the BO being reserved */
	struct amdgpu_vm_bo_base	*next;

	/* protected by spinlock */
	struct list_head		vm_status;

	/* protected by the BO being reserved */
	bool				moved;
};

/* provided by hw blocks that can write PTEs, e.g. SDMA */
struct amdgpu_vm_pte_funcs {
	/* number of dw to reserve per operation */
	unsigned	copy_pte_num_dw;

	/* copy PTE entries from GART */
	void (*copy_pte)(struct amdgpu_ib *ib,
			 uint64_t pe, uint64_t src,
			 unsigned count);

	/* write PTEs one entry at a time with addr mapping */
	void (*write_pte)(struct amdgpu_ib *ib, uint64_t pe,
			  uint64_t value, unsigned count,
			  uint32_t incr);

	/* for linear PTE/PDE updates without addr mapping */
	void (*set_pte_pde)(struct amdgpu_ib *ib,
			    uint64_t pe,
			    uint64_t addr, unsigned count,
			    uint32_t incr, uint64_t flags);
};

/* identity of the process and thread a VM belongs to */
struct amdgpu_task_info {
	char	process_name[TASK_COMM_LEN];
	char	task_name[TASK_COMM_LEN];
	pid_t	pid;
	pid_t	tgid;
};

/**
 * struct amdgpu_vm_update_params
 *
 * Encapsulate some VM table update parameters to reduce
 * the number of function parameters
 */
struct amdgpu_vm_update_params {

	/**
	 * @adev: amdgpu device we do this update for
	 */
	struct amdgpu_device *adev;

	/**
	 * @vm: optional amdgpu_vm we do this update for
	 */
	struct amdgpu_vm *vm;

	/**
	 * @immediate: if changes should be made immediately
	 */
	bool immediate;

	/**
	 * @unlocked: true if the root BO is not locked
	 */
	bool unlocked;

	/**
	 * @pages_addr: DMA addresses to use for mapping
	 */
	dma_addr_t *pages_addr;

	/**
	 * @job: job to use for hw submission
	 */
	struct amdgpu_job *job;

	/**
	 * @num_dw_left: number of dw left for the IB
	 */
	unsigned int num_dw_left;

	/**
	 * @table_freed: return true if page table is freed when updating
	 */
	bool table_freed;
};

/* backend operations used to apply page table updates */
struct amdgpu_vm_update_funcs {
	int (*map_table)(struct amdgpu_bo_vm *bo);
	int (*prepare)(struct amdgpu_vm_update_params *p, struct dma_resv *resv,
		       enum amdgpu_sync_mode sync_mode);
	int (*update)(struct amdgpu_vm_update_params *p,
		      struct amdgpu_bo_vm *bo, uint64_t pe, uint64_t addr,
		      unsigned count, uint32_t incr, uint64_t flags);
	int (*commit)(struct amdgpu_vm_update_params *p,
		      struct dma_fence **fence);
};
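
/*
 * Illustrative sketch of how a backend is used, not a verbatim copy of
 * the driver code: a VM points update_funcs at one of the two backends
 * declared below (amdgpu_vm_cpu_funcs or amdgpu_vm_sdma_funcs), and page
 * table updates then follow the same prepare/update/commit sequence:
 *
 *	vm->update_funcs = vm->use_cpu_for_update ? &amdgpu_vm_cpu_funcs
 *						  : &amdgpu_vm_sdma_funcs;
 *	...
 *	r = vm->update_funcs->prepare(&params, resv, sync_mode);
 *	if (!r)
 *		r = vm->update_funcs->update(&params, bo, pe, addr,
 *					     count, incr, flags);
 *	if (!r)
 *		r = vm->update_funcs->commit(&params, &fence);
 */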

struct amdgpu_vm {
	/* tree of virtual addresses mapped */
	struct rb_root_cached	va;

	/* Lock to prevent eviction while we are updating page tables */
	struct mutex		eviction_lock;
	bool			evicting;
	unsigned int		saved_flags;

	/* BOs which need a validation */
	struct list_head	evicted;

	/* PT BOs which relocated and their parent need an update */
	struct list_head	relocated;

	/* per VM BOs moved, but not yet updated in the PT */
	struct list_head	moved;

	/* All BOs of this VM not currently in the state machine */
	struct list_head	idle;

	/* regular invalidated BOs, but not yet updated in the PT */
	struct list_head	invalidated;
	spinlock_t		invalidated_lock;

	/* BO mappings freed, but not yet updated in the PT */
	struct list_head	freed;

	/* BOs which are invalidated, but have been updated in the PTs */
	struct list_head	done;

	/* contains the page directory */
	struct amdgpu_vm_bo_base	root;
	struct dma_fence	*last_update;

	/* Scheduler entities for page table updates */
	struct drm_sched_entity	immediate;
	struct drm_sched_entity	delayed;

	/* Sequence number of the last TLB flush */
	atomic64_t		tlb_seq;
	struct dma_fence	*last_tlb_flush;

	/* Last unlocked submission to the scheduler entities */
	struct dma_fence	*last_unlocked;

	unsigned int		pasid;

	/* VMIDs dedicated to this VM */
	struct amdgpu_vmid	*reserved_vmid[AMDGPU_MAX_VMHUBS];

	/* Flag to indicate if VM tables are updated by CPU or GPU (SDMA) */
	bool			use_cpu_for_update;

	/* Functions to use for VM table updates */
	const struct amdgpu_vm_update_funcs	*update_funcs;

	/* Flag to indicate ATS support from PTE for GFX9 */
	bool			pte_support_ats;

	/* Up to 128 pending retry page faults */
	DECLARE_KFIFO(faults, u64, 128);

	/* Points to the KFD process VM info */
	struct amdkfd_process_info *process_info;

	/* List node in amdkfd_process_info.vm_list_head */
	struct list_head	vm_list_node;

	/* Valid while the PD is reserved or fenced */
	uint64_t		pd_phys_addr;

	/* Some basic info about the task running this VM */
	struct amdgpu_task_info task_info;

	/* Store positions of group of BOs */
	struct ttm_lru_bulk_move lru_bulk_move;

	/* Flag to indicate if VM is used for compute */
	bool			is_compute_context;
};
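
/*
 * Illustrative sketch of the eviction_lock/saved_flags interplay
 * (an assumption based on the <linux/sched/mm.h> include above, not a
 * verbatim copy of the driver helpers): the lock is taken with memory
 * reclaim suppressed so eviction cannot recurse into the updater:
 *
 *	mutex_lock(&vm->eviction_lock);
 *	vm->saved_flags = memalloc_noreclaim_save();
 *	... update page tables ...
 *	memalloc_noreclaim_restore(vm->saved_flags);
 *	mutex_unlock(&vm->eviction_lock);
 */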

struct amdgpu_vm_manager {
	/* Handling of VMIDs */
	struct amdgpu_vmid_mgr			id_mgr[AMDGPU_MAX_VMHUBS];
	unsigned int				first_kfd_vmid;
	bool					concurrent_flush;

	/* Handling of VM fences */
	u64					fence_context;
	unsigned				seqno[AMDGPU_MAX_RINGS];

	uint64_t				max_pfn;
	uint32_t				num_level;
	uint32_t				block_size;
	uint32_t				fragment_size;
	enum amdgpu_vm_level			root_level;

	/* vram base address for page table entry */
	u64					vram_base_offset;

	/* vm pte handling */
	const struct amdgpu_vm_pte_funcs	*vm_pte_funcs;
	struct drm_gpu_scheduler		*vm_pte_scheds[AMDGPU_MAX_RINGS];
	unsigned				vm_pte_num_scheds;
	struct amdgpu_ring			*page_fault;

	/* partial resident texture handling */
	spinlock_t				prt_lock;
	atomic_t				num_prt_users;

	/* controls how VM page tables are updated for Graphics and Compute.
	 * BIT0[= 0] Graphics updated by SDMA [= 1] by CPU
	 * BIT1[= 0] Compute updated by SDMA [= 1] by CPU
	 */
	int					vm_update_mode;

	/* PASID to VM mapping, will be used in interrupt context to
	 * look up the VM of a page fault
	 */
	struct xarray				pasids;
};

struct amdgpu_bo_va_mapping;

#define amdgpu_vm_copy_pte(adev, ib, pe, src, count) ((adev)->vm_manager.vm_pte_funcs->copy_pte((ib), (pe), (src), (count)))
#define amdgpu_vm_write_pte(adev, ib, pe, value, count, incr) ((adev)->vm_manager.vm_pte_funcs->write_pte((ib), (pe), (value), (count), (incr)))
#define amdgpu_vm_set_pte_pde(adev, ib, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->set_pte_pde((ib), (pe), (addr), (count), (incr), (flags)))
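
/*
 * Illustrative example of a hypothetical call site, not from the original
 * header: the wrappers above let generation-independent code emit PTE
 * updates without knowing which engine implements them:
 *
 *	// write 'count' entries starting at GPU address 'pe', each entry
 *	// mapping an address 'incr' bytes past the previous one
 *	amdgpu_vm_set_pte_pde(adev, ib, pe, addr, count, incr, flags);
 */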

extern const struct amdgpu_vm_update_funcs amdgpu_vm_cpu_funcs;
extern const struct amdgpu_vm_update_funcs amdgpu_vm_sdma_funcs;

void amdgpu_vm_manager_init(struct amdgpu_device *adev);
void amdgpu_vm_manager_fini(struct amdgpu_device *adev);

int amdgpu_vm_set_pasid(struct amdgpu_device *adev, struct amdgpu_vm *vm,
			u32 pasid);

long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout);
int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm);
int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm);
void amdgpu_vm_release_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm);
void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm);
void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
			 struct list_head *validated,
			 struct amdgpu_bo_list_entry *entry);
bool amdgpu_vm_ready(struct amdgpu_vm *vm);
int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
			      int (*callback)(void *p, struct amdgpu_bo *bo),
			      void *param);
int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_pipe_sync);
int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
			  struct amdgpu_vm *vm, bool immediate);
int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
			  struct amdgpu_vm *vm,
			  struct dma_fence **fence);
int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
			   struct amdgpu_vm *vm);
void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
			    struct amdgpu_vm *vm, struct amdgpu_bo *bo);
int amdgpu_vm_update_range(struct amdgpu_device *adev, struct amdgpu_vm *vm,
			   bool immediate, bool unlocked, bool flush_tlb,
			   struct dma_resv *resv, uint64_t start, uint64_t last,
			   uint64_t flags, uint64_t offset, uint64_t vram_base,
			   struct ttm_resource *res, dma_addr_t *pages_addr,
			   struct dma_fence **fence);
int amdgpu_vm_bo_update(struct amdgpu_device *adev,
			struct amdgpu_bo_va *bo_va,
			bool clear);
bool amdgpu_vm_evictable(struct amdgpu_bo *bo);
void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
			     struct amdgpu_bo *bo, bool evicted);
uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr);
struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
				       struct amdgpu_bo *bo);
struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
				      struct amdgpu_vm *vm,
				      struct amdgpu_bo *bo);
int amdgpu_vm_bo_map(struct amdgpu_device *adev,
		     struct amdgpu_bo_va *bo_va,
		     uint64_t addr, uint64_t offset,
		     uint64_t size, uint64_t flags);
int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
			     struct amdgpu_bo_va *bo_va,
			     uint64_t addr, uint64_t offset,
			     uint64_t size, uint64_t flags);
int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
		       struct amdgpu_bo_va *bo_va,
		       uint64_t addr);
int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
				struct amdgpu_vm *vm,
				uint64_t saddr, uint64_t size);
struct amdgpu_bo_va_mapping *amdgpu_vm_bo_lookup_mapping(struct amdgpu_vm *vm,
							 uint64_t addr);
void amdgpu_vm_bo_trace_cs(struct amdgpu_vm *vm, struct ww_acquire_ctx *ticket);
void amdgpu_vm_bo_del(struct amdgpu_device *adev,
		      struct amdgpu_bo_va *bo_va);
void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size,
			   uint32_t fragment_size_default, unsigned max_level,
			   unsigned max_bits);
int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring,
				  struct amdgpu_job *job);
void amdgpu_vm_check_compute_bug(struct amdgpu_device *adev);

void amdgpu_vm_get_task_info(struct amdgpu_device *adev, u32 pasid,
			     struct amdgpu_task_info *task_info);
bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid,
			    uint64_t addr, bool write_fault);

void amdgpu_vm_set_task_info(struct amdgpu_vm *vm);

void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev,
				struct amdgpu_vm *vm);
void amdgpu_vm_get_memory(struct amdgpu_vm *vm, uint64_t *vram_mem,
			  uint64_t *gtt_mem, uint64_t *cpu_mem);

int amdgpu_vm_pt_clear(struct amdgpu_device *adev, struct amdgpu_vm *vm,
		       struct amdgpu_bo_vm *vmbo, bool immediate);
int amdgpu_vm_pt_create(struct amdgpu_device *adev, struct amdgpu_vm *vm,
			int level, bool immediate, struct amdgpu_bo_vm **vmbo);
void amdgpu_vm_pt_free_root(struct amdgpu_device *adev, struct amdgpu_vm *vm);
bool amdgpu_vm_pt_is_root_clean(struct amdgpu_device *adev,
				struct amdgpu_vm *vm);

int amdgpu_vm_pde_update(struct amdgpu_vm_update_params *params,
			 struct amdgpu_vm_bo_base *entry);
int amdgpu_vm_ptes_update(struct amdgpu_vm_update_params *params,
			  uint64_t start, uint64_t end,
			  uint64_t dst, uint64_t flags);

#if defined(CONFIG_DEBUG_FS)
void amdgpu_debugfs_vm_bo_info(struct amdgpu_vm *vm, struct seq_file *m);
#endif

/**
 * amdgpu_vm_tlb_seq - return tlb flush sequence number
 * @vm: the amdgpu_vm structure to query
 *
 * Returns the TLB flush sequence number; the VM TLBs need to be
 * invalidated whenever it changes.
 */
static inline uint64_t amdgpu_vm_tlb_seq(struct amdgpu_vm *vm)
{
	return atomic64_read(&vm->tlb_seq);
}
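
/*
 * Illustrative usage sketch, an assumption rather than a quote from the
 * driver: callers snapshot the sequence number around a page table update
 * and compare it afterwards to decide whether a TLB flush is required:
 *
 *	uint64_t seq = amdgpu_vm_tlb_seq(vm);
 *	... submit page table updates ...
 *	if (amdgpu_vm_tlb_seq(vm) != seq)
 *		needs_flush = true;	// hypothetical flag at the call site
 */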

#endif