#ifndef __AMDGPU_OBJECT_H__
#define __AMDGPU_OBJECT_H__

#include <drm/amdgpu_drm.h>
#include "amdgpu.h"

#define AMDGPU_BO_INVALID_OFFSET	LONG_MAX
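
/*
 * struct amdgpu_bo_va_mapping - one mapped range of a BO inside a VM
 *
 * @start and @last bound the mapped range, @offset is where the mapping
 * begins inside the BO, and @rb together with @__subtree_last links the
 * mapping into the VM's interval tree.
 */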
struct amdgpu_bo_va_mapping {
	struct list_head		list;
	struct rb_node			rb;
	uint64_t			start;
	uint64_t			last;
	uint64_t			__subtree_last;
	uint64_t			offset;
	uint64_t			flags;
};

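/*
 * struct amdgpu_bo_va - a BO's presence in one VM
 *
 * Connects a buffer object to a VM through @base and collects the BO's
 * mappings in that address space on the @invalids and @valids lists.
 */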
struct amdgpu_bo_va {
	struct amdgpu_vm_bo_base	base;

	/* protected by bo being reserved */
	struct dma_fence		*last_pt_update;
	unsigned			ref_count;

	/* mappings for this bo_va */
	struct list_head		invalids;
	struct list_head		valids;
};

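/*
 * struct amdgpu_bo - amdgpu buffer object
 *
 * Wraps a TTM buffer object (@tbo) with amdgpu-specific state: placement
 * domains, pinning, tiling and metadata, the list of VM mappings (@va)
 * and the optional @shadow copy used for recovery after a GPU reset.
 */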
struct amdgpu_bo {
	/* Protected by tbo.reserved */
	u32				preferred_domains;
	u32				allowed_domains;
	struct ttm_place		placements[AMDGPU_GEM_DOMAIN_MAX + 1];
	struct ttm_placement		placement;
	struct ttm_buffer_object	tbo;
	struct ttm_bo_kmap_obj		kmap;
	u64				flags;
	unsigned			pin_count;
	u64				tiling_flags;
	u64				metadata_flags;
	void				*metadata;
	u32				metadata_size;
	unsigned			prime_shared_count;
	/* list of all virtual addresses to which this BO is associated */
	struct list_head		va;
	/* Constant after initialization */
	struct drm_gem_object		gem_base;
	struct amdgpu_bo		*parent;
	struct amdgpu_bo		*shadow;

	struct ttm_bo_kmap_obj		dma_buf_vmap;
	struct amdgpu_mn		*mn;

	union {
		struct list_head	mn_list;
		struct list_head	shadow_list;
	};
};
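
/**
 * amdgpu_mem_type_to_domain - return the GEM domain for a TTM memory type
 * @mem_type: TTM memory type
 *
 * Returns the AMDGPU_GEM_DOMAIN_* flag corresponding to @mem_type, or 0
 * if the type has no matching domain.
 */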
static inline unsigned amdgpu_mem_type_to_domain(u32 mem_type)
{
	switch (mem_type) {
	case TTM_PL_VRAM:
		return AMDGPU_GEM_DOMAIN_VRAM;
	case TTM_PL_TT:
		return AMDGPU_GEM_DOMAIN_GTT;
	case TTM_PL_SYSTEM:
		return AMDGPU_GEM_DOMAIN_CPU;
	case AMDGPU_PL_GDS:
		return AMDGPU_GEM_DOMAIN_GDS;
	case AMDGPU_PL_GWS:
		return AMDGPU_GEM_DOMAIN_GWS;
	case AMDGPU_PL_OA:
		return AMDGPU_GEM_DOMAIN_OA;
	default:
		break;
	}
	return 0;
}
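
/**
 * amdgpu_bo_reserve - reserve the BO for state changes
 * @bo: BO structure
 * @no_intr: don't return -ERESTARTSYS on a pending signal
 *
 * Returns:
 * -ERESTARTSYS if the wait for the reservation was interrupted by a
 * signal, 0 on success, or another negative error code on failure.
 */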
static inline int amdgpu_bo_reserve(struct amdgpu_bo *bo, bool no_intr)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	int r;

	r = ttm_bo_reserve(&bo->tbo, !no_intr, false, NULL);
	if (unlikely(r != 0)) {
		if (r != -ERESTARTSYS)
			dev_err(adev->dev, "%p reserve failed\n", bo);
		return r;
	}
	return 0;
}

static inline void amdgpu_bo_unreserve(struct amdgpu_bo *bo)
{
	ttm_bo_unreserve(&bo->tbo);
}

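/*
 * Most BO state changes require holding the reservation; a minimal usage
 * sketch (illustrative only, not code from this driver):
 *
 *	r = amdgpu_bo_reserve(bo, false);
 *	if (unlikely(r != 0))
 *		return r;
 *	r = amdgpu_bo_kmap(bo, &cpu_addr);	// operate while reserved
 *	amdgpu_bo_unreserve(bo);
 */
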
static inline unsigned long amdgpu_bo_size(struct amdgpu_bo *bo)
{
	return bo->tbo.num_pages << PAGE_SHIFT;
}

static inline unsigned amdgpu_bo_ngpu_pages(struct amdgpu_bo *bo)
{
	return (bo->tbo.num_pages << PAGE_SHIFT) / AMDGPU_GPU_PAGE_SIZE;
}

static inline unsigned amdgpu_bo_gpu_page_alignment(struct amdgpu_bo *bo)
{
	return (bo->tbo.mem.page_alignment << PAGE_SHIFT) / AMDGPU_GPU_PAGE_SIZE;
}

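/**
 * amdgpu_bo_mmap_offset - return mmap offset of bo
 * @bo: amdgpu object for which we query the offset
 *
 * Returns the fake mmap offset of the object in the DRM device's VMA space.
 */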
static inline u64 amdgpu_bo_mmap_offset(struct amdgpu_bo *bo)
{
	return drm_vma_node_offset_addr(&bo->tbo.vma_node);
}

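/**
 * amdgpu_bo_gpu_accessible - whether the BO is currently visible to the GPU
 * @bo: amdgpu object to check
 *
 * True for VRAM placements and for GTT placements once the TTM object has
 * been bound to the GART; false otherwise.
 */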
static inline bool amdgpu_bo_gpu_accessible(struct amdgpu_bo *bo)
{
	switch (bo->tbo.mem.mem_type) {
	case TTM_PL_TT: return amdgpu_ttm_is_bound(bo->tbo.ttm);
	case TTM_PL_VRAM: return true;
	default: return false;
	}
}

int amdgpu_bo_create(struct amdgpu_device *adev,
		     unsigned long size, int byte_align,
		     bool kernel, u32 domain, u64 flags,
		     struct sg_table *sg,
		     struct reservation_object *resv,
		     uint64_t init_value,
		     struct amdgpu_bo **bo_ptr);
int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
				unsigned long size, int byte_align,
				bool kernel, u32 domain, u64 flags,
				struct sg_table *sg,
				struct ttm_placement *placement,
				struct reservation_object *resv,
				uint64_t init_value,
				struct amdgpu_bo **bo_ptr);
int amdgpu_bo_create_reserved(struct amdgpu_device *adev,
			      unsigned long size, int align,
			      u32 domain, struct amdgpu_bo **bo_ptr,
			      u64 *gpu_addr, void **cpu_addr);
int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
			    unsigned long size, int align,
			    u32 domain, struct amdgpu_bo **bo_ptr,
			    u64 *gpu_addr, void **cpu_addr);
void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr,
			   void **cpu_addr);
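
/*
 * Pairing amdgpu_bo_create_kernel() with amdgpu_bo_free_kernel() gives a
 * pinned buffer with both GPU and CPU addresses; a minimal sketch
 * (illustrative only, error handling abbreviated):
 *
 *	struct amdgpu_bo *bo;
 *	u64 gpu_addr;
 *	void *cpu_addr;
 *
 *	r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
 *				    AMDGPU_GEM_DOMAIN_GTT, &bo,
 *				    &gpu_addr, &cpu_addr);
 *	if (r)
 *		return r;
 *	// ... use gpu_addr / cpu_addr ...
 *	amdgpu_bo_free_kernel(&bo, &gpu_addr, &cpu_addr);
 */
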
int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr);
void *amdgpu_bo_kptr(struct amdgpu_bo *bo);
void amdgpu_bo_kunmap(struct amdgpu_bo *bo);
struct amdgpu_bo *amdgpu_bo_ref(struct amdgpu_bo *bo);
void amdgpu_bo_unref(struct amdgpu_bo **bo);
int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain, u64 *gpu_addr);
int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
			     u64 min_offset, u64 max_offset,
			     u64 *gpu_addr);
int amdgpu_bo_unpin(struct amdgpu_bo *bo);
int amdgpu_bo_evict_vram(struct amdgpu_device *adev);
int amdgpu_bo_init(struct amdgpu_device *adev);
void amdgpu_bo_fini(struct amdgpu_device *adev);
int amdgpu_bo_fbdev_mmap(struct amdgpu_bo *bo,
			 struct vm_area_struct *vma);
int amdgpu_bo_set_tiling_flags(struct amdgpu_bo *bo, u64 tiling_flags);
void amdgpu_bo_get_tiling_flags(struct amdgpu_bo *bo, u64 *tiling_flags);
int amdgpu_bo_set_metadata(struct amdgpu_bo *bo, void *metadata,
			   uint32_t metadata_size, uint64_t flags);
int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
			   size_t buffer_size, uint32_t *metadata_size,
			   uint64_t *flags);
void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
			   bool evict,
			   struct ttm_mem_reg *new_mem);
int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
		     bool shared);
u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo);
int amdgpu_bo_backup_to_shadow(struct amdgpu_device *adev,
			       struct amdgpu_ring *ring,
			       struct amdgpu_bo *bo,
			       struct reservation_object *resv,
			       struct dma_fence **fence, bool direct);
int amdgpu_bo_validate(struct amdgpu_bo *bo);
int amdgpu_bo_restore_from_shadow(struct amdgpu_device *adev,
				  struct amdgpu_ring *ring,
				  struct amdgpu_bo *bo,
				  struct reservation_object *resv,
				  struct dma_fence **fence,
				  bool direct);
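
/*
 * sub allocation
 *
 * Small buffers are carved out of a single larger BO owned by an
 * amdgpu_sa_manager; soffset is the sub-allocation's offset inside it.
 */
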
static inline uint64_t amdgpu_sa_bo_gpu_addr(struct amdgpu_sa_bo *sa_bo)
{
	return sa_bo->manager->gpu_addr + sa_bo->soffset;
}

static inline void *amdgpu_sa_bo_cpu_addr(struct amdgpu_sa_bo *sa_bo)
{
	return sa_bo->manager->cpu_ptr + sa_bo->soffset;
}

int amdgpu_sa_bo_manager_init(struct amdgpu_device *adev,
			      struct amdgpu_sa_manager *sa_manager,
			      unsigned size, u32 align, u32 domain);
void amdgpu_sa_bo_manager_fini(struct amdgpu_device *adev,
			       struct amdgpu_sa_manager *sa_manager);
int amdgpu_sa_bo_manager_start(struct amdgpu_device *adev,
			       struct amdgpu_sa_manager *sa_manager);
int amdgpu_sa_bo_manager_suspend(struct amdgpu_device *adev,
				 struct amdgpu_sa_manager *sa_manager);
int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,
		     struct amdgpu_sa_bo **sa_bo,
		     unsigned size, unsigned align);
void amdgpu_sa_bo_free(struct amdgpu_device *adev,
		       struct amdgpu_sa_bo **sa_bo,
		       struct dma_fence *fence);
#if defined(CONFIG_DEBUG_FS)
void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager,
				  struct seq_file *m);
#endif

#endif /* __AMDGPU_OBJECT_H__ */