1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25#include <drm/ttm/ttm_range_manager.h>
26
27#include "amdgpu.h"
28
/*
 * Per-allocation bookkeeping for the GTT domain: a TTM range-manager node
 * plus a back pointer to the owning buffer object.  The back pointer is
 * what lets amdgpu_gtt_mgr_recover() rebuild GART entries for each BO.
 */
struct amdgpu_gtt_node {
	struct ttm_buffer_object *tbo;	/* BO this node belongs to */
	struct ttm_range_mgr_node base;	/* embedded TTM resource + mm_nodes[] */
};
33
34static inline struct amdgpu_gtt_mgr *
35to_gtt_mgr(struct ttm_resource_manager *man)
36{
37 return container_of(man, struct amdgpu_gtt_mgr, manager);
38}
39
40static inline struct amdgpu_gtt_node *
41to_amdgpu_gtt_node(struct ttm_resource *res)
42{
43 return container_of(res, struct amdgpu_gtt_node, base.base);
44}
45
46
47
48
49
50
51
52
53
54static ssize_t amdgpu_mem_info_gtt_total_show(struct device *dev,
55 struct device_attribute *attr,
56 char *buf)
57{
58 struct drm_device *ddev = dev_get_drvdata(dev);
59 struct amdgpu_device *adev = drm_to_adev(ddev);
60 struct ttm_resource_manager *man;
61
62 man = ttm_manager_type(&adev->mman.bdev, TTM_PL_TT);
63 return sysfs_emit(buf, "%llu\n", man->size * PAGE_SIZE);
64}
65
66
67
68
69
70
71
72
73
74static ssize_t amdgpu_mem_info_gtt_used_show(struct device *dev,
75 struct device_attribute *attr,
76 char *buf)
77{
78 struct drm_device *ddev = dev_get_drvdata(dev);
79 struct amdgpu_device *adev = drm_to_adev(ddev);
80 struct ttm_resource_manager *man;
81
82 man = ttm_manager_type(&adev->mman.bdev, TTM_PL_TT);
83 return sysfs_emit(buf, "%llu\n", amdgpu_gtt_mgr_usage(man));
84}
85
/* Read-only sysfs files exposing total and used GTT, in bytes. */
static DEVICE_ATTR(mem_info_gtt_total, S_IRUGO,
		   amdgpu_mem_info_gtt_total_show, NULL);
static DEVICE_ATTR(mem_info_gtt_used, S_IRUGO,
		   amdgpu_mem_info_gtt_used_show, NULL);

static struct attribute *amdgpu_gtt_mgr_attributes[] = {
	&dev_attr_mem_info_gtt_total.attr,
	&dev_attr_mem_info_gtt_used.attr,
	NULL
};

/* Attribute group for the GTT manager sysfs files; presumably registered
 * by the driver's device setup code — not visible in this file. */
const struct attribute_group amdgpu_gtt_mgr_attr_group = {
	.attrs = amdgpu_gtt_mgr_attributes
};
100
101
102
103
104
105
106
107
108bool amdgpu_gtt_mgr_has_gart_addr(struct ttm_resource *res)
109{
110 struct amdgpu_gtt_node *node = to_amdgpu_gtt_node(res);
111
112 return drm_mm_node_allocated(&node->base.mm_nodes[0]);
113}
114
115
116
117
118
119
120
121
122
123
124
/**
 * amdgpu_gtt_mgr_new - allocate a new GTT resource
 * @man: TTM resource manager
 * @tbo: buffer object we are allocating space for
 * @place: placement flags and restrictions
 * @res: filled in with the allocated resource on success
 *
 * Accounts the allocation against the manager's usage limit and reserves a
 * GART address range only when the placement restricts it (place->lpfn != 0).
 * Otherwise no drm_mm node is allocated and the resource start is left at
 * AMDGPU_BO_INVALID_OFFSET.
 *
 * Returns: 0 on success, -ENOSPC when the GTT usage limit would be exceeded,
 * -ENOMEM on allocation failure, or the drm_mm_insert_node_in_range() error.
 */
static int amdgpu_gtt_mgr_new(struct ttm_resource_manager *man,
			      struct ttm_buffer_object *tbo,
			      const struct ttm_place *place,
			      struct ttm_resource **res)
{
	struct amdgpu_gtt_mgr *mgr = to_gtt_mgr(man);
	uint32_t num_pages = PFN_UP(tbo->base.size);
	struct amdgpu_gtt_node *node;
	int r;

	/*
	 * Temporary allocations are not accounted.  Everything else must fit
	 * in man->size (pages): add first, then back out on overflow, so the
	 * atomic counter stays consistent under concurrency.
	 */
	if (!(place->flags & TTM_PL_FLAG_TEMPORARY) &&
	    atomic64_add_return(num_pages, &mgr->used) > man->size) {
		atomic64_sub(num_pages, &mgr->used);
		return -ENOSPC;
	}

	/* struct_size(): node plus exactly one trailing drm_mm_node. */
	node = kzalloc(struct_size(node, base.mm_nodes, 1), GFP_KERNEL);
	if (!node) {
		r = -ENOMEM;
		goto err_out;
	}

	node->tbo = tbo;
	ttm_resource_init(tbo, place, &node->base.base);

	if (place->lpfn) {
		/* Placement restricts the GART window: reserve a range now. */
		spin_lock(&mgr->lock);
		r = drm_mm_insert_node_in_range(&mgr->mm,
						&node->base.mm_nodes[0],
						num_pages, tbo->page_alignment,
						0, place->fpfn, place->lpfn,
						DRM_MM_INSERT_BEST);
		spin_unlock(&mgr->lock);
		if (unlikely(r))
			goto err_free;

		node->base.base.start = node->base.mm_nodes[0].start;
	} else {
		/* No restriction: leave the GART address invalid for now. */
		node->base.mm_nodes[0].start = 0;
		node->base.mm_nodes[0].size = node->base.base.num_pages;
		node->base.base.start = AMDGPU_BO_INVALID_OFFSET;
	}

	*res = &node->base.base;
	return 0;

err_free:
	kfree(node);

err_out:
	/* Undo the usage accounting done above for non-temporary BOs. */
	if (!(place->flags & TTM_PL_FLAG_TEMPORARY))
		atomic64_sub(num_pages, &mgr->used);

	return r;
}
180
181
182
183
184
185
186
187
188
/**
 * amdgpu_gtt_mgr_del - free a GTT resource
 * @man: TTM resource manager
 * @res: the resource to release
 *
 * Removes the reserved GART range (if any), drops the usage accounting for
 * non-temporary allocations and frees the node.  Mirrors the accounting
 * done in amdgpu_gtt_mgr_new().
 */
static void amdgpu_gtt_mgr_del(struct ttm_resource_manager *man,
			       struct ttm_resource *res)
{
	struct amdgpu_gtt_node *node = to_amdgpu_gtt_node(res);
	struct amdgpu_gtt_mgr *mgr = to_gtt_mgr(man);

	spin_lock(&mgr->lock);
	/* Resources without a reserved GART range have no allocated node. */
	if (drm_mm_node_allocated(&node->base.mm_nodes[0]))
		drm_mm_remove_node(&node->base.mm_nodes[0]);
	spin_unlock(&mgr->lock);

	/* res->placement carries the place->flags used at allocation time. */
	if (!(res->placement & TTM_PL_FLAG_TEMPORARY))
		atomic64_sub(res->num_pages, &mgr->used);

	kfree(node);
}
205
206
207
208
209
210
211
212
213uint64_t amdgpu_gtt_mgr_usage(struct ttm_resource_manager *man)
214{
215 struct amdgpu_gtt_mgr *mgr = to_gtt_mgr(man);
216
217 return atomic64_read(&mgr->used) * PAGE_SIZE;
218}
219
220
221
222
223
224
225
226
/**
 * amdgpu_gtt_mgr_recover - re-init GART mappings
 * @man: TTM resource manager
 *
 * Walks every allocated drm_mm node (i.e. every BO that holds a reserved
 * GART range) and rebuilds its GART mapping, then invalidates the GART TLB.
 * The TLB is invalidated even when recovery stopped early on an error.
 *
 * Returns: 0 on success or the first error from amdgpu_ttm_recover_gart().
 */
int amdgpu_gtt_mgr_recover(struct ttm_resource_manager *man)
{
	struct amdgpu_gtt_mgr *mgr = to_gtt_mgr(man);
	struct amdgpu_device *adev;
	struct amdgpu_gtt_node *node;
	struct drm_mm_node *mm_node;
	int r = 0;

	adev = container_of(mgr, typeof(*adev), mman.gtt_mgr);
	spin_lock(&mgr->lock);
	drm_mm_for_each_node(mm_node, &mgr->mm) {
		/* mm_nodes[0] is embedded in the node; recover its owner BO. */
		node = container_of(mm_node, typeof(*node), base.mm_nodes[0]);
		r = amdgpu_ttm_recover_gart(node->tbo);
		if (r)
			break;
	}
	spin_unlock(&mgr->lock);

	amdgpu_gart_invalidate_tlb(adev);

	return r;
}
249
250
251
252
253
254
255
256
257
258static void amdgpu_gtt_mgr_debug(struct ttm_resource_manager *man,
259 struct drm_printer *printer)
260{
261 struct amdgpu_gtt_mgr *mgr = to_gtt_mgr(man);
262
263 spin_lock(&mgr->lock);
264 drm_mm_print(&mgr->mm, printer);
265 spin_unlock(&mgr->lock);
266
267 drm_printf(printer, "man size:%llu pages, gtt used:%llu pages\n",
268 man->size, atomic64_read(&mgr->used));
269}
270
/* TTM resource manager callbacks for the GTT placement domain. */
static const struct ttm_resource_manager_func amdgpu_gtt_mgr_func = {
	.alloc = amdgpu_gtt_mgr_new,
	.free = amdgpu_gtt_mgr_del,
	.debug = amdgpu_gtt_mgr_debug
};
276
277
278
279
280
281
282
283
284
285int amdgpu_gtt_mgr_init(struct amdgpu_device *adev, uint64_t gtt_size)
286{
287 struct amdgpu_gtt_mgr *mgr = &adev->mman.gtt_mgr;
288 struct ttm_resource_manager *man = &mgr->manager;
289 uint64_t start, size;
290
291 man->use_tt = true;
292 man->func = &amdgpu_gtt_mgr_func;
293
294 ttm_resource_manager_init(man, gtt_size >> PAGE_SHIFT);
295
296 start = AMDGPU_GTT_MAX_TRANSFER_SIZE * AMDGPU_GTT_NUM_TRANSFER_WINDOWS;
297 size = (adev->gmc.gart_size >> PAGE_SHIFT) - start;
298 drm_mm_init(&mgr->mm, start, size);
299 spin_lock_init(&mgr->lock);
300 atomic64_set(&mgr->used, 0);
301
302 ttm_set_driver_manager(&adev->mman.bdev, TTM_PL_TT, &mgr->manager);
303 ttm_resource_manager_set_used(man, true);
304 return 0;
305}
306
307
308
309
310
311
312
313
314
315void amdgpu_gtt_mgr_fini(struct amdgpu_device *adev)
316{
317 struct amdgpu_gtt_mgr *mgr = &adev->mman.gtt_mgr;
318 struct ttm_resource_manager *man = &mgr->manager;
319 int ret;
320
321 ttm_resource_manager_set_used(man, false);
322
323 ret = ttm_resource_manager_evict_all(&adev->mman.bdev, man);
324 if (ret)
325 return;
326
327 spin_lock(&mgr->lock);
328 drm_mm_takedown(&mgr->mm);
329 spin_unlock(&mgr->lock);
330
331 ttm_resource_manager_cleanup(man);
332 ttm_set_driver_manager(&adev->mman.bdev, TTM_PL_TT, NULL);
333}
334