1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28#include <drm/drmP.h>
29#include <drm/amdgpu_drm.h>
30#ifdef CONFIG_X86
31#include <asm/set_memory.h>
32#endif
33#include "amdgpu.h"
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69static int amdgpu_gart_dummy_page_init(struct amdgpu_device *adev)
70{
71 struct page *dummy_page = adev->mman.bdev.glob->dummy_read_page;
72
73 if (adev->dummy_page_addr)
74 return 0;
75 adev->dummy_page_addr = pci_map_page(adev->pdev, dummy_page, 0,
76 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
77 if (pci_dma_mapping_error(adev->pdev, adev->dummy_page_addr)) {
78 dev_err(&adev->pdev->dev, "Failed to DMA MAP the dummy page\n");
79 adev->dummy_page_addr = 0;
80 return -ENOMEM;
81 }
82 return 0;
83}
84
85
86
87
88
89
90
91
92static void amdgpu_gart_dummy_page_fini(struct amdgpu_device *adev)
93{
94 if (!adev->dummy_page_addr)
95 return;
96 pci_unmap_page(adev->pdev, adev->dummy_page_addr,
97 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
98 adev->dummy_page_addr = 0;
99}
100
101
102
103
104
105
106
107
108
109
110
111int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev)
112{
113 int r;
114
115 if (adev->gart.robj == NULL) {
116 r = amdgpu_bo_create(adev, adev->gart.table_size, PAGE_SIZE,
117 AMDGPU_GEM_DOMAIN_VRAM,
118 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
119 AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
120 ttm_bo_type_kernel, NULL,
121 &adev->gart.robj);
122 if (r) {
123 return r;
124 }
125 }
126 return 0;
127}
128
129
130
131
132
133
134
135
136
137
138
139int amdgpu_gart_table_vram_pin(struct amdgpu_device *adev)
140{
141 uint64_t gpu_addr;
142 int r;
143
144 r = amdgpu_bo_reserve(adev->gart.robj, false);
145 if (unlikely(r != 0))
146 return r;
147 r = amdgpu_bo_pin(adev->gart.robj,
148 AMDGPU_GEM_DOMAIN_VRAM, &gpu_addr);
149 if (r) {
150 amdgpu_bo_unreserve(adev->gart.robj);
151 return r;
152 }
153 r = amdgpu_bo_kmap(adev->gart.robj, &adev->gart.ptr);
154 if (r)
155 amdgpu_bo_unpin(adev->gart.robj);
156 amdgpu_bo_unreserve(adev->gart.robj);
157 adev->gart.table_addr = gpu_addr;
158 return r;
159}
160
161
162
163
164
165
166
167
168
169void amdgpu_gart_table_vram_unpin(struct amdgpu_device *adev)
170{
171 int r;
172
173 if (adev->gart.robj == NULL) {
174 return;
175 }
176 r = amdgpu_bo_reserve(adev->gart.robj, true);
177 if (likely(r == 0)) {
178 amdgpu_bo_kunmap(adev->gart.robj);
179 amdgpu_bo_unpin(adev->gart.robj);
180 amdgpu_bo_unreserve(adev->gart.robj);
181 adev->gart.ptr = NULL;
182 }
183}
184
185
186
187
188
189
190
191
192
193
194void amdgpu_gart_table_vram_free(struct amdgpu_device *adev)
195{
196 if (adev->gart.robj == NULL) {
197 return;
198 }
199 amdgpu_bo_unref(&adev->gart.robj);
200}
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
/**
 * amdgpu_gart_unbind - unbind pages from the GART page table
 *
 * @adev: amdgpu_device pointer
 * @offset: byte offset into the GART aperture
 * @pages: number of CPU pages to unbind
 *
 * Points the affected GART entries at the DMA-mapped dummy page so the
 * GPU never accesses freed system memory, then flushes HDP and the GPU TLB.
 * Returns 0 for success, -EINVAL if the GART has not been initialized.
 */
int amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
			int pages)
{
	unsigned t;
	unsigned p;
	int i, j;
	u64 page_base;

	/* Dummy-page entries carry no access flags. */
	uint64_t flags = 0;

	if (!adev->gart.ready) {
		WARN(1, "trying to unbind memory from uninitialized GART !\n");
		return -EINVAL;
	}

	/* t indexes GPU-sized pages, p indexes CPU-sized pages; a CPU page
	 * spans PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE GART entries. */
	t = offset / AMDGPU_GPU_PAGE_SIZE;
	p = t / (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
	for (i = 0; i < pages; i++, p++) {
#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
		/* Debugfs bookkeeping is cleared even when the table is
		 * not CPU-mapped. */
		adev->gart.pages[p] = NULL;
#endif
		page_base = adev->dummy_page_addr;
		/* No CPU mapping of the table - nothing to rewrite. */
		if (!adev->gart.ptr)
			continue;

		for (j = 0; j < (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE); j++, t++) {
			amdgpu_gmc_set_pte_pde(adev, adev->gart.ptr,
					       t, page_base, flags);
			page_base += AMDGPU_GPU_PAGE_SIZE;
		}
	}
	/* Make sure the PTE writes are visible before flushing. */
	mb();
	amdgpu_asic_flush_hdp(adev, NULL);
	amdgpu_gmc_flush_gpu_tlb(adev, 0);
	return 0;
}
252
253
254
255
256
257
258
259
260
261
262
263
264int amdgpu_gart_map(struct amdgpu_device *adev, uint64_t offset,
265 int pages, dma_addr_t *dma_addr, uint64_t flags,
266 void *dst)
267{
268 uint64_t page_base;
269 unsigned i, j, t;
270
271 if (!adev->gart.ready) {
272 WARN(1, "trying to bind memory to uninitialized GART !\n");
273 return -EINVAL;
274 }
275
276 t = offset / AMDGPU_GPU_PAGE_SIZE;
277
278 for (i = 0; i < pages; i++) {
279 page_base = dma_addr[i];
280 for (j = 0; j < (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE); j++, t++) {
281 amdgpu_gmc_set_pte_pde(adev, dst, t, page_base, flags);
282 page_base += AMDGPU_GPU_PAGE_SIZE;
283 }
284 }
285 return 0;
286}
287
288
289
290
291
292
293
294
295
296
297
298
299
300
/**
 * amdgpu_gart_bind - bind pages into the GART page table
 *
 * @adev: amdgpu_device pointer
 * @offset: byte offset into the GART aperture
 * @pages: number of CPU pages to bind
 * @pagelist: pages to track for debugfs (may be NULL)
 * @dma_addr: array of DMA addresses, one per CPU page
 * @flags: access flags for the page table entries
 *
 * Records the pages for debugfs (when enabled), writes the entries via
 * amdgpu_gart_map() if the table is CPU-mapped, then flushes HDP and
 * the GPU TLB.  Returns 0 for success, -EINVAL if the GART has not
 * been initialized.
 */
int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
		     int pages, struct page **pagelist, dma_addr_t *dma_addr,
		     uint64_t flags)
{
#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
	unsigned i,t,p;
#endif
	int r;

	if (!adev->gart.ready) {
		WARN(1, "trying to bind memory to uninitialized GART !\n");
		return -EINVAL;
	}

#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
	/* Track the backing pages; t is the GPU-page index, p the
	 * CPU-page index derived from it. */
	t = offset / AMDGPU_GPU_PAGE_SIZE;
	p = t / (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
	for (i = 0; i < pages; i++, p++)
		adev->gart.pages[p] = pagelist ? pagelist[i] : NULL;
#endif

	/* Table not CPU-mapped - bookkeeping above is all we can do. */
	if (!adev->gart.ptr)
		return 0;

	r = amdgpu_gart_map(adev, offset, pages, dma_addr, flags,
		    adev->gart.ptr);
	if (r)
		return r;

	/* Make sure the PTE writes are visible before flushing. */
	mb();
	amdgpu_asic_flush_hdp(adev, NULL);
	amdgpu_gmc_flush_gpu_tlb(adev, 0);
	return 0;
}
335
336
337
338
339
340
341
342
343
344int amdgpu_gart_init(struct amdgpu_device *adev)
345{
346 int r;
347
348 if (adev->dummy_page_addr)
349 return 0;
350
351
352 if (PAGE_SIZE < AMDGPU_GPU_PAGE_SIZE) {
353 DRM_ERROR("Page size is smaller than GPU page size!\n");
354 return -EINVAL;
355 }
356 r = amdgpu_gart_dummy_page_init(adev);
357 if (r)
358 return r;
359
360 adev->gart.num_cpu_pages = adev->gmc.gart_size / PAGE_SIZE;
361 adev->gart.num_gpu_pages = adev->gmc.gart_size / AMDGPU_GPU_PAGE_SIZE;
362 DRM_INFO("GART: num cpu pages %u, num gpu pages %u\n",
363 adev->gart.num_cpu_pages, adev->gart.num_gpu_pages);
364
365#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
366
367 adev->gart.pages = vzalloc(sizeof(void *) * adev->gart.num_cpu_pages);
368 if (adev->gart.pages == NULL)
369 return -ENOMEM;
370#endif
371
372 return 0;
373}
374
375
376
377
378
379
380
381
/**
 * amdgpu_gart_fini - tear down the driver-side GART bookkeeping
 *
 * @adev: amdgpu_device pointer
 *
 * Frees the debugfs page-tracking array (when enabled) and unmaps the
 * dummy page.  Counterpart of amdgpu_gart_init().
 */
void amdgpu_gart_fini(struct amdgpu_device *adev)
{
#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
	vfree(adev->gart.pages);
	adev->gart.pages = NULL;
#endif
	amdgpu_gart_dummy_page_fini(adev);
}
390