1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28#include <drm/drmP.h>
29#include <drm/amdgpu_drm.h>
30#ifdef CONFIG_X86
31#include <asm/set_memory.h>
32#endif
33#include "amdgpu.h"
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69static int amdgpu_gart_dummy_page_init(struct amdgpu_device *adev)
70{
71 struct page *dummy_page = adev->mman.bdev.glob->dummy_read_page;
72
73 if (adev->dummy_page_addr)
74 return 0;
75 adev->dummy_page_addr = pci_map_page(adev->pdev, dummy_page, 0,
76 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
77 if (pci_dma_mapping_error(adev->pdev, adev->dummy_page_addr)) {
78 dev_err(&adev->pdev->dev, "Failed to DMA MAP the dummy page\n");
79 adev->dummy_page_addr = 0;
80 return -ENOMEM;
81 }
82 return 0;
83}
84
85
86
87
88
89
90
91
92static void amdgpu_gart_dummy_page_fini(struct amdgpu_device *adev)
93{
94 if (!adev->dummy_page_addr)
95 return;
96 pci_unmap_page(adev->pdev, adev->dummy_page_addr,
97 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
98 adev->dummy_page_addr = 0;
99}
100
101
102
103
104
105
106
107
108
109
110
111int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev)
112{
113 int r;
114
115 if (adev->gart.robj == NULL) {
116 struct amdgpu_bo_param bp;
117
118 memset(&bp, 0, sizeof(bp));
119 bp.size = adev->gart.table_size;
120 bp.byte_align = PAGE_SIZE;
121 bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
122 bp.flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
123 AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
124 bp.type = ttm_bo_type_kernel;
125 bp.resv = NULL;
126 r = amdgpu_bo_create(adev, &bp, &adev->gart.robj);
127 if (r) {
128 return r;
129 }
130 }
131 return 0;
132}
133
134
135
136
137
138
139
140
141
142
143
144int amdgpu_gart_table_vram_pin(struct amdgpu_device *adev)
145{
146 int r;
147
148 r = amdgpu_bo_reserve(adev->gart.robj, false);
149 if (unlikely(r != 0))
150 return r;
151 r = amdgpu_bo_pin(adev->gart.robj, AMDGPU_GEM_DOMAIN_VRAM);
152 if (r) {
153 amdgpu_bo_unreserve(adev->gart.robj);
154 return r;
155 }
156 r = amdgpu_bo_kmap(adev->gart.robj, &adev->gart.ptr);
157 if (r)
158 amdgpu_bo_unpin(adev->gart.robj);
159 amdgpu_bo_unreserve(adev->gart.robj);
160 adev->gart.table_addr = amdgpu_bo_gpu_offset(adev->gart.robj);
161 return r;
162}
163
164
165
166
167
168
169
170
171
172void amdgpu_gart_table_vram_unpin(struct amdgpu_device *adev)
173{
174 int r;
175
176 if (adev->gart.robj == NULL) {
177 return;
178 }
179 r = amdgpu_bo_reserve(adev->gart.robj, true);
180 if (likely(r == 0)) {
181 amdgpu_bo_kunmap(adev->gart.robj);
182 amdgpu_bo_unpin(adev->gart.robj);
183 amdgpu_bo_unreserve(adev->gart.robj);
184 adev->gart.ptr = NULL;
185 }
186}
187
188
189
190
191
192
193
194
195
196
197void amdgpu_gart_table_vram_free(struct amdgpu_device *adev)
198{
199 if (adev->gart.robj == NULL) {
200 return;
201 }
202 amdgpu_bo_unref(&adev->gart.robj);
203}
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219int amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
220 int pages)
221{
222 unsigned t;
223 unsigned p;
224 int i, j;
225 u64 page_base;
226
227 uint64_t flags = 0;
228
229 if (!adev->gart.ready) {
230 WARN(1, "trying to unbind memory from uninitialized GART !\n");
231 return -EINVAL;
232 }
233
234 t = offset / AMDGPU_GPU_PAGE_SIZE;
235 p = t / AMDGPU_GPU_PAGES_IN_CPU_PAGE;
236 for (i = 0; i < pages; i++, p++) {
237#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
238 adev->gart.pages[p] = NULL;
239#endif
240 page_base = adev->dummy_page_addr;
241 if (!adev->gart.ptr)
242 continue;
243
244 for (j = 0; j < AMDGPU_GPU_PAGES_IN_CPU_PAGE; j++, t++) {
245 amdgpu_gmc_set_pte_pde(adev, adev->gart.ptr,
246 t, page_base, flags);
247 page_base += AMDGPU_GPU_PAGE_SIZE;
248 }
249 }
250 mb();
251 amdgpu_asic_flush_hdp(adev, NULL);
252 amdgpu_gmc_flush_gpu_tlb(adev, 0);
253 return 0;
254}
255
256
257
258
259
260
261
262
263
264
265
266
267int amdgpu_gart_map(struct amdgpu_device *adev, uint64_t offset,
268 int pages, dma_addr_t *dma_addr, uint64_t flags,
269 void *dst)
270{
271 uint64_t page_base;
272 unsigned i, j, t;
273
274 if (!adev->gart.ready) {
275 WARN(1, "trying to bind memory to uninitialized GART !\n");
276 return -EINVAL;
277 }
278
279 t = offset / AMDGPU_GPU_PAGE_SIZE;
280
281 for (i = 0; i < pages; i++) {
282 page_base = dma_addr[i];
283 for (j = 0; j < AMDGPU_GPU_PAGES_IN_CPU_PAGE; j++, t++) {
284 amdgpu_gmc_set_pte_pde(adev, dst, t, page_base, flags);
285 page_base += AMDGPU_GPU_PAGE_SIZE;
286 }
287 }
288 return 0;
289}
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
305 int pages, struct page **pagelist, dma_addr_t *dma_addr,
306 uint64_t flags)
307{
308#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
309 unsigned i,t,p;
310#endif
311 int r;
312
313 if (!adev->gart.ready) {
314 WARN(1, "trying to bind memory to uninitialized GART !\n");
315 return -EINVAL;
316 }
317
318#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
319 t = offset / AMDGPU_GPU_PAGE_SIZE;
320 p = t / AMDGPU_GPU_PAGES_IN_CPU_PAGE;
321 for (i = 0; i < pages; i++, p++)
322 adev->gart.pages[p] = pagelist ? pagelist[i] : NULL;
323#endif
324
325 if (!adev->gart.ptr)
326 return 0;
327
328 r = amdgpu_gart_map(adev, offset, pages, dma_addr, flags,
329 adev->gart.ptr);
330 if (r)
331 return r;
332
333 mb();
334 amdgpu_asic_flush_hdp(adev, NULL);
335 amdgpu_gmc_flush_gpu_tlb(adev, 0);
336 return 0;
337}
338
339
340
341
342
343
344
345
346
347int amdgpu_gart_init(struct amdgpu_device *adev)
348{
349 int r;
350
351 if (adev->dummy_page_addr)
352 return 0;
353
354
355 if (PAGE_SIZE < AMDGPU_GPU_PAGE_SIZE) {
356 DRM_ERROR("Page size is smaller than GPU page size!\n");
357 return -EINVAL;
358 }
359 r = amdgpu_gart_dummy_page_init(adev);
360 if (r)
361 return r;
362
363 adev->gart.num_cpu_pages = adev->gmc.gart_size / PAGE_SIZE;
364 adev->gart.num_gpu_pages = adev->gmc.gart_size / AMDGPU_GPU_PAGE_SIZE;
365 DRM_INFO("GART: num cpu pages %u, num gpu pages %u\n",
366 adev->gart.num_cpu_pages, adev->gart.num_gpu_pages);
367
368#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
369
370 adev->gart.pages = vzalloc(array_size(sizeof(void *),
371 adev->gart.num_cpu_pages));
372 if (adev->gart.pages == NULL)
373 return -ENOMEM;
374#endif
375
376 return 0;
377}
378
379
380
381
382
383
384
385
/**
 * amdgpu_gart_fini - tear down the driver info for managing the gart
 *
 * @adev: amdgpu_device pointer
 *
 * Unmap the dummy page and release the debugfs pages table (all asics).
 * Counterpart of amdgpu_gart_init().
 */
void amdgpu_gart_fini(struct amdgpu_device *adev)
{
	amdgpu_gart_dummy_page_fini(adev);
#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
	vfree(adev->gart.pages);
	adev->gart.pages = NULL;
#endif
}
394