1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28#include <drm/drmP.h>
29#include <drm/radeon_drm.h>
30#ifdef CONFIG_X86
31#include <asm/set_memory.h>
32#endif
33#include "radeon.h"
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68int radeon_gart_table_ram_alloc(struct radeon_device *rdev)
69{
70 void *ptr;
71
72 ptr = pci_alloc_consistent(rdev->pdev, rdev->gart.table_size,
73 &rdev->gart.table_addr);
74 if (ptr == NULL) {
75 return -ENOMEM;
76 }
77#ifdef CONFIG_X86
78 if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480 ||
79 rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) {
80 set_memory_uc((unsigned long)ptr,
81 rdev->gart.table_size >> PAGE_SHIFT);
82 }
83#endif
84 rdev->gart.ptr = ptr;
85 memset((void *)rdev->gart.ptr, 0, rdev->gart.table_size);
86 return 0;
87}
88
89
90
91
92
93
94
95
96
97
98void radeon_gart_table_ram_free(struct radeon_device *rdev)
99{
100 if (rdev->gart.ptr == NULL) {
101 return;
102 }
103#ifdef CONFIG_X86
104 if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480 ||
105 rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) {
106 set_memory_wb((unsigned long)rdev->gart.ptr,
107 rdev->gart.table_size >> PAGE_SHIFT);
108 }
109#endif
110 pci_free_consistent(rdev->pdev, rdev->gart.table_size,
111 (void *)rdev->gart.ptr,
112 rdev->gart.table_addr);
113 rdev->gart.ptr = NULL;
114 rdev->gart.table_addr = 0;
115}
116
117
118
119
120
121
122
123
124
125
126
127int radeon_gart_table_vram_alloc(struct radeon_device *rdev)
128{
129 int r;
130
131 if (rdev->gart.robj == NULL) {
132 r = radeon_bo_create(rdev, rdev->gart.table_size,
133 PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
134 0, NULL, NULL, &rdev->gart.robj);
135 if (r) {
136 return r;
137 }
138 }
139 return 0;
140}
141
142
143
144
145
146
147
148
149
150
151
/**
 * radeon_gart_table_vram_pin - pin gart page table in vram
 *
 * @rdev: radeon_device pointer
 *
 * Pin the GART page table buffer object in VRAM and kmap it so the CPU
 * can write entries (pcie r4xx, r5xx+).  On success the GPU address is
 * stored in rdev->gart.table_addr and the CPU mapping in rdev->gart.ptr,
 * and all page table entries are (re)written from the cached
 * pages_entry[] array.
 * Returns 0 for success, error for failure.
 */
int radeon_gart_table_vram_pin(struct radeon_device *rdev)
{
	uint64_t gpu_addr;
	int r;

	r = radeon_bo_reserve(rdev->gart.robj, false);
	if (unlikely(r != 0))
		return r;
	r = radeon_bo_pin(rdev->gart.robj,
			  RADEON_GEM_DOMAIN_VRAM, &gpu_addr);
	if (r) {
		radeon_bo_unreserve(rdev->gart.robj);
		return r;
	}
	/* If kmap fails, undo the pin; the reservation is dropped on both
	 * paths below. */
	r = radeon_bo_kmap(rdev->gart.robj, &rdev->gart.ptr);
	if (r)
		radeon_bo_unpin(rdev->gart.robj);
	radeon_bo_unreserve(rdev->gart.robj);
	rdev->gart.table_addr = gpu_addr;

	if (!r) {
		int i;

		/* We might have dropped some GART table updates while the
		 * table wasn't mapped (see radeon_gart_unbind/bind, which
		 * skip set_page when gart.ptr is NULL) — restore all
		 * entries from the cached copy. */
		for (i = 0; i < rdev->gart.num_gpu_pages; i++)
			radeon_gart_set_page(rdev, i, rdev->gart.pages_entry[i]);
		mb();
		radeon_gart_tlb_flush(rdev);
	}

	return r;
}
186
187
188
189
190
191
192
193
194
195void radeon_gart_table_vram_unpin(struct radeon_device *rdev)
196{
197 int r;
198
199 if (rdev->gart.robj == NULL) {
200 return;
201 }
202 r = radeon_bo_reserve(rdev->gart.robj, false);
203 if (likely(r == 0)) {
204 radeon_bo_kunmap(rdev->gart.robj);
205 radeon_bo_unpin(rdev->gart.robj);
206 radeon_bo_unreserve(rdev->gart.robj);
207 rdev->gart.ptr = NULL;
208 }
209}
210
211
212
213
214
215
216
217
218
219
220void radeon_gart_table_vram_free(struct radeon_device *rdev)
221{
222 if (rdev->gart.robj == NULL) {
223 return;
224 }
225 radeon_bo_unref(&rdev->gart.robj);
226}
227
228
229
230
231
232
233
234
235
236
237
238
239
240
/**
 * radeon_gart_unbind - unbind pages from the gart page table
 *
 * @rdev: radeon_device pointer
 * @offset: offset into the GPU's gart aperture
 * @pages: number of CPU pages to unbind
 *
 * Replaces the requested pages in the GART page table with the dummy
 * page entry (all asics).  If the table is currently CPU-mapped the
 * hardware table is updated and the TLB flushed; otherwise only the
 * cached pages_entry[] copy changes (resynced by
 * radeon_gart_table_vram_pin).
 */
void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
			int pages)
{
	unsigned t;
	unsigned p;
	int i, j;

	if (!rdev->gart.ready) {
		WARN(1, "trying to unbind memory from uninitialized GART !\n");
		return;
	}
	/* t indexes GPU-sized pages, p indexes CPU-sized pages; one CPU
	 * page covers PAGE_SIZE / RADEON_GPU_PAGE_SIZE GPU pages. */
	t = offset / RADEON_GPU_PAGE_SIZE;
	p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
	for (i = 0; i < pages; i++, p++) {
		if (rdev->gart.pages[p]) {
			rdev->gart.pages[p] = NULL;
			for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
				rdev->gart.pages_entry[t] = rdev->dummy_page.entry;
				if (rdev->gart.ptr) {
					radeon_gart_set_page(rdev, t,
							     rdev->dummy_page.entry);
				}
			}
		}
	}
	if (rdev->gart.ptr) {
		/* Make sure the entry writes are visible before flushing. */
		mb();
		radeon_gart_tlb_flush(rdev);
	}
}
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
/**
 * radeon_gart_bind - bind pages into the gart page table
 *
 * @rdev: radeon_device pointer
 * @offset: offset into the GPU's gart aperture
 * @pages: number of CPU pages to bind
 * @pagelist: pages to bind
 * @dma_addr: DMA addresses of the pages
 * @flags: RADEON_GART_PAGE_* flags
 *
 * Binds the requested pages to the gart page table (all asics).
 * Entries are always recorded in the cached pages_entry[] array; the
 * hardware table is only written (and the TLB flushed) when the table
 * is currently CPU-mapped.
 * Returns 0 for success, -EINVAL for failure.
 */
int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
		     int pages, struct page **pagelist, dma_addr_t *dma_addr,
		     uint32_t flags)
{
	unsigned t;
	unsigned p;
	uint64_t page_base, page_entry;
	int i, j;

	if (!rdev->gart.ready) {
		WARN(1, "trying to bind memory to uninitialized GART !\n");
		return -EINVAL;
	}
	/* t indexes GPU-sized pages, p indexes CPU-sized pages. */
	t = offset / RADEON_GPU_PAGE_SIZE;
	p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);

	for (i = 0; i < pages; i++, p++) {
		rdev->gart.pages[p] = pagelist[i];
		page_base = dma_addr[i];
		/* One CPU page maps to several consecutive GPU pages. */
		for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
			page_entry = radeon_gart_get_page_entry(page_base, flags);
			rdev->gart.pages_entry[t] = page_entry;
			if (rdev->gart.ptr) {
				radeon_gart_set_page(rdev, t, page_entry);
			}
			page_base += RADEON_GPU_PAGE_SIZE;
		}
	}
	if (rdev->gart.ptr) {
		/* Make sure the entry writes are visible before flushing. */
		mb();
		radeon_gart_tlb_flush(rdev);
	}
	return 0;
}
320
321
322
323
324
325
326
327
328
/**
 * radeon_gart_init - init the driver info for managing the gart
 *
 * @rdev: radeon_device pointer
 *
 * Allocate the dummy page and the page/entry bookkeeping arrays
 * (all asics).  All entries start out pointing at the dummy page.
 * Returns 0 for success, error for failure.
 */
int radeon_gart_init(struct radeon_device *rdev)
{
	int r, i;

	/* Already initialized. */
	if (rdev->gart.pages) {
		return 0;
	}
	/* The bind/unbind index math assumes a CPU page holds a whole
	 * number of GPU pages. */
	if (PAGE_SIZE < RADEON_GPU_PAGE_SIZE) {
		DRM_ERROR("Page size is smaller than GPU page size!\n");
		return -EINVAL;
	}
	r = radeon_dummy_page_init(rdev);
	if (r)
		return r;

	/* Compute table size in CPU and GPU pages. */
	rdev->gart.num_cpu_pages = rdev->mc.gtt_size / PAGE_SIZE;
	rdev->gart.num_gpu_pages = rdev->mc.gtt_size / RADEON_GPU_PAGE_SIZE;
	DRM_INFO("GART: num cpu pages %u, num gpu pages %u\n",
		 rdev->gart.num_cpu_pages, rdev->gart.num_gpu_pages);

	/* Allocate pages table; radeon_gart_fini() handles partial
	 * allocation on the error paths. */
	rdev->gart.pages = vzalloc(array_size(sizeof(void *),
				   rdev->gart.num_cpu_pages));
	if (rdev->gart.pages == NULL) {
		radeon_gart_fini(rdev);
		return -ENOMEM;
	}
	rdev->gart.pages_entry = vmalloc(array_size(sizeof(uint64_t),
						    rdev->gart.num_gpu_pages));
	if (rdev->gart.pages_entry == NULL) {
		radeon_gart_fini(rdev);
		return -ENOMEM;
	}
	/* Set GART entries to point to the dummy page by default. */
	for (i = 0; i < rdev->gart.num_gpu_pages; i++)
		rdev->gart.pages_entry[i] = rdev->dummy_page.entry;
	return 0;
}
367
368
369
370
371
372
373
374
375void radeon_gart_fini(struct radeon_device *rdev)
376{
377 if (rdev->gart.ready) {
378
379 radeon_gart_unbind(rdev, 0, rdev->gart.num_cpu_pages);
380 }
381 rdev->gart.ready = false;
382 vfree(rdev->gart.pages);
383 vfree(rdev->gart.pages_entry);
384 rdev->gart.pages = NULL;
385 rdev->gart.pages_entry = NULL;
386
387 radeon_dummy_page_fini(rdev);
388}
389