1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22#include <drm/drmP.h>
23#include <linux/shmem_fs.h>
24#include "psb_drv.h"
25#include "blitter.h"
26
27
28
29
30
31
32
33
34
35
36
37
38
39static inline uint32_t psb_gtt_mask_pte(uint32_t pfn, int type)
40{
41 uint32_t mask = PSB_PTE_VALID;
42
43
44
45 BUG_ON(pfn & ~(0xFFFFFFFF >> PAGE_SHIFT));
46
47 if (type & PSB_MMU_CACHED_MEMORY)
48 mask |= PSB_PTE_CACHED;
49 if (type & PSB_MMU_RO_MEMORY)
50 mask |= PSB_PTE_RO;
51 if (type & PSB_MMU_WO_MEMORY)
52 mask |= PSB_PTE_WO;
53
54 return (pfn << PAGE_SHIFT) | mask;
55}
56
57
58
59
60
61
62
63
64
65static u32 __iomem *psb_gtt_entry(struct drm_device *dev, struct gtt_range *r)
66{
67 struct drm_psb_private *dev_priv = dev->dev_private;
68 unsigned long offset;
69
70 offset = r->resource.start - dev_priv->gtt_mem->start;
71
72 return dev_priv->gtt_map + (offset >> PAGE_SHIFT);
73}
74
75
76
77
78
79
80
81
82
83
84
85static int psb_gtt_insert(struct drm_device *dev, struct gtt_range *r,
86 int resume)
87{
88 u32 __iomem *gtt_slot;
89 u32 pte;
90 struct page **pages;
91 int i;
92
93 if (r->pages == NULL) {
94 WARN_ON(1);
95 return -EINVAL;
96 }
97
98 WARN_ON(r->stolen);
99
100 gtt_slot = psb_gtt_entry(dev, r);
101 pages = r->pages;
102
103 if (!resume) {
104
105 set_pages_array_wc(pages, r->npage);
106 }
107
108
109 for (i = r->roll; i < r->npage; i++) {
110 pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]),
111 PSB_MMU_CACHED_MEMORY);
112 iowrite32(pte, gtt_slot++);
113 }
114 for (i = 0; i < r->roll; i++) {
115 pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]),
116 PSB_MMU_CACHED_MEMORY);
117 iowrite32(pte, gtt_slot++);
118 }
119
120 ioread32(gtt_slot - 1);
121
122 return 0;
123}
124
125
126
127
128
129
130
131
132
133
134static void psb_gtt_remove(struct drm_device *dev, struct gtt_range *r)
135{
136 struct drm_psb_private *dev_priv = dev->dev_private;
137 u32 __iomem *gtt_slot;
138 u32 pte;
139 int i;
140
141 WARN_ON(r->stolen);
142
143 gtt_slot = psb_gtt_entry(dev, r);
144 pte = psb_gtt_mask_pte(page_to_pfn(dev_priv->scratch_page),
145 PSB_MMU_CACHED_MEMORY);
146
147 for (i = 0; i < r->npage; i++)
148 iowrite32(pte, gtt_slot++);
149 ioread32(gtt_slot - 1);
150 set_pages_array_wb(r->pages, r->npage);
151}
152
153
154
155
156
157
158
159
160
161
162
163void psb_gtt_roll(struct drm_device *dev, struct gtt_range *r, int roll)
164{
165 u32 __iomem *gtt_slot;
166 u32 pte;
167 int i;
168
169 if (roll >= r->npage) {
170 WARN_ON(1);
171 return;
172 }
173
174 r->roll = roll;
175
176
177
178 if (!r->stolen && !r->in_gart)
179 return;
180
181 gtt_slot = psb_gtt_entry(dev, r);
182
183 for (i = r->roll; i < r->npage; i++) {
184 pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]),
185 PSB_MMU_CACHED_MEMORY);
186 iowrite32(pte, gtt_slot++);
187 }
188 for (i = 0; i < r->roll; i++) {
189 pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]),
190 PSB_MMU_CACHED_MEMORY);
191 iowrite32(pte, gtt_slot++);
192 }
193 ioread32(gtt_slot - 1);
194}
195
196
197
198
199
200
201
202
203
204static int psb_gtt_attach_pages(struct gtt_range *gt)
205{
206 struct page **pages;
207
208 WARN_ON(gt->pages);
209
210 pages = drm_gem_get_pages(>->gem);
211 if (IS_ERR(pages))
212 return PTR_ERR(pages);
213
214 gt->npage = gt->gem.size / PAGE_SIZE;
215 gt->pages = pages;
216
217 return 0;
218}
219
220
221
222
223
224
225
226
227
228
229static void psb_gtt_detach_pages(struct gtt_range *gt)
230{
231 drm_gem_put_pages(>->gem, gt->pages, true, false);
232 gt->pages = NULL;
233}
234
235
236
237
238
239
240
241
242
243
244
245int psb_gtt_pin(struct gtt_range *gt)
246{
247 int ret = 0;
248 struct drm_device *dev = gt->gem.dev;
249 struct drm_psb_private *dev_priv = dev->dev_private;
250 u32 gpu_base = dev_priv->gtt.gatt_start;
251
252 mutex_lock(&dev_priv->gtt_mutex);
253
254 if (gt->in_gart == 0 && gt->stolen == 0) {
255 ret = psb_gtt_attach_pages(gt);
256 if (ret < 0)
257 goto out;
258 ret = psb_gtt_insert(dev, gt, 0);
259 if (ret < 0) {
260 psb_gtt_detach_pages(gt);
261 goto out;
262 }
263 psb_mmu_insert_pages(psb_mmu_get_default_pd(dev_priv->mmu),
264 gt->pages, (gpu_base + gt->offset),
265 gt->npage, 0, 0, PSB_MMU_CACHED_MEMORY);
266 }
267 gt->in_gart++;
268out:
269 mutex_unlock(&dev_priv->gtt_mutex);
270 return ret;
271}
272
273
274
275
276
277
278
279
280
281
282
283
/*
 *	psb_gtt_unpin	-	drop a pin on a GART range
 *	@gt: the gtt range to unpin
 *
 *	Decrement the pin count and, when it reaches zero on a non-stolen
 *	range, remove the pages from the GPU MMU and the GTT and release
 *	the backing pages. The blitter is idled first so the GPU cannot
 *	still be reading the pages we are about to tear down.
 */
void psb_gtt_unpin(struct gtt_range *gt)
{
	struct drm_device *dev = gt->gem.dev;
	struct drm_psb_private *dev_priv = dev->dev_private;
	u32 gpu_base = dev_priv->gtt.gatt_start;
	int ret;

	/* While the mutex is held no new blits can be initiated */
	mutex_lock(&dev_priv->gtt_mutex);

	/* Wait for any in-flight blitter usage of the pages to finish */
	ret = gma_blt_wait_idle(dev_priv);
	if (ret) {
		/* NOTE(review): bailing out here leaves in_gart unchanged,
		   so the range stays pinned forever — confirm intended */
		DRM_ERROR("Failed to idle the blitter, unpin failed!");
		goto out;
	}

	WARN_ON(!gt->in_gart);	/* unbalanced unpin */

	gt->in_gart--;
	if (gt->in_gart == 0 && gt->stolen == 0) {
		psb_mmu_remove_pages(psb_mmu_get_default_pd(dev_priv->mmu),
				     (gpu_base + gt->offset), gt->npage, 0, 0);
		psb_gtt_remove(dev, gt);
		psb_gtt_detach_pages(gt);
	}

out:
	mutex_unlock(&dev_priv->gtt_mutex);
}
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334struct gtt_range *psb_gtt_alloc_range(struct drm_device *dev, int len,
335 const char *name, int backed, u32 align)
336{
337 struct drm_psb_private *dev_priv = dev->dev_private;
338 struct gtt_range *gt;
339 struct resource *r = dev_priv->gtt_mem;
340 int ret;
341 unsigned long start, end;
342
343 if (backed) {
344
345 start = r->start;
346 end = r->start + dev_priv->gtt.stolen_size - 1;
347 } else {
348
349 start = r->start + dev_priv->gtt.stolen_size;
350 end = r->end;
351 }
352
353 gt = kzalloc(sizeof(struct gtt_range), GFP_KERNEL);
354 if (gt == NULL)
355 return NULL;
356 gt->resource.name = name;
357 gt->stolen = backed;
358 gt->in_gart = backed;
359 gt->roll = 0;
360
361 gt->gem.dev = dev;
362 ret = allocate_resource(dev_priv->gtt_mem, >->resource,
363 len, start, end, align, NULL, NULL);
364 if (ret == 0) {
365 gt->offset = gt->resource.start - r->start;
366 return gt;
367 }
368 kfree(gt);
369 return NULL;
370}
371
372
373
374
375
376
377
378
379
380void psb_gtt_free_range(struct drm_device *dev, struct gtt_range *gt)
381{
382
383 if (gt->mmapping) {
384 psb_gtt_unpin(gt);
385 gt->mmapping = 0;
386 }
387 WARN_ON(gt->in_gart && !gt->stolen);
388 release_resource(>->resource);
389 kfree(gt);
390}
391
/* One-time setup of GTT bookkeeping: currently just the rwsem */
static void psb_gtt_alloc(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = dev->dev_private;
	init_rwsem(&dev_priv->gtt.sem);
}
397
398void psb_gtt_takedown(struct drm_device *dev)
399{
400 struct drm_psb_private *dev_priv = dev->dev_private;
401
402 if (dev_priv->gtt_map) {
403 iounmap(dev_priv->gtt_map);
404 dev_priv->gtt_map = NULL;
405 }
406 if (dev_priv->gtt_initialized) {
407 pci_write_config_word(dev->pdev, PSB_GMCH_CTRL,
408 dev_priv->gmch_ctrl);
409 PSB_WVDC32(dev_priv->pge_ctl, PSB_PGETBL_CTL);
410 (void) PSB_RVDC32(PSB_PGETBL_CTL);
411 }
412 if (dev_priv->vram_addr)
413 iounmap(dev_priv->gtt_map);
414}
415
/*
 *	psb_gtt_init	-	bring up the GTT
 *	@dev: our DRM device
 *	@resume: nonzero when re-initializing after suspend/resume
 *
 *	Enable the GMCH and the GPU page table, size the GTT and GATT from
 *	the PCI BARs (with fallbacks when a BAR is unpopulated), map the
 *	GTT and the stolen memory on the CPU side, and fill the GTT with
 *	PTEs: the stolen pages first, scratch-page entries for the rest.
 *	Returns 0 on success or a negative errno after tearing down.
 */
int psb_gtt_init(struct drm_device *dev, int resume)
{
	struct drm_psb_private *dev_priv = dev->dev_private;
	unsigned gtt_pages;
	unsigned long stolen_size, vram_stolen_size;
	unsigned i, num_pages;
	unsigned pfn_base;
	struct psb_gtt *pg;

	int ret = 0;
	uint32_t pte;

	/* Locks and the rwsem only exist once; skip them on resume */
	if (!resume) {
		mutex_init(&dev_priv->gtt_mutex);
		mutex_init(&dev_priv->mmap_mutex);
		psb_gtt_alloc(dev);
	}

	pg = &dev_priv->gtt;

	/* Enable the GMCH, saving the old control word for takedown */
	pci_read_config_word(dev->pdev, PSB_GMCH_CTRL, &dev_priv->gmch_ctrl);
	pci_write_config_word(dev->pdev, PSB_GMCH_CTRL,
			      dev_priv->gmch_ctrl | _PSB_GMCH_ENABLED);

	/* Enable the GPU page table, saving the old control for takedown */
	dev_priv->pge_ctl = PSB_RVDC32(PSB_PGETBL_CTL);
	PSB_WVDC32(dev_priv->pge_ctl | _PSB_PGETBL_ENABLED, PSB_PGETBL_CTL);
	(void) PSB_RVDC32(PSB_PGETBL_CTL);	/* post the write */

	/* From here on, psb_gtt_takedown() must restore the registers */
	dev_priv->gtt_initialized = 1;

	/* Physical base of the GTT comes from the page table control reg */
	pg->gtt_phys_start = dev_priv->pge_ctl & PAGE_MASK;

	/* GPU-visible base of the GATT. Hard-coded rather than read from
	   a BAR — presumably to avoid a problematic lower address range;
	   TODO confirm the exact hardware reason */
	pg->mmu_gatt_start = 0xE0000000;

	pg->gtt_start = pci_resource_start(dev->pdev, PSB_GTT_RESOURCE);
	gtt_pages = pci_resource_len(dev->pdev, PSB_GTT_RESOURCE)
								>> PAGE_SHIFT;

	/* Some devices don't populate the GTT BAR; fall back to 64 pages
	   at the page-table control address */
	if (pg->gtt_start == 0 || gtt_pages == 0) {
		dev_dbg(dev->dev, "GTT PCI BAR not initialized.\n");
		gtt_pages = 64;
		pg->gtt_start = dev_priv->pge_ctl;
	}

	pg->gatt_start = pci_resource_start(dev->pdev, PSB_GATT_RESOURCE);
	pg->gatt_pages = pci_resource_len(dev->pdev, PSB_GATT_RESOURCE)
								>> PAGE_SHIFT;
	dev_priv->gtt_mem = &dev->pdev->resource[PSB_GATT_RESOURCE];

	if (pg->gatt_pages == 0 || pg->gatt_start == 0) {
		static struct resource fudge;

		/* GATT BAR unpopulated: fabricate a 128MB aperture at
		   0x40000000 so resource allocation still works */
		dev_dbg(dev->dev, "GATT PCI BAR not initialized.\n");
		pg->gatt_start = 0x40000000;
		pg->gatt_pages = (128 * 1024 * 1024) >> PAGE_SHIFT;

		/* Static so it stays valid for the lifetime of the driver;
		   dev_priv->gtt_mem points at it from here on */
		fudge.start = 0x40000000;
		fudge.end = 0x40000000 + 128 * 1024 * 1024 - 1;
		fudge.name = "fudge";
		fudge.flags = IORESOURCE_MEM;
		dev_priv->gtt_mem = &fudge;
	}

	/* Stolen memory runs from the BSM base up to the GTT, minus the
	   one page the GTT itself occupies */
	pci_read_config_dword(dev->pdev, PSB_BSM, &dev_priv->stolen_base);
	vram_stolen_size = pg->gtt_phys_start - dev_priv->stolen_base
								- PAGE_SIZE;

	stolen_size = vram_stolen_size;

	dev_dbg(dev->dev, "Stolen memory base 0x%x, size %luK\n",
		dev_priv->stolen_base, vram_stolen_size / 1024);

	/* NOTE(review): with &&, resume only fails when BOTH the GTT page
	   count AND the stolen size changed; a single mismatch is let
	   through — confirm this wasn't meant to be || */
	if (resume && (gtt_pages != pg->gtt_pages) &&
	    (stolen_size != pg->stolen_size)) {
		dev_err(dev->dev, "GTT resume error.\n");
		ret = -EINVAL;
		goto out_err;
	}

	pg->gtt_pages = gtt_pages;
	pg->stolen_size = stolen_size;
	dev_priv->vram_stolen_size = vram_stolen_size;

	/* Map the GTT page table itself (uncached); on resume the mapping
	   from the original init is still in place */
	if (!resume)
		dev_priv->gtt_map = ioremap_nocache(pg->gtt_phys_start,
						    gtt_pages << PAGE_SHIFT);
	if (!dev_priv->gtt_map) {
		dev_err(dev->dev, "Failure to map gtt.\n");
		ret = -ENOMEM;
		goto out_err;
	}

	/* Map the stolen memory (write-combined) for CPU access */
	if (!resume)
		dev_priv->vram_addr = ioremap_wc(dev_priv->stolen_base,
						 stolen_size);

	if (!dev_priv->vram_addr) {
		dev_err(dev->dev, "Failure to map stolen base.\n");
		ret = -ENOMEM;
		goto out_err;
	}

	/* Insert the stolen pages at the start of the GTT, mapping the
	   stolen memory 1:1 at GTT offset 0 */
	pfn_base = dev_priv->stolen_base >> PAGE_SHIFT;
	num_pages = vram_stolen_size >> PAGE_SHIFT;
	dev_dbg(dev->dev, "Set up %d stolen pages starting at 0x%08x, GTT offset %dK\n",
		num_pages, pfn_base << PAGE_SHIFT, 0);
	for (i = 0; i < num_pages; ++i) {
		pte = psb_gtt_mask_pte(pfn_base + i, PSB_MMU_CACHED_MEMORY);
		iowrite32(pte, dev_priv->gtt_map + i);
	}

	/* Point every remaining GTT slot at the scratch page so nothing
	   is left unmapped */
	pfn_base = page_to_pfn(dev_priv->scratch_page);
	pte = psb_gtt_mask_pte(pfn_base, PSB_MMU_CACHED_MEMORY);
	for (; i < gtt_pages; ++i)
		iowrite32(pte, dev_priv->gtt_map + i);

	/* Read back the last entry to post the writes */
	(void) ioread32(dev_priv->gtt_map + i - 1);
	return 0;

out_err:
	psb_gtt_takedown(dev);
	return ret;
}
563
564int psb_gtt_restore(struct drm_device *dev)
565{
566 struct drm_psb_private *dev_priv = dev->dev_private;
567 struct resource *r = dev_priv->gtt_mem->child;
568 struct gtt_range *range;
569 unsigned int restored = 0, total = 0, size = 0;
570
571
572 mutex_lock(&dev_priv->gtt_mutex);
573 psb_gtt_init(dev, 1);
574
575 while (r != NULL) {
576 range = container_of(r, struct gtt_range, resource);
577 if (range->pages) {
578 psb_gtt_insert(dev, range, 1);
579 size += range->resource.end - range->resource.start;
580 restored++;
581 }
582 r = r->sibling;
583 total++;
584 }
585 mutex_unlock(&dev_priv->gtt_mutex);
586 DRM_DEBUG_DRIVER("Restored %u of %u gtt ranges (%u KB)", restored,
587 total, (size / 1024));
588
589 return 0;
590}
591