1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22#include <drm/drmP.h>
23#include <linux/shmem_fs.h>
24#include <asm/set_memory.h>
25#include "psb_drv.h"
26#include "blitter.h"
27
28
29
30
31
32
33
34
35
36
37
38
39
40static inline uint32_t psb_gtt_mask_pte(uint32_t pfn, int type)
41{
42 uint32_t mask = PSB_PTE_VALID;
43
44
45
46 BUG_ON(pfn & ~(0xFFFFFFFF >> PAGE_SHIFT));
47
48 if (type & PSB_MMU_CACHED_MEMORY)
49 mask |= PSB_PTE_CACHED;
50 if (type & PSB_MMU_RO_MEMORY)
51 mask |= PSB_PTE_RO;
52 if (type & PSB_MMU_WO_MEMORY)
53 mask |= PSB_PTE_WO;
54
55 return (pfn << PAGE_SHIFT) | mask;
56}
57
58
59
60
61
62
63
64
65
66static u32 __iomem *psb_gtt_entry(struct drm_device *dev, struct gtt_range *r)
67{
68 struct drm_psb_private *dev_priv = dev->dev_private;
69 unsigned long offset;
70
71 offset = r->resource.start - dev_priv->gtt_mem->start;
72
73 return dev_priv->gtt_map + (offset >> PAGE_SHIFT);
74}
75
76
77
78
79
80
81
82
83
84
85
86static int psb_gtt_insert(struct drm_device *dev, struct gtt_range *r,
87 int resume)
88{
89 u32 __iomem *gtt_slot;
90 u32 pte;
91 struct page **pages;
92 int i;
93
94 if (r->pages == NULL) {
95 WARN_ON(1);
96 return -EINVAL;
97 }
98
99 WARN_ON(r->stolen);
100
101 gtt_slot = psb_gtt_entry(dev, r);
102 pages = r->pages;
103
104 if (!resume) {
105
106 set_pages_array_wc(pages, r->npage);
107 }
108
109
110 for (i = r->roll; i < r->npage; i++) {
111 pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]),
112 PSB_MMU_CACHED_MEMORY);
113 iowrite32(pte, gtt_slot++);
114 }
115 for (i = 0; i < r->roll; i++) {
116 pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]),
117 PSB_MMU_CACHED_MEMORY);
118 iowrite32(pte, gtt_slot++);
119 }
120
121 ioread32(gtt_slot - 1);
122
123 return 0;
124}
125
126
127
128
129
130
131
132
133
134
135static void psb_gtt_remove(struct drm_device *dev, struct gtt_range *r)
136{
137 struct drm_psb_private *dev_priv = dev->dev_private;
138 u32 __iomem *gtt_slot;
139 u32 pte;
140 int i;
141
142 WARN_ON(r->stolen);
143
144 gtt_slot = psb_gtt_entry(dev, r);
145 pte = psb_gtt_mask_pte(page_to_pfn(dev_priv->scratch_page),
146 PSB_MMU_CACHED_MEMORY);
147
148 for (i = 0; i < r->npage; i++)
149 iowrite32(pte, gtt_slot++);
150 ioread32(gtt_slot - 1);
151 set_pages_array_wb(r->pages, r->npage);
152}
153
154
155
156
157
158
159
160
161
162
163
164void psb_gtt_roll(struct drm_device *dev, struct gtt_range *r, int roll)
165{
166 u32 __iomem *gtt_slot;
167 u32 pte;
168 int i;
169
170 if (roll >= r->npage) {
171 WARN_ON(1);
172 return;
173 }
174
175 r->roll = roll;
176
177
178
179 if (!r->stolen && !r->in_gart)
180 return;
181
182 gtt_slot = psb_gtt_entry(dev, r);
183
184 for (i = r->roll; i < r->npage; i++) {
185 pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]),
186 PSB_MMU_CACHED_MEMORY);
187 iowrite32(pte, gtt_slot++);
188 }
189 for (i = 0; i < r->roll; i++) {
190 pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]),
191 PSB_MMU_CACHED_MEMORY);
192 iowrite32(pte, gtt_slot++);
193 }
194 ioread32(gtt_slot - 1);
195}
196
197
198
199
200
201
202
203
204
205static int psb_gtt_attach_pages(struct gtt_range *gt)
206{
207 struct page **pages;
208
209 WARN_ON(gt->pages);
210
211 pages = drm_gem_get_pages(>->gem);
212 if (IS_ERR(pages))
213 return PTR_ERR(pages);
214
215 gt->npage = gt->gem.size / PAGE_SIZE;
216 gt->pages = pages;
217
218 return 0;
219}
220
221
222
223
224
225
226
227
228
229
230static void psb_gtt_detach_pages(struct gtt_range *gt)
231{
232 drm_gem_put_pages(>->gem, gt->pages, true, false);
233 gt->pages = NULL;
234}
235
236
237
238
239
240
241
242
243
244
245
/*
 *	psb_gtt_pin	-	pin a range into the GTT
 *	@gt: the range to pin
 *
 *	Take a pin reference on the range. On the first pin of a
 *	non-stolen range the backing pages are attached, written into the
 *	GTT and mirrored into the GPU MMU page tables. Stolen ranges are
 *	already mapped, so only the count is bumped.
 *
 *	Returns 0 on success or a negative errno on failure; on failure
 *	the pin count is unchanged.
 */
int psb_gtt_pin(struct gtt_range *gt)
{
	int ret = 0;
	struct drm_device *dev = gt->gem.dev;
	struct drm_psb_private *dev_priv = dev->dev_private;
	u32 gpu_base = dev_priv->gtt.gatt_start;

	mutex_lock(&dev_priv->gtt_mutex);

	if (gt->in_gart == 0 && gt->stolen == 0) {
		/* First pin of a GEM-backed range: map it fully */
		ret = psb_gtt_attach_pages(gt);
		if (ret < 0)
			goto out;
		ret = psb_gtt_insert(dev, gt, 0);
		if (ret < 0) {
			/* Roll back the page attach on insert failure */
			psb_gtt_detach_pages(gt);
			goto out;
		}
		/* Mirror the mapping into the GPU MMU page tables */
		psb_mmu_insert_pages(psb_mmu_get_default_pd(dev_priv->mmu),
				     gt->pages, (gpu_base + gt->offset),
				     gt->npage, 0, 0, PSB_MMU_CACHED_MEMORY);
	}
	gt->in_gart++;	/* pin reference count */
out:
	mutex_unlock(&dev_priv->gtt_mutex);
	return ret;
}
273
274
275
276
277
278
279
280
281
282
283
284
/*
 *	psb_gtt_unpin	-	drop one pin reference on a range
 *	@gt: the range to unpin
 *
 *	Undo one psb_gtt_pin. When the count of a non-stolen range drops
 *	to zero, its GPU MMU mapping and GTT entries are torn down and
 *	the backing pages released. The blitter is idled first so the
 *	GPU cannot still be reading the pages we are about to unmap.
 */
void psb_gtt_unpin(struct gtt_range *gt)
{
	struct drm_device *dev = gt->gem.dev;
	struct drm_psb_private *dev_priv = dev->dev_private;
	u32 gpu_base = dev_priv->gtt.gatt_start;
	int ret;

	mutex_lock(&dev_priv->gtt_mutex);

	/* Wait for any blitter activity on the range to finish */
	ret = gma_blt_wait_idle(dev_priv);
	if (ret) {
		/* NOTE(review): on failure the pin count is left intact,
		   so the range stays mapped and is effectively leaked */
		DRM_ERROR("Failed to idle the blitter, unpin failed!");
		goto out;
	}

	WARN_ON(!gt->in_gart);

	gt->in_gart--;
	if (gt->in_gart == 0 && gt->stolen == 0) {
		/* Last unpin: remove MMU mapping, GTT entries and pages */
		psb_mmu_remove_pages(psb_mmu_get_default_pd(dev_priv->mmu),
				     (gpu_base + gt->offset), gt->npage, 0, 0);
		psb_gtt_remove(dev, gt);
		psb_gtt_detach_pages(gt);
	}

out:
	mutex_unlock(&dev_priv->gtt_mutex);
}
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335struct gtt_range *psb_gtt_alloc_range(struct drm_device *dev, int len,
336 const char *name, int backed, u32 align)
337{
338 struct drm_psb_private *dev_priv = dev->dev_private;
339 struct gtt_range *gt;
340 struct resource *r = dev_priv->gtt_mem;
341 int ret;
342 unsigned long start, end;
343
344 if (backed) {
345
346 start = r->start;
347 end = r->start + dev_priv->gtt.stolen_size - 1;
348 } else {
349
350 start = r->start + dev_priv->gtt.stolen_size;
351 end = r->end;
352 }
353
354 gt = kzalloc(sizeof(struct gtt_range), GFP_KERNEL);
355 if (gt == NULL)
356 return NULL;
357 gt->resource.name = name;
358 gt->stolen = backed;
359 gt->in_gart = backed;
360 gt->roll = 0;
361
362 gt->gem.dev = dev;
363 ret = allocate_resource(dev_priv->gtt_mem, >->resource,
364 len, start, end, align, NULL, NULL);
365 if (ret == 0) {
366 gt->offset = gt->resource.start - r->start;
367 return gt;
368 }
369 kfree(gt);
370 return NULL;
371}
372
373
374
375
376
377
378
379
380
381void psb_gtt_free_range(struct drm_device *dev, struct gtt_range *gt)
382{
383
384 if (gt->mmapping) {
385 psb_gtt_unpin(gt);
386 gt->mmapping = 0;
387 }
388 WARN_ON(gt->in_gart && !gt->stolen);
389 release_resource(>->resource);
390 kfree(gt);
391}
392
/* One-time GTT bookkeeping setup: currently just the rw semaphore */
static void psb_gtt_alloc(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = dev->dev_private;
	init_rwsem(&dev_priv->gtt.sem);
}
398
399void psb_gtt_takedown(struct drm_device *dev)
400{
401 struct drm_psb_private *dev_priv = dev->dev_private;
402
403 if (dev_priv->gtt_map) {
404 iounmap(dev_priv->gtt_map);
405 dev_priv->gtt_map = NULL;
406 }
407 if (dev_priv->gtt_initialized) {
408 pci_write_config_word(dev->pdev, PSB_GMCH_CTRL,
409 dev_priv->gmch_ctrl);
410 PSB_WVDC32(dev_priv->pge_ctl, PSB_PGETBL_CTL);
411 (void) PSB_RVDC32(PSB_PGETBL_CTL);
412 }
413 if (dev_priv->vram_addr)
414 iounmap(dev_priv->gtt_map);
415}
416
/*
 *	psb_gtt_init	-	enable and populate the GTT
 *	@dev: our DRM device
 *	@resume: non-zero when called on resume; skips one-time mutex
 *		 setup and the ioremaps, which are still live
 *
 *	Enable the GMCH and the GPU page table, locate the GTT and GATT
 *	PCI BARs (with CDV fallbacks when the BARs are unset), map the
 *	GTT and the stolen VRAM, then fill the GTT: stolen pages first,
 *	the scratch page for every remaining slot.
 *
 *	Returns 0 on success or a negative errno; on error the partially
 *	initialized state is torn down via psb_gtt_takedown().
 */
int psb_gtt_init(struct drm_device *dev, int resume)
{
	struct drm_psb_private *dev_priv = dev->dev_private;
	unsigned gtt_pages;
	unsigned long stolen_size, vram_stolen_size;
	unsigned i, num_pages;
	unsigned pfn_base;
	struct psb_gtt *pg;

	int ret = 0;
	uint32_t pte;

	if (!resume) {
		/* One-time lock setup; must not be re-run on resume */
		mutex_init(&dev_priv->gtt_mutex);
		mutex_init(&dev_priv->mmap_mutex);
		psb_gtt_alloc(dev);
	}

	pg = &dev_priv->gtt;

	/* Enable the GMCH, preserving the original control word */
	pci_read_config_word(dev->pdev, PSB_GMCH_CTRL, &dev_priv->gmch_ctrl);
	pci_write_config_word(dev->pdev, PSB_GMCH_CTRL,
			      dev_priv->gmch_ctrl | _PSB_GMCH_ENABLED);

	/* Enable the GPU page table; read back to post the write */
	dev_priv->pge_ctl = PSB_RVDC32(PSB_PGETBL_CTL);
	PSB_WVDC32(dev_priv->pge_ctl | _PSB_PGETBL_ENABLED, PSB_PGETBL_CTL);
	(void) PSB_RVDC32(PSB_PGETBL_CTL);

	/* From here on takedown must restore the saved register state */
	dev_priv->gtt_initialized = 1;

	pg->gtt_phys_start = dev_priv->pge_ctl & PAGE_MASK;

	/* Fixed GPU-virtual base for the GATT; 0xE0000000 avoids a
	   hardware quirk at 0xD0000000 (per driver history) */
	pg->mmu_gatt_start = 0xE0000000;

	pg->gtt_start = pci_resource_start(dev->pdev, PSB_GTT_RESOURCE);
	gtt_pages = pci_resource_len(dev->pdev, PSB_GTT_RESOURCE)
								>> PAGE_SHIFT;
	/* Fallback when the GTT BAR is not reported (seen on CDV) */
	if (pg->gtt_start == 0 || gtt_pages == 0) {
		dev_dbg(dev->dev, "GTT PCI BAR not initialized.\n");
		gtt_pages = 64;
		pg->gtt_start = dev_priv->pge_ctl;
	}

	pg->gatt_start = pci_resource_start(dev->pdev, PSB_GATT_RESOURCE);
	pg->gatt_pages = pci_resource_len(dev->pdev, PSB_GATT_RESOURCE)
								>> PAGE_SHIFT;
	dev_priv->gtt_mem = &dev->pdev->resource[PSB_GATT_RESOURCE];

	if (pg->gatt_pages == 0 || pg->gatt_start == 0) {
		static struct resource fudge;
		/* GATT BAR unset: fabricate a 128MB resource so the
		   allocator has an address space to hand out. The GTT is
		   a GPU view into memory, so this space need not match
		   any CPU addresses. */
		dev_dbg(dev->dev, "GATT PCI BAR not initialized.\n");
		pg->gatt_start = 0x40000000;
		pg->gatt_pages = (128 * 1024 * 1024) >> PAGE_SHIFT;
		fudge.start = 0x40000000;
		fudge.end = 0x40000000 + 128 * 1024 * 1024 - 1;
		fudge.name = "fudge";
		fudge.flags = IORESOURCE_MEM;
		dev_priv->gtt_mem = &fudge;
	}

	/* Stolen memory runs from the BSM base up to the GTT, minus one
	   page (the page table itself sits at gtt_phys_start) */
	pci_read_config_dword(dev->pdev, PSB_BSM, &dev_priv->stolen_base);
	vram_stolen_size = pg->gtt_phys_start - dev_priv->stolen_base
								- PAGE_SIZE;

	stolen_size = vram_stolen_size;

	dev_dbg(dev->dev, "Stolen memory base 0x%x, size %luK\n",
		dev_priv->stolen_base, vram_stolen_size / 1024);

	/* NOTE(review): with '&&' a mismatch in only ONE of gtt_pages or
	   stolen_size slips through on resume; '||' looks intended —
	   confirm before changing */
	if (resume && (gtt_pages != pg->gtt_pages) &&
	    (stolen_size != pg->stolen_size)) {
		dev_err(dev->dev, "GTT resume error.\n");
		ret = -EINVAL;
		goto out_err;
	}

	pg->gtt_pages = gtt_pages;
	pg->stolen_size = stolen_size;
	dev_priv->vram_stolen_size = vram_stolen_size;

	/* Map the GTT page table (uncached) — on resume the mapping is
	   still present from the initial init */
	if (!resume)
		dev_priv->gtt_map = ioremap_nocache(pg->gtt_phys_start,
						    gtt_pages << PAGE_SHIFT);
	if (!dev_priv->gtt_map) {
		dev_err(dev->dev, "Failure to map gtt.\n");
		ret = -ENOMEM;
		goto out_err;
	}

	/* Map the stolen VRAM write-combined */
	if (!resume)
		dev_priv->vram_addr = ioremap_wc(dev_priv->stolen_base,
						 stolen_size);

	if (!dev_priv->vram_addr) {
		dev_err(dev->dev, "Failure to map stolen base.\n");
		ret = -ENOMEM;
		goto out_err;
	}

	/* Insert the stolen VRAM pages into the start of the GTT */
	pfn_base = dev_priv->stolen_base >> PAGE_SHIFT;
	num_pages = vram_stolen_size >> PAGE_SHIFT;
	dev_dbg(dev->dev, "Set up %d stolen pages starting at 0x%08x, GTT offset %dK\n",
		num_pages, pfn_base << PAGE_SHIFT, 0);
	for (i = 0; i < num_pages; ++i) {
		pte = psb_gtt_mask_pte(pfn_base + i, PSB_MMU_CACHED_MEMORY);
		iowrite32(pte, dev_priv->gtt_map + i);
	}

	/* Point every remaining slot at the scratch page so stray GPU
	   accesses cannot scribble over real memory */
	pfn_base = page_to_pfn(dev_priv->scratch_page);
	pte = psb_gtt_mask_pte(pfn_base, PSB_MMU_CACHED_MEMORY);
	for (; i < gtt_pages; ++i)
		iowrite32(pte, dev_priv->gtt_map + i);

	/* Read back the last slot to post all the writes */
	(void) ioread32(dev_priv->gtt_map + i - 1);
	return 0;

out_err:
	psb_gtt_takedown(dev);
	return ret;
}
564
565int psb_gtt_restore(struct drm_device *dev)
566{
567 struct drm_psb_private *dev_priv = dev->dev_private;
568 struct resource *r = dev_priv->gtt_mem->child;
569 struct gtt_range *range;
570 unsigned int restored = 0, total = 0, size = 0;
571
572
573 mutex_lock(&dev_priv->gtt_mutex);
574 psb_gtt_init(dev, 1);
575
576 while (r != NULL) {
577 range = container_of(r, struct gtt_range, resource);
578 if (range->pages) {
579 psb_gtt_insert(dev, range, 1);
580 size += range->resource.end - range->resource.start;
581 restored++;
582 }
583 r = r->sibling;
584 total++;
585 }
586 mutex_unlock(&dev_priv->gtt_mutex);
587 DRM_DEBUG_DRIVER("Restored %u of %u gtt ranges (%u KB)", restored,
588 total, (size / 1024));
589
590 return 0;
591}
592