1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22#include <drm/drmP.h>
23#include <linux/shmem_fs.h>
24#include "psb_drv.h"
25
26
27
28
29
30
31
32
33
34
35
36
37
38static inline uint32_t psb_gtt_mask_pte(uint32_t pfn, int type)
39{
40 uint32_t mask = PSB_PTE_VALID;
41
42
43
44 BUG_ON(pfn & ~(0xFFFFFFFF >> PAGE_SHIFT));
45
46 if (type & PSB_MMU_CACHED_MEMORY)
47 mask |= PSB_PTE_CACHED;
48 if (type & PSB_MMU_RO_MEMORY)
49 mask |= PSB_PTE_RO;
50 if (type & PSB_MMU_WO_MEMORY)
51 mask |= PSB_PTE_WO;
52
53 return (pfn << PAGE_SHIFT) | mask;
54}
55
56
57
58
59
60
61
62
63
64static u32 __iomem *psb_gtt_entry(struct drm_device *dev, struct gtt_range *r)
65{
66 struct drm_psb_private *dev_priv = dev->dev_private;
67 unsigned long offset;
68
69 offset = r->resource.start - dev_priv->gtt_mem->start;
70
71 return dev_priv->gtt_map + (offset >> PAGE_SHIFT);
72}
73
74
75
76
77
78
79
80
81
82
83static int psb_gtt_insert(struct drm_device *dev, struct gtt_range *r,
84 int resume)
85{
86 u32 __iomem *gtt_slot;
87 u32 pte;
88 struct page **pages;
89 int i;
90
91 if (r->pages == NULL) {
92 WARN_ON(1);
93 return -EINVAL;
94 }
95
96 WARN_ON(r->stolen);
97
98 gtt_slot = psb_gtt_entry(dev, r);
99 pages = r->pages;
100
101 if (!resume) {
102
103 set_pages_array_wc(pages, r->npage);
104 }
105
106
107 for (i = r->roll; i < r->npage; i++) {
108 pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]), 0);
109 iowrite32(pte, gtt_slot++);
110 }
111 for (i = 0; i < r->roll; i++) {
112 pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]), 0);
113 iowrite32(pte, gtt_slot++);
114 }
115
116 ioread32(gtt_slot - 1);
117
118 return 0;
119}
120
121
122
123
124
125
126
127
128
129
130static void psb_gtt_remove(struct drm_device *dev, struct gtt_range *r)
131{
132 struct drm_psb_private *dev_priv = dev->dev_private;
133 u32 __iomem *gtt_slot;
134 u32 pte;
135 int i;
136
137 WARN_ON(r->stolen);
138
139 gtt_slot = psb_gtt_entry(dev, r);
140 pte = psb_gtt_mask_pte(page_to_pfn(dev_priv->scratch_page), 0);
141
142 for (i = 0; i < r->npage; i++)
143 iowrite32(pte, gtt_slot++);
144 ioread32(gtt_slot - 1);
145 set_pages_array_wb(r->pages, r->npage);
146}
147
148
149
150
151
152
153
154
155
156
157
158void psb_gtt_roll(struct drm_device *dev, struct gtt_range *r, int roll)
159{
160 u32 __iomem *gtt_slot;
161 u32 pte;
162 int i;
163
164 if (roll >= r->npage) {
165 WARN_ON(1);
166 return;
167 }
168
169 r->roll = roll;
170
171
172
173 if (!r->stolen && !r->in_gart)
174 return;
175
176 gtt_slot = psb_gtt_entry(dev, r);
177
178 for (i = r->roll; i < r->npage; i++) {
179 pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]), 0);
180 iowrite32(pte, gtt_slot++);
181 }
182 for (i = 0; i < r->roll; i++) {
183 pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]), 0);
184 iowrite32(pte, gtt_slot++);
185 }
186 ioread32(gtt_slot - 1);
187}
188
189
190
191
192
193
194
195
196
197static int psb_gtt_attach_pages(struct gtt_range *gt)
198{
199 struct inode *inode;
200 struct address_space *mapping;
201 int i;
202 struct page *p;
203 int pages = gt->gem.size / PAGE_SIZE;
204
205 WARN_ON(gt->pages);
206
207
208 inode = file_inode(gt->gem.filp);
209 mapping = inode->i_mapping;
210
211 gt->pages = kmalloc(pages * sizeof(struct page *), GFP_KERNEL);
212 if (gt->pages == NULL)
213 return -ENOMEM;
214 gt->npage = pages;
215
216 for (i = 0; i < pages; i++) {
217 p = shmem_read_mapping_page(mapping, i);
218 if (IS_ERR(p))
219 goto err;
220 gt->pages[i] = p;
221 }
222 return 0;
223
224err:
225 while (i--)
226 page_cache_release(gt->pages[i]);
227 kfree(gt->pages);
228 gt->pages = NULL;
229 return PTR_ERR(p);
230}
231
232
233
234
235
236
237
238
239
240
241static void psb_gtt_detach_pages(struct gtt_range *gt)
242{
243 int i;
244 for (i = 0; i < gt->npage; i++) {
245
246 set_page_dirty(gt->pages[i]);
247 page_cache_release(gt->pages[i]);
248 }
249 kfree(gt->pages);
250 gt->pages = NULL;
251}
252
253
254
255
256
257
258
259
260
261
262
263int psb_gtt_pin(struct gtt_range *gt)
264{
265 int ret = 0;
266 struct drm_device *dev = gt->gem.dev;
267 struct drm_psb_private *dev_priv = dev->dev_private;
268
269 mutex_lock(&dev_priv->gtt_mutex);
270
271 if (gt->in_gart == 0 && gt->stolen == 0) {
272 ret = psb_gtt_attach_pages(gt);
273 if (ret < 0)
274 goto out;
275 ret = psb_gtt_insert(dev, gt, 0);
276 if (ret < 0) {
277 psb_gtt_detach_pages(gt);
278 goto out;
279 }
280 }
281 gt->in_gart++;
282out:
283 mutex_unlock(&dev_priv->gtt_mutex);
284 return ret;
285}
286
287
288
289
290
291
292
293
294
295
296
297
298void psb_gtt_unpin(struct gtt_range *gt)
299{
300 struct drm_device *dev = gt->gem.dev;
301 struct drm_psb_private *dev_priv = dev->dev_private;
302
303 mutex_lock(&dev_priv->gtt_mutex);
304
305 WARN_ON(!gt->in_gart);
306
307 gt->in_gart--;
308 if (gt->in_gart == 0 && gt->stolen == 0) {
309 psb_gtt_remove(dev, gt);
310 psb_gtt_detach_pages(gt);
311 }
312 mutex_unlock(&dev_priv->gtt_mutex);
313}
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333struct gtt_range *psb_gtt_alloc_range(struct drm_device *dev, int len,
334 const char *name, int backed)
335{
336 struct drm_psb_private *dev_priv = dev->dev_private;
337 struct gtt_range *gt;
338 struct resource *r = dev_priv->gtt_mem;
339 int ret;
340 unsigned long start, end;
341
342 if (backed) {
343
344 start = r->start;
345 end = r->start + dev_priv->gtt.stolen_size - 1;
346 } else {
347
348 start = r->start + dev_priv->gtt.stolen_size;
349 end = r->end;
350 }
351
352 gt = kzalloc(sizeof(struct gtt_range), GFP_KERNEL);
353 if (gt == NULL)
354 return NULL;
355 gt->resource.name = name;
356 gt->stolen = backed;
357 gt->in_gart = backed;
358 gt->roll = 0;
359
360 gt->gem.dev = dev;
361 ret = allocate_resource(dev_priv->gtt_mem, >->resource,
362 len, start, end, PAGE_SIZE, NULL, NULL);
363 if (ret == 0) {
364 gt->offset = gt->resource.start - r->start;
365 return gt;
366 }
367 kfree(gt);
368 return NULL;
369}
370
371
372
373
374
375
376
377
378
/*
 *	psb_gtt_free_range	-	release a GTT range
 *	@dev: our DRM device
 *	@gt: the range to free
 *
 *	Drop any outstanding mmap pin, give the aperture space back to the
 *	resource tree and free the tracking structure.
 */
void psb_gtt_free_range(struct drm_device *dev, struct gtt_range *gt)
{
	/* Undo the mmap pin if we are destroying the object */
	if (gt->mmapping) {
		psb_gtt_unpin(gt);
		gt->mmapping = 0;
	}
	/* A non-stolen range should have no pins left at this point */
	WARN_ON(gt->in_gart && !gt->stolen);
	release_resource(&gt->resource);
	kfree(gt);
}
390
/*
 *	psb_gtt_alloc	-	one-time GTT bookkeeping setup
 *	@dev: our DRM device
 *
 *	Currently only initializes the rw-semaphore protecting the GTT
 *	state; called once from psb_gtt_init() on first init (not resume).
 */
static void psb_gtt_alloc(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = dev->dev_private;
	init_rwsem(&dev_priv->gtt.sem);
}
396
397void psb_gtt_takedown(struct drm_device *dev)
398{
399 struct drm_psb_private *dev_priv = dev->dev_private;
400
401 if (dev_priv->gtt_map) {
402 iounmap(dev_priv->gtt_map);
403 dev_priv->gtt_map = NULL;
404 }
405 if (dev_priv->gtt_initialized) {
406 pci_write_config_word(dev->pdev, PSB_GMCH_CTRL,
407 dev_priv->gmch_ctrl);
408 PSB_WVDC32(dev_priv->pge_ctl, PSB_PGETBL_CTL);
409 (void) PSB_RVDC32(PSB_PGETBL_CTL);
410 }
411 if (dev_priv->vram_addr)
412 iounmap(dev_priv->gtt_map);
413}
414
/*
 *	psb_gtt_init	-	enable and map the GTT
 *	@dev: our DRM device
 *	@resume: non-zero when called from the resume path; mutexes and
 *		 iomappings created on first init are then reused
 *
 *	Enable the GMCH and the GTT page table, locate the aperture and
 *	stolen memory, map the page table, and point every GTT entry at
 *	either stolen RAM or the scratch page. Returns 0 on success or a
 *	negative errno (after unwinding via psb_gtt_takedown()).
 */
int psb_gtt_init(struct drm_device *dev, int resume)
{
	struct drm_psb_private *dev_priv = dev->dev_private;
	unsigned gtt_pages;
	unsigned long stolen_size, vram_stolen_size;
	unsigned i, num_pages;
	unsigned pfn_base;
	struct psb_gtt *pg;

	int ret = 0;
	uint32_t pte;

	if (!resume) {
		mutex_init(&dev_priv->gtt_mutex);
		psb_gtt_alloc(dev);
	}

	pg = &dev_priv->gtt;

	/* Enable the GMCH and the GTT page table */
	pci_read_config_word(dev->pdev, PSB_GMCH_CTRL, &dev_priv->gmch_ctrl);
	pci_write_config_word(dev->pdev, PSB_GMCH_CTRL,
			      dev_priv->gmch_ctrl | _PSB_GMCH_ENABLED);

	dev_priv->pge_ctl = PSB_RVDC32(PSB_PGETBL_CTL);
	PSB_WVDC32(dev_priv->pge_ctl | _PSB_PGETBL_ENABLED, PSB_PGETBL_CTL);
	(void) PSB_RVDC32(PSB_PGETBL_CTL);

	/* From this point on, failures must unwind via psb_gtt_takedown() */
	dev_priv->gtt_initialized = 1;

	pg->gtt_phys_start = dev_priv->pge_ctl & PAGE_MASK;

	/* NOTE(review): fixed GATT address — presumably the hardware wires
	   the MMU aperture at 0xE0000000; confirm against platform docs */
	pg->mmu_gatt_start = 0xE0000000;

	pg->gtt_start = pci_resource_start(dev->pdev, PSB_GTT_RESOURCE);
	gtt_pages = pci_resource_len(dev->pdev, PSB_GTT_RESOURCE)
								>> PAGE_SHIFT;
	/* Some firmware leaves the GTT BAR unset; fall back to defaults */
	if (pg->gtt_start == 0 || gtt_pages == 0) {
		dev_dbg(dev->dev, "GTT PCI BAR not initialized.\n");
		gtt_pages = 64;
		pg->gtt_start = dev_priv->pge_ctl;
	}

	pg->gatt_start = pci_resource_start(dev->pdev, PSB_GATT_RESOURCE);
	pg->gatt_pages = pci_resource_len(dev->pdev, PSB_GATT_RESOURCE)
								>> PAGE_SHIFT;
	dev_priv->gtt_mem = &dev->pdev->resource[PSB_GATT_RESOURCE];

	if (pg->gatt_pages == 0 || pg->gatt_start == 0) {
		static struct resource fudge;	/* Preferably peppermint */
		/* GATT BAR not set up by firmware either: invent a 128MB
		   aperture resource so allocation still has a range */
		dev_dbg(dev->dev, "GATT PCI BAR not initialized.\n");
		pg->gatt_start = 0x40000000;
		pg->gatt_pages = (128 * 1024 * 1024) >> PAGE_SHIFT;

		fudge.start = 0x40000000;
		fudge.end = 0x40000000 + 128 * 1024 * 1024 - 1;
		fudge.name = "fudge";
		fudge.flags = IORESOURCE_MEM;
		dev_priv->gtt_mem = &fudge;
	}

	/* Stolen memory runs from the base of stolen memory (BSM) up to
	   the GTT table, less one page */
	pci_read_config_dword(dev->pdev, PSB_BSM, &dev_priv->stolen_base);
	vram_stolen_size = pg->gtt_phys_start - dev_priv->stolen_base
								- PAGE_SIZE;

	stolen_size = vram_stolen_size;

	dev_dbg(dev->dev, "Stolen memory base 0x%x, size %luK\n",
			dev_priv->stolen_base, vram_stolen_size / 1024);

	/* NOTE(review): this only errors out when BOTH sizes changed across
	   resume; an || looks intended — confirm before changing */
	if (resume && (gtt_pages != pg->gtt_pages) &&
	    (stolen_size != pg->stolen_size)) {
		dev_err(dev->dev, "GTT resume error.\n");
		ret = -EINVAL;
		goto out_err;
	}

	pg->gtt_pages = gtt_pages;
	pg->stolen_size = stolen_size;
	dev_priv->vram_stolen_size = vram_stolen_size;

	/* Map the GTT page table itself (uncached); on resume the mapping
	   from the first init is still live */
	if (!resume)
		dev_priv->gtt_map = ioremap_nocache(pg->gtt_phys_start,
						gtt_pages << PAGE_SHIFT);
	if (!dev_priv->gtt_map) {
		dev_err(dev->dev, "Failure to map gtt.\n");
		ret = -ENOMEM;
		goto out_err;
	}

	/* Map the stolen RAM write-combined for framebuffer use */
	if (!resume)
		dev_priv->vram_addr = ioremap_wc(dev_priv->stolen_base,
						 stolen_size);
	if (!dev_priv->vram_addr) {
		dev_err(dev->dev, "Failure to map stolen base.\n");
		ret = -ENOMEM;
		goto out_err;
	}

	/* Insert vram stolen pages into the GTT, starting at entry 0 */
	pfn_base = dev_priv->stolen_base >> PAGE_SHIFT;
	num_pages = vram_stolen_size >> PAGE_SHIFT;
	dev_dbg(dev->dev, "Set up %d stolen pages starting at 0x%08x, GTT offset %dK\n",
		num_pages, pfn_base << PAGE_SHIFT, 0);
	for (i = 0; i < num_pages; ++i) {
		pte = psb_gtt_mask_pte(pfn_base + i, 0);
		iowrite32(pte, dev_priv->gtt_map + i);
	}

	/* Point the remaining GTT entries at the scratch page so nothing
	   references unmapped memory */
	pfn_base = page_to_pfn(dev_priv->scratch_page);
	pte = psb_gtt_mask_pte(pfn_base, 0);
	for (; i < gtt_pages; ++i)
		iowrite32(pte, dev_priv->gtt_map + i);

	/* Read back the last entry to post all the PTE writes */
	(void) ioread32(dev_priv->gtt_map + i - 1);
	return 0;

out_err:
	psb_gtt_takedown(dev);
	return ret;
}
560
561int psb_gtt_restore(struct drm_device *dev)
562{
563 struct drm_psb_private *dev_priv = dev->dev_private;
564 struct resource *r = dev_priv->gtt_mem->child;
565 struct gtt_range *range;
566 unsigned int restored = 0, total = 0, size = 0;
567
568
569 mutex_lock(&dev_priv->gtt_mutex);
570 psb_gtt_init(dev, 1);
571
572 while (r != NULL) {
573 range = container_of(r, struct gtt_range, resource);
574 if (range->pages) {
575 psb_gtt_insert(dev, range, 1);
576 size += range->resource.end - range->resource.start;
577 restored++;
578 }
579 r = r->sibling;
580 total++;
581 }
582 mutex_unlock(&dev_priv->gtt_mutex);
583 DRM_DEBUG_DRIVER("Restored %u of %u gtt ranges (%u KB)", restored,
584 total, (size / 1024));
585
586 return 0;
587}
588