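/*
 * GTT (graphics translation table) handling for the psb DRM driver:
 * page table entry management, pinning of GEM backing pages and
 * allocation of GTT address space.
 */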
#include <drm/drmP.h>
#include "psb_drv.h"
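
/**
 * psb_gtt_mask_pte - generate a GTT pte entry
 * @pfn: page frame number to encode
 * @type: type of memory in the GTT
 *
 * Build a GTT page table entry for @pfn, OR-ing in the valid bit and the
 * cached/read-only/write-only bits selected by @type.
 */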
static inline uint32_t psb_gtt_mask_pte(uint32_t pfn, int type)
{
	uint32_t mask = PSB_PTE_VALID;

	if (type & PSB_MMU_CACHED_MEMORY)
		mask |= PSB_PTE_CACHED;
	if (type & PSB_MMU_RO_MEMORY)
		mask |= PSB_PTE_RO;
	if (type & PSB_MMU_WO_MEMORY)
		mask |= PSB_PTE_WO;

	return (pfn << PAGE_SHIFT) | mask;
}
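
/**
 * psb_gtt_entry - find the GTT entries for a gtt_range
 * @dev: our DRM device
 * @r: our GTT range
 *
 * Given a gtt_range object, return a pointer to the first GTT page table
 * entry that maps it, based on its offset within the GTT aperture resource.
 */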
u32 *psb_gtt_entry(struct drm_device *dev, struct gtt_range *r)
{
	struct drm_psb_private *dev_priv = dev->dev_private;
	unsigned long offset;

	offset = r->resource.start - dev_priv->gtt_mem->start;

	return dev_priv->gtt_map + (offset >> PAGE_SHIFT);
}
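
/**
 * psb_gtt_insert - put an object into the GTT
 * @dev: our DRM device
 * @r: our GTT range
 *
 * Write the page table entries for the pages backing @r into the GTT.
 * The range must already have backing pages attached and must not be a
 * stolen-memory object.
 */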
static int psb_gtt_insert(struct drm_device *dev, struct gtt_range *r)
{
	struct drm_psb_private *dev_priv = dev->dev_private;
	u32 *gtt_slot, pte;
	int numpages = (r->resource.end + 1 - r->resource.start) >> PAGE_SHIFT;
	struct page **pages;
	int i;

	if (r->pages == NULL) {
		WARN_ON(1);
		return -EINVAL;
	}

	WARN_ON(r->stolen);

	gtt_slot = psb_gtt_entry(dev, r);
	pages = r->pages;

	/* Flush and invalidate the CPU caches so the GPU sees the pages */
	wbinvd();

	/* Write our page table entries into the GTT itself */
	for (i = 0; i < numpages; i++) {
		pte = psb_gtt_mask_pte(page_to_pfn(*pages++), 0);
		iowrite32(pte, gtt_slot++);
	}

	/* Read back the last entry to post the writes */
	ioread32(gtt_slot - 1);

	return 0;
}
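
/**
 * psb_gtt_remove - remove an object from the GTT
 * @dev: our DRM device
 * @r: our GTT range
 *
 * Remove a preallocated GTT range from the GTT by pointing all of its
 * page table entries back at the scratch page.
 */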
static void psb_gtt_remove(struct drm_device *dev, struct gtt_range *r)
{
	struct drm_psb_private *dev_priv = dev->dev_private;
	u32 *gtt_slot, pte;
	int numpages = (r->resource.end + 1 - r->resource.start) >> PAGE_SHIFT;
	int i;

	WARN_ON(r->stolen);

	gtt_slot = psb_gtt_entry(dev, r);
	pte = psb_gtt_mask_pte(page_to_pfn(dev_priv->scratch_page), 0);

	/* Point every entry back at the scratch page */
	for (i = 0; i < numpages; i++)
		iowrite32(pte, gtt_slot++);

	/* Read back the last entry to post the writes */
	ioread32(gtt_slot - 1);
}
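
/**
 * psb_gtt_attach_pages - attach and pin GEM pages
 * @gt: the gtt range
 *
 * Pin the pages backing the GEM object and build an in-kernel array of
 * page pointers for them. While the references are held the pages cannot
 * be swapped out.
 */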
static int psb_gtt_attach_pages(struct gtt_range *gt)
{
	struct inode *inode;
	struct address_space *mapping;
	int i;
	struct page *p;
	int pages = (gt->resource.end + 1 - gt->resource.start) >> PAGE_SHIFT;

	WARN_ON(gt->pages);

	/* The shmem file that backs the GEM object */
	inode = gt->gem.filp->f_path.dentry->d_inode;
	mapping = inode->i_mapping;

	gt->pages = kmalloc(pages * sizeof(struct page *), GFP_KERNEL);
	if (gt->pages == NULL)
		return -ENOMEM;

	for (i = 0; i < pages; i++) {
		/* Grab each backing page, taking a reference on it */
		p = read_cache_page_gfp(mapping, i,
					__GFP_COLD | GFP_KERNEL);
		if (IS_ERR(p))
			goto err;
		gt->pages[i] = p;
	}
	return 0;

err:
	while (i--)
		page_cache_release(gt->pages[i]);
	kfree(gt->pages);
	gt->pages = NULL;
	return PTR_ERR(p);
}
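
/**
 * psb_gtt_detach_pages - release GEM pages
 * @gt: the gtt range
 *
 * Undo the effect of psb_gtt_attach_pages. At this point the pages must
 * already have been removed from the GTT, as they could now be swapped
 * out and change bus address.
 */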
static void psb_gtt_detach_pages(struct gtt_range *gt)
{
	int i;
	int pages = (gt->resource.end + 1 - gt->resource.start) >> PAGE_SHIFT;

	for (i = 0; i < pages; i++) {
		/* Mark the page dirty in case it was written via the GTT */
		set_page_dirty(gt->pages[i]);
		/* Drop the reference taken in psb_gtt_attach_pages */
		page_cache_release(gt->pages[i]);
	}
	kfree(gt->pages);
	gt->pages = NULL;
}
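
/**
 * psb_gtt_pin - pin pages into the GTT
 * @gt: range to pin
 *
 * Pin a set of pages into the GTT. The pins are reference counted, so
 * multiple pins need multiple unpins to undo. On the first pin of a
 * non-stolen object the backing pages are attached and the GTT entries
 * written.
 */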
int psb_gtt_pin(struct gtt_range *gt)
{
	int ret = 0;
	struct drm_device *dev = gt->gem.dev;
	struct drm_psb_private *dev_priv = dev->dev_private;

	mutex_lock(&dev_priv->gtt_mutex);

	if (gt->in_gart == 0 && gt->stolen == 0) {
		ret = psb_gtt_attach_pages(gt);
		if (ret < 0)
			goto out;
		ret = psb_gtt_insert(dev, gt);
		if (ret < 0) {
			psb_gtt_detach_pages(gt);
			goto out;
		}
	}
	gt->in_gart++;
out:
	mutex_unlock(&dev_priv->gtt_mutex);
	return ret;
}
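
/**
 * psb_gtt_unpin - unpin pages from the GTT
 * @gt: range to unpin
 *
 * Undo the effect of psb_gtt_pin. On the final unpin of a non-stolen
 * object the GTT entries are pointed back at the scratch page and the
 * backing pages released.
 */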
void psb_gtt_unpin(struct gtt_range *gt)
{
	struct drm_device *dev = gt->gem.dev;
	struct drm_psb_private *dev_priv = dev->dev_private;

	mutex_lock(&dev_priv->gtt_mutex);

	WARN_ON(!gt->in_gart);

	gt->in_gart--;
	if (gt->in_gart == 0 && gt->stolen == 0) {
		psb_gtt_remove(dev, gt);
		psb_gtt_detach_pages(gt);
	}
	mutex_unlock(&dev_priv->gtt_mutex);
}
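
/*
 * GTT resource allocator - allocate and manage GTT address space
 */

/**
 * psb_gtt_alloc_range - allocate GTT address space
 * @dev: our DRM device
 * @len: length (bytes) of address space required
 * @name: resource name
 * @backed: resource should be backed by stolen pages
 *
 * Ask the kernel resource allocator for a suitable range of GTT addresses.
 * Stolen-backed ranges come from the start of the GTT (the stolen area),
 * other ranges from the remainder.
 *
 * Returns a gtt_range structure describing the object, or NULL on error.
 * On success the resource is allocated and marked as in use.
 */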
struct gtt_range *psb_gtt_alloc_range(struct drm_device *dev, int len,
					const char *name, int backed)
{
	struct drm_psb_private *dev_priv = dev->dev_private;
	struct gtt_range *gt;
	struct resource *r = dev_priv->gtt_mem;
	int ret;
	unsigned long start, end;

	if (backed) {
		/* The start of the GTT is backed by the stolen pages */
		start = r->start;
		end = r->start + dev_priv->pg->stolen_size - 1;
	} else {
		/* The rest is used for GEM backed objects */
		start = r->start + dev_priv->pg->stolen_size;
		end = r->end;
	}

	gt = kzalloc(sizeof(struct gtt_range), GFP_KERNEL);
	if (gt == NULL)
		return NULL;
	gt->resource.name = name;
	gt->stolen = backed;
	gt->in_gart = backed;
	gt->gem.dev = dev;
	kref_init(&gt->kref);

	ret = allocate_resource(dev_priv->gtt_mem, &gt->resource,
				len, start, end, PAGE_SIZE, NULL, NULL);
	if (ret == 0) {
		gt->offset = gt->resource.start - r->start;
		return gt;
	}
	kfree(gt);
	return NULL;
}
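
/**
 * psb_gtt_destroy - final free up of a gtt_range
 * @kref: the kref of the gtt_range
 *
 * Called by the kref machinery when the final reference to a GTT object
 * is dropped. Undoes any remaining mmap pin and releases the address
 * space resource.
 */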
static void psb_gtt_destroy(struct kref *kref)
{
	struct gtt_range *gt = container_of(kref, struct gtt_range, kref);

	/* Undo the mmap pin if we are destroying the object */
	if (gt->mmapping) {
		psb_gtt_unpin(gt);
		gt->mmapping = 0;
	}
	WARN_ON(gt->in_gart && !gt->stolen);
	release_resource(&gt->resource);
	kfree(gt);
}
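
/**
 * psb_gtt_kref_put - drop a reference to a GTT object
 * @gt: the GTT range being dropped
 *
 * Drop a reference to a psb gtt_range; the object is freed via
 * psb_gtt_destroy when the last reference goes away.
 */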
void psb_gtt_kref_put(struct gtt_range *gt)
{
	kref_put(&gt->kref, psb_gtt_destroy);
}
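
/**
 * psb_gtt_free_range - release GTT address space
 * @dev: our DRM device
 * @gt: a mapping created with psb_gtt_alloc_range
 *
 * Release a resource that was allocated with psb_gtt_alloc_range by
 * dropping the caller's reference to it.
 */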
void psb_gtt_free_range(struct drm_device *dev, struct gtt_range *gt)
{
	psb_gtt_kref_put(gt);
}
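
/**
 * psb_gtt_alloc - allocate the GTT bookkeeping structure
 * @dev: our DRM device
 *
 * Allocate and initialise the psb_gtt structure used to track the GTT
 * state for @dev.
 */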
struct psb_gtt *psb_gtt_alloc(struct drm_device *dev)
{
	struct psb_gtt *tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);

	if (!tmp)
		return NULL;

	init_rwsem(&tmp->sem);
	tmp->dev = dev;

	return tmp;
}
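
/**
 * psb_gtt_takedown - tear down the GTT
 * @dev: our DRM device
 *
 * Unmap the GTT, restore the saved GMCH control and page table control
 * register values and free the psb_gtt structure.
 */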
void psb_gtt_takedown(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = dev->dev_private;

	if (dev_priv->gtt_map) {
		iounmap(dev_priv->gtt_map);
		dev_priv->gtt_map = NULL;
	}
	if (dev_priv->gtt_initialized) {
		pci_write_config_word(dev->pdev, PSB_GMCH_CTRL,
				      dev_priv->gmch_ctrl);
		PSB_WVDC32(dev_priv->pge_ctl, PSB_PGETBL_CTL);
		(void) PSB_RVDC32(PSB_PGETBL_CTL);
	}
	kfree(dev_priv->pg);
	dev_priv->pg = NULL;
}
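
/**
 * psb_gtt_init - set up the GTT
 * @dev: our DRM device
 * @resume: non-zero if we are resuming rather than doing first-time init
 *
 * Enable the GMCH and the GTT page table, map the GTT and the stolen
 * memory area, insert the stolen pages into the GTT and point every
 * remaining entry at the scratch page.
 */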
int psb_gtt_init(struct drm_device *dev, int resume)
{
	struct drm_psb_private *dev_priv = dev->dev_private;
	unsigned gtt_pages;
	unsigned long stolen_size, vram_stolen_size;
	unsigned i, num_pages;
	unsigned pfn_base;
	uint32_t vram_pages;
	uint32_t tt_pages;
	uint32_t *ttm_gtt_map;
	uint32_t dvmt_mode = 0;
	struct psb_gtt *pg;
	int ret = 0;
	uint32_t pte;

	mutex_init(&dev_priv->gtt_mutex);

	dev_priv->pg = pg = psb_gtt_alloc(dev);
	if (pg == NULL)
		return -ENOMEM;

	/* Enable the GMCH and the GTT page table */
	pci_read_config_word(dev->pdev, PSB_GMCH_CTRL, &dev_priv->gmch_ctrl);
	pci_write_config_word(dev->pdev, PSB_GMCH_CTRL,
			      dev_priv->gmch_ctrl | _PSB_GMCH_ENABLED);

	dev_priv->pge_ctl = PSB_RVDC32(PSB_PGETBL_CTL);
	PSB_WVDC32(dev_priv->pge_ctl | _PSB_PGETBL_ENABLED, PSB_PGETBL_CTL);
	(void) PSB_RVDC32(PSB_PGETBL_CTL);

	/* The root resource we allocate address space from */
	dev_priv->gtt_mem = &dev->pdev->resource[PSB_GATT_RESOURCE];
	dev_priv->gtt_initialized = 1;

	pg->gtt_phys_start = dev_priv->pge_ctl & PAGE_MASK;
	pg->gatt_start = pci_resource_start(dev->pdev, PSB_GATT_RESOURCE);
	pg->mmu_gatt_start = 0xE0000000;
	pg->gtt_start = pci_resource_start(dev->pdev, PSB_GTT_RESOURCE);
	gtt_pages = pci_resource_len(dev->pdev, PSB_GTT_RESOURCE)
							>> PAGE_SHIFT;
	pg->gatt_pages = pci_resource_len(dev->pdev, PSB_GATT_RESOURCE)
							>> PAGE_SHIFT;

	pci_read_config_dword(dev->pdev, PSB_BSM, &dev_priv->stolen_base);
	vram_stolen_size = pg->gtt_phys_start - dev_priv->stolen_base
							- PAGE_SIZE;

	stolen_size = vram_stolen_size;

	printk(KERN_INFO "GMMADR(region 0) start: 0x%08x (%dM).\n",
		pg->gatt_start, pg->gatt_pages / 256);
	printk(KERN_INFO "GTTADR(region 3) start: 0x%08x (can map %dM RAM), and actual RAM base 0x%08x.\n",
		pg->gtt_start, gtt_pages * 4, pg->gtt_phys_start);
	printk(KERN_INFO "Stolen memory information\n");
	printk(KERN_INFO " base in RAM: 0x%x\n", dev_priv->stolen_base);
	printk(KERN_INFO " size: %luK, calculated by (GTT RAM base) - (Stolen base), seems wrong\n",
		vram_stolen_size / 1024);
	dvmt_mode = (dev_priv->gmch_ctrl >> 4) & 0x7;
	printk(KERN_INFO " the correct size should be: %dM(dvmt mode=%d)\n",
		(dvmt_mode == 1) ? 1 : (2 << (dvmt_mode - 1)), dvmt_mode);

	if (resume && (gtt_pages != pg->gtt_pages) &&
	    (stolen_size != pg->stolen_size)) {
		DRM_ERROR("GTT resume error.\n");
		ret = -EINVAL;
		goto out_err;
	}

	pg->gtt_pages = gtt_pages;
	pg->stolen_size = stolen_size;
	dev_priv->vram_stolen_size = vram_stolen_size;

	/* Map the GTT page table itself */
	dev_priv->gtt_map =
		ioremap_nocache(pg->gtt_phys_start, gtt_pages << PAGE_SHIFT);
	if (!dev_priv->gtt_map) {
		DRM_ERROR("Failure to map gtt.\n");
		ret = -ENOMEM;
		goto out_err;
	}

	/* Map the stolen memory, write-combined */
	dev_priv->vram_addr = ioremap_wc(dev_priv->stolen_base, stolen_size);
	if (!dev_priv->vram_addr) {
		DRM_ERROR("Failure to map stolen base.\n");
		ret = -ENOMEM;
		goto out_err;
	}

	DRM_DEBUG("vram kernel virtual address %p\n", dev_priv->vram_addr);

	tt_pages = (pg->gatt_pages < PSB_TT_PRIV0_PLIMIT) ?
		(pg->gatt_pages) : PSB_TT_PRIV0_PLIMIT;

	ttm_gtt_map = dev_priv->gtt_map + tt_pages / 2;

	/*
	 * Insert the vram stolen pages into the GTT
	 */
	pfn_base = dev_priv->stolen_base >> PAGE_SHIFT;
	vram_pages = num_pages = vram_stolen_size >> PAGE_SHIFT;
	printk(KERN_INFO "Set up %d stolen pages starting at 0x%08x, GTT offset %dK\n",
		num_pages, pfn_base, 0);
	for (i = 0; i < num_pages; ++i) {
		pte = psb_gtt_mask_pte(pfn_base + i, 0);
		iowrite32(pte, dev_priv->gtt_map + i);
	}

	/*
	 * Init the rest of the GTT to the scratch page to avoid accidents
	 * or scribbles
	 */
	pfn_base = page_to_pfn(dev_priv->scratch_page);
	pte = psb_gtt_mask_pte(pfn_base, 0);
	for (; i < tt_pages / 2 - 1; ++i)
		iowrite32(pte, dev_priv->gtt_map + i);

	/*
	 * Init the remaining GTT entries (written via ttm_gtt_map) to the
	 * scratch page as well
	 */
	pfn_base = page_to_pfn(dev_priv->scratch_page);
	pte = psb_gtt_mask_pte(pfn_base, 0);
	PSB_DEBUG_INIT("Initializing the rest of a total "
		       "of %d gtt pages.\n", pg->gatt_pages);

	for (; i < pg->gatt_pages - tt_pages / 2; ++i)
		iowrite32(pte, ttm_gtt_map + i);
	(void) ioread32(dev_priv->gtt_map + i - 1);

	return 0;

out_err:
	psb_gtt_takedown(dev);
	return ret;
}