/**
 * \file drm_vm.c
 * Memory mapping for DRM
 *
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 * \author Gareth Hughes <gareth@valinux.com>
 */

/*
 * Created: Mon Jan  4 08:58:31 1999 by faith@valinux.com
 *
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include "drmP.h"
#if defined(__ia64__)
#include <linux/efi.h>
#endif

static void drm_vm_open(struct vm_area_struct *vma);
static void drm_vm_close(struct vm_area_struct *vma);
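
/*
 * Compute the page protection for an I/O map: start from the protection
 * implied by the VMA flags and make it uncached (write-combined on ia64
 * when EFI allows it, and additionally guarded for PowerPC registers).
 */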
static pgprot_t drm_io_prot(uint32_t map_type, struct vm_area_struct *vma)
{
	pgprot_t tmp = vm_get_page_prot(vma->vm_flags);

#if defined(__i386__) || defined(__x86_64__)
	if (boot_cpu_data.x86 > 3 && map_type != _DRM_AGP) {
		pgprot_val(tmp) |= _PAGE_PCD;
		pgprot_val(tmp) &= ~_PAGE_PWT;
	}
#elif defined(__powerpc__)
	pgprot_val(tmp) |= _PAGE_NO_CACHE;
	if (map_type == _DRM_REGISTERS)
		pgprot_val(tmp) |= _PAGE_GUARDED;
#elif defined(__ia64__)
	if (efi_range_is_wc(vma->vm_start, vma->vm_end -
			    vma->vm_start))
		tmp = pgprot_writecombine(tmp);
	else
		tmp = pgprot_noncached(tmp);
#elif defined(__sparc__)
	tmp = pgprot_noncached(tmp);
#endif
	return tmp;
}
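
/*
 * Compute the page protection for DMA maps; only non-cache-coherent
 * PowerPC needs an uncached mapping here.
 */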
static pgprot_t drm_dma_prot(uint32_t map_type, struct vm_area_struct *vma)
{
	pgprot_t tmp = vm_get_page_prot(vma->vm_flags);

#if defined(__powerpc__) && defined(CONFIG_NOT_COHERENT_CACHE)
	pgprot_val(tmp) |= _PAGE_NO_CACHE;
#endif
	return tmp;
}
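
/**
 * \c fault method for AGP virtual memory.
 *
 * \param vma virtual memory area.
 * \param vmf fault information.
 * \return zero on success, or VM_FAULT_SIGBUS on failure.
 *
 * Find the right map and if it's AGP memory find the real physical page to
 * map, get the page, increment the use count and return it.
 */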
#if __OS_HAS_AGP
static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_local_map *map = NULL;
	struct drm_map_list *r_list;
	struct drm_hash_item *hash;

	/*
	 * Find the right map
	 */
	if (!drm_core_has_AGP(dev))
		goto vm_fault_error;

	if (!dev->agp || !dev->agp->cant_use_aperture)
		goto vm_fault_error;

	if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash))
		goto vm_fault_error;

	r_list = drm_hash_entry(hash, struct drm_map_list, hash);
	map = r_list->map;

	if (map && map->type == _DRM_AGP) {
		/*
		 * Using vm_pgoff as a selector forces us to use this unusual
		 * addressing scheme.
		 */
		resource_size_t offset = (unsigned long)vmf->virtual_address -
			vma->vm_start;
		resource_size_t baddr = map->offset + offset;
		struct drm_agp_mem *agpmem;
		struct page *page;

#ifdef __alpha__
		/*
		 * Adjust to the bus-relative address
		 */
		baddr -= dev->hose->mem_space->start;
#endif

		/*
		 * It's AGP memory - find the real physical page to map
		 */
		list_for_each_entry(agpmem, &dev->agp->memory, head) {
			if (agpmem->bound <= baddr &&
			    agpmem->bound + agpmem->pages * PAGE_SIZE > baddr)
				break;
		}

		/*
		 * list_for_each_entry() never leaves the cursor NULL; when
		 * nothing matched it points at the list head, so test for
		 * that rather than for NULL.
		 */
		if (&agpmem->head == &dev->agp->memory)
			goto vm_fault_error;

		/*
		 * Get the page, inc the use count, and return it
		 */
		offset = (baddr - agpmem->bound) >> PAGE_SHIFT;
		page = agpmem->memory->pages[offset];
		get_page(page);
		vmf->page = page;

		DRM_DEBUG
		    ("baddr = 0x%llx page = 0x%p, offset = 0x%llx, count=%d\n",
		     (unsigned long long)baddr,
		     agpmem->memory->pages[offset],
		     (unsigned long long)offset,
		     page_count(page));
		return 0;
	}
vm_fault_error:
	return VM_FAULT_SIGBUS;
}
#else
static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}
#endif
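
/**
 * \c fault method for shared virtual memory.
 *
 * \param vma virtual memory area.
 * \param vmf fault information.
 * \return zero on success, or VM_FAULT_SIGBUS on failure.
 *
 * Get the mapping, find the real physical page to map, get the page, and
 * return it.
 */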
static int drm_do_vm_shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_local_map *map = vma->vm_private_data;
	unsigned long offset;
	unsigned long i;
	struct page *page;

	if (!map)
		return VM_FAULT_SIGBUS;

	offset = (unsigned long)vmf->virtual_address - vma->vm_start;
	i = (unsigned long)map->handle + offset;
	page = vmalloc_to_page((void *)i);
	if (!page)
		return VM_FAULT_SIGBUS;
	get_page(page);
	vmf->page = page;

	DRM_DEBUG("shm_fault 0x%lx\n", offset);
	return 0;
}
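
/**
 * \c close method for shared virtual memory.
 *
 * \param vma virtual memory area.
 *
 * Deletes map information if we are the last person to close a mapping and
 * it's not in the global maplist.
 */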
static void drm_vm_shm_close(struct vm_area_struct *vma)
{
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_vma_entry *pt, *temp;
	struct drm_local_map *map;
	struct drm_map_list *r_list;
	int found_maps = 0;

	DRM_DEBUG("0x%08lx,0x%08lx\n",
		  vma->vm_start, vma->vm_end - vma->vm_start);
	atomic_dec(&dev->vma_count);

	map = vma->vm_private_data;

	mutex_lock(&dev->struct_mutex);
	list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
		if (pt->vma->vm_private_data == map)
			found_maps++;
		if (pt->vma == vma) {
			list_del(&pt->head);
			kfree(pt);
		}
	}

	/* We were the only map that was found */
	if (found_maps == 1 && map->flags & _DRM_REMOVABLE) {
		/* Check to see if we are in the maplist, if we are
		 * not, then we delete this mapping
		 */
		found_maps = 0;
		list_for_each_entry(r_list, &dev->maplist, head) {
			if (r_list->map == map)
				found_maps++;
		}

		if (!found_maps) {
			drm_dma_handle_t dmah;

			switch (map->type) {
			case _DRM_REGISTERS:
			case _DRM_FRAME_BUFFER:
				if (drm_core_has_MTRR(dev) && map->mtrr >= 0) {
					int retcode;
					retcode = mtrr_del(map->mtrr,
							   map->offset,
							   map->size);
					DRM_DEBUG("mtrr_del = %d\n", retcode);
				}
				iounmap(map->handle);
				break;
			case _DRM_SHM:
				vfree(map->handle);
				break;
			case _DRM_AGP:
			case _DRM_SCATTER_GATHER:
				break;
			case _DRM_CONSISTENT:
				dmah.vaddr = map->handle;
				dmah.busaddr = map->offset;
				dmah.size = map->size;
				__drm_pci_free(dev, &dmah);
				break;
			case _DRM_GEM:
				DRM_ERROR("tried to rmmap GEM object\n");
				break;
			}
			kfree(map);
		}
	}
	mutex_unlock(&dev->struct_mutex);
}
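
/**
 * \c fault method for DMA virtual memory.
 *
 * \param vma virtual memory area.
 * \param vmf fault information.
 * \return zero on success, or VM_FAULT_SIGBUS on failure.
 *
 * Determine the page number from the page offset and get it from
 * drm_device_dma::pagelist.
 */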
static int drm_do_vm_dma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_device_dma *dma = dev->dma;
	unsigned long offset;
	unsigned long page_nr;
	struct page *page;

	if (!dma)
		return VM_FAULT_SIGBUS;
	if (!dma->pagelist)
		return VM_FAULT_SIGBUS;

	offset = (unsigned long)vmf->virtual_address - vma->vm_start;
	page_nr = offset >> PAGE_SHIFT;
	page = virt_to_page((dma->pagelist[page_nr] + (offset & (~PAGE_MASK))));

	get_page(page);
	vmf->page = page;

	DRM_DEBUG("dma_fault 0x%lx (page %lu)\n", offset, page_nr);
	return 0;
}
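
/**
 * \c fault method for scatter-gather virtual memory.
 *
 * \param vma virtual memory area.
 * \param vmf fault information.
 * \return zero on success, or VM_FAULT_SIGBUS on failure.
 *
 * Determine the map offset from the page offset and get it from
 * drm_sg_mem::pagelist.
 */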
static int drm_do_vm_sg_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_local_map *map = vma->vm_private_data;
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_sg_mem *entry = dev->sg;
	unsigned long offset;
	unsigned long map_offset;
	unsigned long page_offset;
	struct page *page;

	if (!entry)
		return VM_FAULT_SIGBUS;
	if (!entry->pagelist)
		return VM_FAULT_SIGBUS;

	offset = (unsigned long)vmf->virtual_address - vma->vm_start;
	map_offset = map->offset - (unsigned long)dev->sg->virtual;
	page_offset = (offset >> PAGE_SHIFT) + (map_offset >> PAGE_SHIFT);
	page = entry->pagelist[page_offset];
	get_page(page);
	vmf->page = page;

	return 0;
}

static int drm_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return drm_do_vm_fault(vma, vmf);
}

static int drm_vm_shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return drm_do_vm_shm_fault(vma, vmf);
}

static int drm_vm_dma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return drm_do_vm_dma_fault(vma, vmf);
}

static int drm_vm_sg_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return drm_do_vm_sg_fault(vma, vmf);
}

/** AGP virtual memory operations */
static const struct vm_operations_struct drm_vm_ops = {
	.fault = drm_vm_fault,
	.open = drm_vm_open,
	.close = drm_vm_close,
};

/** Shared virtual memory operations */
static const struct vm_operations_struct drm_vm_shm_ops = {
	.fault = drm_vm_shm_fault,
	.open = drm_vm_open,
	.close = drm_vm_shm_close,
};

/** DMA virtual memory operations */
static const struct vm_operations_struct drm_vm_dma_ops = {
	.fault = drm_vm_dma_fault,
	.open = drm_vm_open,
	.close = drm_vm_close,
};

/** Scatter-gather virtual memory operations */
static const struct vm_operations_struct drm_vm_sg_ops = {
	.fault = drm_vm_sg_fault,
	.open = drm_vm_open,
	.close = drm_vm_close,
};
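
/**
 * \c open method for shared virtual memory.
 *
 * \param vma virtual memory area.
 *
 * Create a new drm_vma_entry structure as the \p vma private data entry and
 * add it to drm_device::vmalist.  The caller must hold
 * drm_device::struct_mutex.
 */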
void drm_vm_open_locked(struct vm_area_struct *vma)
{
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_vma_entry *vma_entry;

	DRM_DEBUG("0x%08lx,0x%08lx\n",
		  vma->vm_start, vma->vm_end - vma->vm_start);
	atomic_inc(&dev->vma_count);

	vma_entry = kmalloc(sizeof(*vma_entry), GFP_KERNEL);
	if (vma_entry) {
		vma_entry->vma = vma;
		vma_entry->pid = current->pid;
		list_add(&vma_entry->head, &dev->vmalist);
	}
}

static void drm_vm_open(struct vm_area_struct *vma)
{
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;

	mutex_lock(&dev->struct_mutex);
	drm_vm_open_locked(vma);
	mutex_unlock(&dev->struct_mutex);
}
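
/**
 * \c close method for all virtual memory types.
 *
 * \param vma virtual memory area.
 *
 * Search the \p vma private data entry in drm_device::vmalist, unlink it, and
 * free it.
 */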
static void drm_vm_close(struct vm_area_struct *vma)
{
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_vma_entry *pt, *temp;

	DRM_DEBUG("0x%08lx,0x%08lx\n",
		  vma->vm_start, vma->vm_end - vma->vm_start);
	atomic_dec(&dev->vma_count);

	mutex_lock(&dev->struct_mutex);
	list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
		if (pt->vma == vma) {
			list_del(&pt->head);
			kfree(pt);
			break;
		}
	}
	mutex_unlock(&dev->struct_mutex);
}
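
/**
 * mmap DMA memory.
 *
 * \param filp file pointer.
 * \param vma virtual memory area.
 * \return zero on success or a negative number on failure.
 *
 * Sets the virtual memory area operations structure to vm_dma_ops, the file
 * pointer, and calls vm_open().
 */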
static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev;
	struct drm_device_dma *dma;
	unsigned long length = vma->vm_end - vma->vm_start;

	dev = priv->minor->dev;
	dma = dev->dma;
	DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
		  vma->vm_start, vma->vm_end, vma->vm_pgoff);

	/* Length must match exact page count */
	if (!dma || (length >> PAGE_SHIFT) != dma->page_count) {
		return -EINVAL;
	}

	if (!capable(CAP_SYS_ADMIN) &&
	    (dma->flags & _DRM_DMA_USE_PCI_RO)) {
		vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
#if defined(__i386__) || defined(__x86_64__)
		pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
#else
		/* Ye gads this is ugly.  With more thought
		   we could move this up higher and use
		   `protection_map' instead.  */
		vma->vm_page_prot =
		    __pgprot(pte_val
			     (pte_wrprotect
			      (__pte(pgprot_val(vma->vm_page_prot)))));
#endif
	}

	vma->vm_ops = &drm_vm_dma_ops;

	vma->vm_flags |= VM_RESERVED;	/* Don't swap */
	vma->vm_flags |= VM_DONTEXPAND;

	vma->vm_file = filp;	/* Needed for drm_vm_open() */
	drm_vm_open_locked(vma);
	return 0;
}
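
/*
 * Default implementation of the driver's get_map_ofs hook: maps are mapped
 * at their stated offset.
 */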
resource_size_t drm_core_get_map_ofs(struct drm_local_map *map)
{
	return map->offset;
}

EXPORT_SYMBOL(drm_core_get_map_ofs);

/*
 * Default implementation of the driver's get_reg_ofs hook; only Alpha needs
 * a bus-relative adjustment for its dense memory space.
 */
resource_size_t drm_core_get_reg_ofs(struct drm_device *dev)
{
#ifdef __alpha__
	return dev->hose->dense_mem_base - dev->hose->mem_space->start;
#else
	return 0;
#endif
}

EXPORT_SYMBOL(drm_core_get_reg_ofs);
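
/**
 * mmap a DRM map.
 *
 * \param filp file pointer.
 * \param vma virtual memory area.
 * \return zero on success or a negative number on failure.
 *
 * If the virtual memory area has no offset associated with it then it's a DMA
 * area, so calls mmap_dma(). Otherwise searches the map in drm_device::maplist,
 * checks that the restricted flag is not set, sets the virtual memory
 * operations according to the map type and remaps the pages. Finally sets the
 * file pointer and calls vm_open().  The caller must hold
 * drm_device::struct_mutex.
 */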
int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_local_map *map = NULL;
	resource_size_t offset = 0;
	struct drm_hash_item *hash;

	DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
		  vma->vm_start, vma->vm_end, vma->vm_pgoff);

	if (!priv->authenticated)
		return -EACCES;

	/* We check for "dma". On Apple's UniNorth, it's valid to have
	 * the AGP mapped at physical address 0
	 * --BenH.
	 */
	if (!vma->vm_pgoff
#if __OS_HAS_AGP
	    && (!dev->agp
		|| dev->agp->agp_info.device->vendor != PCI_VENDOR_ID_APPLE)
#endif
	    )
		return drm_mmap_dma(filp, vma);

	if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash)) {
		DRM_ERROR("Could not find map\n");
		return -EINVAL;
	}

	map = drm_hash_entry(hash, struct drm_map_list, hash)->map;
	if (!map || ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN)))
		return -EPERM;

	/* Check for valid size. */
	if (map->size < vma->vm_end - vma->vm_start)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN) && (map->flags & _DRM_READ_ONLY)) {
		vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
#if defined(__i386__) || defined(__x86_64__)
		pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
#else
		/* Ye gads this is ugly.  With more thought
		   we could move this up higher and use
		   `protection_map' instead.  */
		vma->vm_page_prot =
		    __pgprot(pte_val
			     (pte_wrprotect
			      (__pte(pgprot_val(vma->vm_page_prot)))));
#endif
	}

	switch (map->type) {
	case _DRM_AGP:
		if (drm_core_has_AGP(dev) && dev->agp->cant_use_aperture) {
			/*
			 * On some platforms we can't talk to bus dma address
			 * from the CPU, so for memory of type DRM_AGP, we'll
			 * deal with sorting out the real physical pages and
			 * mappings in fault()
			 */
#if defined(__powerpc__)
			pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
#endif
			vma->vm_ops = &drm_vm_ops;
			break;
		}
		/* fall through to _DRM_FRAME_BUFFER... */
	case _DRM_FRAME_BUFFER:
	case _DRM_REGISTERS:
		offset = dev->driver->get_reg_ofs(dev);
		vma->vm_flags |= VM_IO;	/* not in core dump */
		vma->vm_page_prot = drm_io_prot(map->type, vma);
		if (io_remap_pfn_range(vma, vma->vm_start,
				       (map->offset + offset) >> PAGE_SHIFT,
				       vma->vm_end - vma->vm_start,
				       vma->vm_page_prot))
			return -EAGAIN;
		DRM_DEBUG("   Type = %d; start = 0x%lx, end = 0x%lx,"
			  " offset = 0x%llx\n",
			  map->type,
			  vma->vm_start, vma->vm_end, (unsigned long long)(map->offset + offset));
		vma->vm_ops = &drm_vm_ops;
		break;
	case _DRM_CONSISTENT:
		/* Consistent memory is really like shared memory. But
		 * it's allocated in a different way, so avoid fault */
		if (remap_pfn_range(vma, vma->vm_start,
				    page_to_pfn(virt_to_page(map->handle)),
				    vma->vm_end - vma->vm_start, vma->vm_page_prot))
			return -EAGAIN;
		vma->vm_page_prot = drm_dma_prot(map->type, vma);
		/* fall through to _DRM_SHM */
	case _DRM_SHM:
		vma->vm_ops = &drm_vm_shm_ops;
		vma->vm_private_data = (void *)map;
		/* Don't let this area swap.  Change when
		   DRM_KERNEL advisory is supported. */
		vma->vm_flags |= VM_RESERVED;
		break;
	case _DRM_SCATTER_GATHER:
		vma->vm_ops = &drm_vm_sg_ops;
		vma->vm_private_data = (void *)map;
		vma->vm_flags |= VM_RESERVED;
		vma->vm_page_prot = drm_dma_prot(map->type, vma);
		break;
	default:
		return -EINVAL;	/* This should never happen. */
	}
	vma->vm_flags |= VM_RESERVED;	/* Don't swap */
	vma->vm_flags |= VM_DONTEXPAND;

	vma->vm_file = filp;	/* Needed for drm_vm_open() */
	drm_vm_open_locked(vma);
	return 0;
}
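
/*
 * mmap entry point for the DRM file operations: takes
 * drm_device::struct_mutex and dispatches to drm_mmap_locked().
 */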
int drm_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	int ret;

	mutex_lock(&dev->struct_mutex);
	ret = drm_mmap_locked(filp, vma);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
EXPORT_SYMBOL(drm_mmap);