/**
 * \file drm_vm.c
 * Memory mapping for DRM
 *
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 * \author Gareth Hughes <gareth@valinux.com>
 */

#include "drmP.h"
#if defined(__ia64__)
#include <linux/efi.h>
#include <linux/slab.h>
#endif

static void drm_vm_open(struct vm_area_struct *vma);
static void drm_vm_close(struct vm_area_struct *vma);

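/*
 * Compute the page protection for register/framebuffer style mappings:
 * force the range uncached (write-combined on ia64 when EFI reports the
 * range supports it) so CPU accesses reach the device rather than the cache.
 */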
static pgprot_t drm_io_prot(uint32_t map_type, struct vm_area_struct *vma)
{
        pgprot_t tmp = vm_get_page_prot(vma->vm_flags);

#if defined(__i386__) || defined(__x86_64__)
        if (boot_cpu_data.x86 > 3 && map_type != _DRM_AGP) {
                pgprot_val(tmp) |= _PAGE_PCD;
                pgprot_val(tmp) &= ~_PAGE_PWT;
        }
#elif defined(__powerpc__)
        pgprot_val(tmp) |= _PAGE_NO_CACHE;
        if (map_type == _DRM_REGISTERS)
                pgprot_val(tmp) |= _PAGE_GUARDED;
#elif defined(__ia64__)
        if (efi_range_is_wc(vma->vm_start, vma->vm_end - vma->vm_start))
                tmp = pgprot_writecombine(tmp);
        else
                tmp = pgprot_noncached(tmp);
#elif defined(__sparc__) || defined(__arm__)
        tmp = pgprot_noncached(tmp);
#endif
        return tmp;
}

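/*
 * Compute the page protection for DMA and scatter-gather backed mappings;
 * only non-cache-coherent PowerPC needs the pages forced uncached here.
 */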
static pgprot_t drm_dma_prot(uint32_t map_type, struct vm_area_struct *vma)
{
        pgprot_t tmp = vm_get_page_prot(vma->vm_flags);

#if defined(__powerpc__) && defined(CONFIG_NOT_COHERENT_CACHE)
        pgprot_val(tmp) |= _PAGE_NO_CACHE;
#endif
        return tmp;
}

/**
 * \c fault method for AGP virtual memory.
 *
 * \param vma virtual memory area.
 * \param vmf fault information, including the faulting address.
 * \return zero with a referenced page in \p vmf->page, or VM_FAULT_SIGBUS.
 *
 * Find the map backing this VMA and, if it is AGP memory, look up the real
 * physical page, take a reference on it, and return it to the fault handler.
 */
#if __OS_HAS_AGP
static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct drm_file *priv = vma->vm_file->private_data;
        struct drm_device *dev = priv->minor->dev;
        struct drm_local_map *map = NULL;
        struct drm_map_list *r_list;
        struct drm_hash_item *hash;

        /*
         * Find the right map.
         */
        if (!drm_core_has_AGP(dev))
                goto vm_fault_error;

        if (!dev->agp || !dev->agp->cant_use_aperture)
                goto vm_fault_error;

        if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash))
                goto vm_fault_error;

        r_list = drm_hash_entry(hash, struct drm_map_list, hash);
        map = r_list->map;

        if (map && map->type == _DRM_AGP) {
                /*
                 * Using vm_pgoff as a selector forces us to use this unusual
                 * addressing scheme.
                 */
                resource_size_t offset = (unsigned long)vmf->virtual_address -
                        vma->vm_start;
                resource_size_t baddr = map->offset + offset;
                struct drm_agp_mem *agpmem;
                struct page *page;

#ifdef __alpha__
                /*
                 * On Alpha, the bus address includes the hose's memory-space
                 * offset; subtract it to get an aperture-relative address.
                 */
                baddr -= dev->hose->mem_space->start;
#endif

                /*
                 * It's AGP memory - find the real physical page to map.
                 */
                list_for_each_entry(agpmem, &dev->agp->memory, head) {
                        if (agpmem->bound <= baddr &&
                            agpmem->bound + agpmem->pages * PAGE_SIZE > baddr)
                                break;
                }

                if (&agpmem->head == &dev->agp->memory)
                        goto vm_fault_error;

                /*
                 * Get the page, inc the use count, and return it.
                 */
                offset = (baddr - agpmem->bound) >> PAGE_SHIFT;
                page = agpmem->memory->pages[offset];
                get_page(page);
                vmf->page = page;

                DRM_DEBUG
                    ("baddr = 0x%llx page = 0x%p, offset = 0x%llx, count=%d\n",
                     (unsigned long long)baddr,
                     agpmem->memory->pages[offset],
                     (unsigned long long)offset,
                     page_count(page));
                return 0;
        }
vm_fault_error:
        return VM_FAULT_SIGBUS;
}
#else
static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        return VM_FAULT_SIGBUS;
}
#endif
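
/*
 * Note on the fault handlers in this file: each one returns with an extra
 * reference held on the page (taken via get_page()) and stores it in
 * vmf->page.  The VM core takes ownership of that reference when it maps
 * the page and drops it again when the mapping is torn down.
 */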

/**
 * \c fault method for shared virtual memory.
 *
 * \param vma virtual memory area.
 * \param vmf fault information, including the faulting address.
 * \return zero with a referenced page in \p vmf->page, or VM_FAULT_SIGBUS.
 *
 * Get the mapping, find the real physical page backing the faulting address,
 * take a reference on it, and return it.
 */
static int drm_do_vm_shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct drm_local_map *map = vma->vm_private_data;
        unsigned long offset;
        unsigned long i;
        struct page *page;

        if (!map)
                return VM_FAULT_SIGBUS;

        offset = (unsigned long)vmf->virtual_address - vma->vm_start;
        i = (unsigned long)map->handle + offset;
        page = vmalloc_to_page((void *)i);
        if (!page)
                return VM_FAULT_SIGBUS;
        get_page(page);
        vmf->page = page;

        DRM_DEBUG("shm_fault 0x%lx\n", offset);
        return 0;
}

/**
 * \c close method for shared virtual memory.
 *
 * \param vma virtual memory area.
 *
 * Delete the map information if we are the last person to close a mapping
 * and it is not in the global maplist.
 */
static void drm_vm_shm_close(struct vm_area_struct *vma)
{
        struct drm_file *priv = vma->vm_file->private_data;
        struct drm_device *dev = priv->minor->dev;
        struct drm_vma_entry *pt, *temp;
        struct drm_local_map *map;
        struct drm_map_list *r_list;
        int found_maps = 0;

        DRM_DEBUG("0x%08lx,0x%08lx\n",
                  vma->vm_start, vma->vm_end - vma->vm_start);
        atomic_dec(&dev->vma_count);

        map = vma->vm_private_data;

        mutex_lock(&dev->struct_mutex);
        list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
                if (pt->vma->vm_private_data == map)
                        found_maps++;
                if (pt->vma == vma) {
                        list_del(&pt->head);
                        kfree(pt);
                }
        }

        /* We were the only map that was found. */
        if (found_maps == 1 && map->flags & _DRM_REMOVABLE) {
                /* Check to see if we are in the maplist;
                 * if we are not, then we are the last user,
                 * so we need to remove the map.
                 */
                found_maps = 0;
                list_for_each_entry(r_list, &dev->maplist, head) {
                        if (r_list->map == map)
                                found_maps++;
                }

                if (!found_maps) {
                        drm_dma_handle_t dmah;

                        switch (map->type) {
                        case _DRM_REGISTERS:
                        case _DRM_FRAME_BUFFER:
                                if (drm_core_has_MTRR(dev) && map->mtrr >= 0) {
                                        int retcode;
                                        retcode = mtrr_del(map->mtrr,
                                                           map->offset,
                                                           map->size);
                                        DRM_DEBUG("mtrr_del = %d\n", retcode);
                                }
                                iounmap(map->handle);
                                break;
                        case _DRM_SHM:
                                vfree(map->handle);
                                break;
                        case _DRM_AGP:
                        case _DRM_SCATTER_GATHER:
                                break;
                        case _DRM_CONSISTENT:
                                dmah.vaddr = map->handle;
                                dmah.busaddr = map->offset;
                                dmah.size = map->size;
                                __drm_pci_free(dev, &dmah);
                                break;
                        case _DRM_GEM:
                                DRM_ERROR("tried to rmmap GEM object\n");
                                break;
                        }
                        kfree(map);
                }
        }
        mutex_unlock(&dev->struct_mutex);
}

/**
 * \c fault method for DMA mappings.
 *
 * \param vma virtual memory area.
 * \param vmf fault information, including the faulting address.
 * \return zero with a referenced page in \p vmf->page, or VM_FAULT_SIGBUS.
 *
 * Determine the page number from the page offset and get it from
 * drm_device_dma::pagelist.
 */
static int drm_do_vm_dma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct drm_file *priv = vma->vm_file->private_data;
        struct drm_device *dev = priv->minor->dev;
        struct drm_device_dma *dma = dev->dma;
        unsigned long offset;
        unsigned long page_nr;
        struct page *page;

        if (!dma)
                return VM_FAULT_SIGBUS;
        if (!dma->pagelist)
                return VM_FAULT_SIGBUS;

        offset = (unsigned long)vmf->virtual_address - vma->vm_start;
        page_nr = offset >> PAGE_SHIFT;
        page = virt_to_page((dma->pagelist[page_nr] + (offset & (~PAGE_MASK))));

        get_page(page);
        vmf->page = page;

        DRM_DEBUG("dma_fault 0x%lx (page %lu)\n", offset, page_nr);
        return 0;
}

/**
 * \c fault method for scatter-gather mappings.
 *
 * \param vma virtual memory area.
 * \param vmf fault information, including the faulting address.
 * \return zero with a referenced page in \p vmf->page, or VM_FAULT_SIGBUS.
 *
 * Determine the map offset from the page offset and get the page from
 * drm_sg_mem::pagelist.
 */
static int drm_do_vm_sg_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct drm_local_map *map = vma->vm_private_data;
        struct drm_file *priv = vma->vm_file->private_data;
        struct drm_device *dev = priv->minor->dev;
        struct drm_sg_mem *entry = dev->sg;
        unsigned long offset;
        unsigned long map_offset;
        unsigned long page_offset;
        struct page *page;

        if (!entry)
                return VM_FAULT_SIGBUS;
        if (!entry->pagelist)
                return VM_FAULT_SIGBUS;

        offset = (unsigned long)vmf->virtual_address - vma->vm_start;
        map_offset = map->offset - (unsigned long)dev->sg->virtual;
        page_offset = (offset >> PAGE_SHIFT) + (map_offset >> PAGE_SHIFT);
        page = entry->pagelist[page_offset];
        get_page(page);
        vmf->page = page;

        return 0;
}
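
/*
 * Thin wrappers around the handlers above; these are what get plugged into
 * the vm_operations_struct tables below, one table per map type.
 */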
static int drm_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        return drm_do_vm_fault(vma, vmf);
}

static int drm_vm_shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        return drm_do_vm_shm_fault(vma, vmf);
}

static int drm_vm_dma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        return drm_do_vm_dma_fault(vma, vmf);
}

static int drm_vm_sg_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        return drm_do_vm_sg_fault(vma, vmf);
}

/** AGP virtual memory operations */
static const struct vm_operations_struct drm_vm_ops = {
        .fault = drm_vm_fault,
        .open = drm_vm_open,
        .close = drm_vm_close,
};

/** Shared virtual memory operations */
static const struct vm_operations_struct drm_vm_shm_ops = {
        .fault = drm_vm_shm_fault,
        .open = drm_vm_open,
        .close = drm_vm_shm_close,
};

/** DMA virtual memory operations */
static const struct vm_operations_struct drm_vm_dma_ops = {
        .fault = drm_vm_dma_fault,
        .open = drm_vm_open,
        .close = drm_vm_close,
};

/** Scatter-gather virtual memory operations */
static const struct vm_operations_struct drm_vm_sg_ops = {
        .fault = drm_vm_sg_fault,
        .open = drm_vm_open,
        .close = drm_vm_close,
};

/**
 * \c open method for shared virtual memory.
 *
 * \param vma virtual memory area.
 *
 * Create a new drm_vma_entry structure as the \p vma private data entry and
 * add it to drm_device::vmalist.
 */
void drm_vm_open_locked(struct vm_area_struct *vma)
{
        struct drm_file *priv = vma->vm_file->private_data;
        struct drm_device *dev = priv->minor->dev;
        struct drm_vma_entry *vma_entry;

        DRM_DEBUG("0x%08lx,0x%08lx\n",
                  vma->vm_start, vma->vm_end - vma->vm_start);
        atomic_inc(&dev->vma_count);

        vma_entry = kmalloc(sizeof(*vma_entry), GFP_KERNEL);
        if (vma_entry) {
                vma_entry->vma = vma;
                vma_entry->pid = current->pid;
                list_add(&vma_entry->head, &dev->vmalist);
        }
}

static void drm_vm_open(struct vm_area_struct *vma)
{
        struct drm_file *priv = vma->vm_file->private_data;
        struct drm_device *dev = priv->minor->dev;

        mutex_lock(&dev->struct_mutex);
        drm_vm_open_locked(vma);
        mutex_unlock(&dev->struct_mutex);
}

void drm_vm_close_locked(struct vm_area_struct *vma)
{
        struct drm_file *priv = vma->vm_file->private_data;
        struct drm_device *dev = priv->minor->dev;
        struct drm_vma_entry *pt, *temp;

        DRM_DEBUG("0x%08lx,0x%08lx\n",
                  vma->vm_start, vma->vm_end - vma->vm_start);
        atomic_dec(&dev->vma_count);

        list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
                if (pt->vma == vma) {
                        list_del(&pt->head);
                        kfree(pt);
                        break;
                }
        }
}

/**
 * \c close method for all virtual memory types.
 *
 * \param vma virtual memory area.
 *
 * Search the \p vma private data entry in drm_device::vmalist, unlink it, and
 * free it.
 */
static void drm_vm_close(struct vm_area_struct *vma)
{
        struct drm_file *priv = vma->vm_file->private_data;
        struct drm_device *dev = priv->minor->dev;

        mutex_lock(&dev->struct_mutex);
        drm_vm_close_locked(vma);
        mutex_unlock(&dev->struct_mutex);
}

/**
 * mmap DMA memory.
 *
 * \param filp file pointer.
 * \param vma virtual memory area.
 * \return zero on success or a negative number on failure.
 *
 * Sets the virtual memory area operations structure to vm_dma_ops, the file
 * pointer, and calls vm_open().
 */
static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
{
        struct drm_file *priv = filp->private_data;
        struct drm_device *dev;
        struct drm_device_dma *dma;
        unsigned long length = vma->vm_end - vma->vm_start;

        dev = priv->minor->dev;
        dma = dev->dma;
        DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
                  vma->vm_start, vma->vm_end, vma->vm_pgoff);

        /* Length must match exact page count. */
        if (!dma || (length >> PAGE_SHIFT) != dma->page_count) {
                return -EINVAL;
        }

        if (!capable(CAP_SYS_ADMIN) &&
            (dma->flags & _DRM_DMA_USE_PCI_RO)) {
                vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
#if defined(__i386__) || defined(__x86_64__)
                pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
#else
                /* Ye gads this is ugly.  With more thought
                   we could move this up higher and use
                   `protection_map' instead.  */
                vma->vm_page_prot =
                    __pgprot(pte_val
                             (pte_wrprotect
                              (__pte(pgprot_val(vma->vm_page_prot)))));
#endif
        }

        vma->vm_ops = &drm_vm_dma_ops;

        vma->vm_flags |= VM_RESERVED;        /* Don't swap */
        vma->vm_flags |= VM_DONTEXPAND;

        vma->vm_file = filp;        /* Needed for drm_vm_open() */
        drm_vm_open_locked(vma);
        return 0;
}
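
/*
 * Register-space offset to add to a map's bus address before remapping it.
 * Only Alpha needs a correction (dense memory base relative to the hose's
 * memory window); everywhere else the offset is zero.
 */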
static resource_size_t drm_core_get_reg_ofs(struct drm_device *dev)
{
#ifdef __alpha__
        return dev->hose->dense_mem_base - dev->hose->mem_space->start;
#else
        return 0;
#endif
}

/**
 * Memory-map a DRM mapping into user space (caller holds struct_mutex).
 *
 * \param filp file pointer.
 * \param vma virtual memory area.
 * \return zero on success or a negative number on failure.
 *
 * If the virtual memory area has no offset associated with it then it is a
 * DMA area, so drm_mmap_dma() is called. Otherwise the map is looked up in
 * drm_device::map_hash, the restricted flag is checked, the virtual memory
 * operations are set according to the map type, and the pages are remapped.
 * Finally the file pointer is set and vm_open() is called.
 */
int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
{
        struct drm_file *priv = filp->private_data;
        struct drm_device *dev = priv->minor->dev;
        struct drm_local_map *map = NULL;
        resource_size_t offset = 0;
        struct drm_hash_item *hash;

        DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
                  vma->vm_start, vma->vm_end, vma->vm_pgoff);

        if (!priv->authenticated)
                return -EACCES;

        /* We check for "dma". On Apple's UniNorth, it's valid to have
         * the AGP mapped at physical address 0
         * --BenH.
         */
        if (!vma->vm_pgoff
#if __OS_HAS_AGP
            && (!dev->agp
                || dev->agp->agp_info.device->vendor != PCI_VENDOR_ID_APPLE)
#endif
            )
                return drm_mmap_dma(filp, vma);

        if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash)) {
                DRM_ERROR("Could not find map\n");
                return -EINVAL;
        }

        map = drm_hash_entry(hash, struct drm_map_list, hash)->map;
        if (!map || ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN)))
                return -EPERM;

        /* Check for valid size. */
        if (map->size < vma->vm_end - vma->vm_start)
                return -EINVAL;

        if (!capable(CAP_SYS_ADMIN) && (map->flags & _DRM_READ_ONLY)) {
                vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
#if defined(__i386__) || defined(__x86_64__)
                pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
#else
                /* Ye gads this is ugly.  With more thought
                   we could move this up higher and use
                   `protection_map' instead.  */
                vma->vm_page_prot =
                    __pgprot(pte_val
                             (pte_wrprotect
                              (__pte(pgprot_val(vma->vm_page_prot)))));
#endif
        }

        switch (map->type) {
#if !defined(__arm__)
        case _DRM_AGP:
                if (drm_core_has_AGP(dev) && dev->agp->cant_use_aperture) {
                        /*
                         * On some platforms we can't talk to bus dma
                         * addresses from the CPU, so for memory of type
                         * _DRM_AGP we'll deal with sorting out the real
                         * physical pages and mappings in fault().
                         */
#if defined(__powerpc__)
                        pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
#endif
                        vma->vm_ops = &drm_vm_ops;
                        break;
                }
                /* fall through to _DRM_FRAME_BUFFER... */
#endif
        case _DRM_FRAME_BUFFER:
        case _DRM_REGISTERS:
                offset = drm_core_get_reg_ofs(dev);
                vma->vm_flags |= VM_IO;        /* not in core dump */
                vma->vm_page_prot = drm_io_prot(map->type, vma);
#if !defined(__arm__)
                if (io_remap_pfn_range(vma, vma->vm_start,
                                       (map->offset + offset) >> PAGE_SHIFT,
                                       vma->vm_end - vma->vm_start,
                                       vma->vm_page_prot))
                        return -EAGAIN;
#else
                if (remap_pfn_range(vma, vma->vm_start,
                                    (map->offset + offset) >> PAGE_SHIFT,
                                    vma->vm_end - vma->vm_start,
                                    vma->vm_page_prot))
                        return -EAGAIN;
#endif

                DRM_DEBUG("   Type = %d; start = 0x%lx, end = 0x%lx,"
                          " offset = 0x%llx\n",
                          map->type,
                          vma->vm_start, vma->vm_end,
                          (unsigned long long)(map->offset + offset));

                vma->vm_ops = &drm_vm_ops;
                break;
        case _DRM_CONSISTENT:
                /* Consistent (PCI-coherent) memory is remapped directly and
                 * then falls through to the _DRM_SHM case to pick up vm_ops.
                 */
                if (remap_pfn_range(vma, vma->vm_start,
                                    page_to_pfn(virt_to_page(map->handle)),
                                    vma->vm_end - vma->vm_start, vma->vm_page_prot))
                        return -EAGAIN;
                vma->vm_page_prot = drm_dma_prot(map->type, vma);
                /* fall through to _DRM_SHM */
        case _DRM_SHM:
                vma->vm_ops = &drm_vm_shm_ops;
                vma->vm_private_data = (void *)map;
                /* Don't let this area swap.  Change when
                   DRM_KERNEL advisory is supported. */
                vma->vm_flags |= VM_RESERVED;
                break;
        case _DRM_SCATTER_GATHER:
                vma->vm_ops = &drm_vm_sg_ops;
                vma->vm_private_data = (void *)map;
                vma->vm_flags |= VM_RESERVED;
                vma->vm_page_prot = drm_dma_prot(map->type, vma);
                break;
        default:
                return -EINVAL;        /* This should never happen. */
        }
        vma->vm_flags |= VM_RESERVED;        /* Don't swap */
        vma->vm_flags |= VM_DONTEXPAND;

        vma->vm_file = filp;        /* Needed for drm_vm_open() */
        drm_vm_open_locked(vma);
        return 0;
}

int drm_mmap(struct file *filp, struct vm_area_struct *vma)
{
        struct drm_file *priv = filp->private_data;
        struct drm_device *dev = priv->minor->dev;
        int ret;

        mutex_lock(&dev->struct_mutex);
        ret = drm_mmap_locked(filp, vma);
        mutex_unlock(&dev->struct_mutex);

        return ret;
}
EXPORT_SYMBOL(drm_mmap);
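
/*
 * Usage sketch (illustrative, not part of this file): drivers expose this
 * mapping path by pointing their file_operations at drm_mmap, e.g. in a
 * hypothetical driver's fops:
 *
 *     static const struct file_operations example_drm_fops = {
 *             .owner          = THIS_MODULE,
 *             .open           = drm_open,
 *             .release        = drm_release,
 *             .unlocked_ioctl = drm_ioctl,
 *             .mmap           = drm_mmap,
 *             .poll           = drm_poll,
 *     };
 *
 * drm_mmap() then takes dev->struct_mutex and dispatches to the map-type
 * specific vm_operations defined above.
 */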