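/**
 * \file drm_vm.c
 * Memory mapping for DRM
 *
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 * \author Gareth Hughes <gareth@valinux.com>
 */
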
#include <drm/drmP.h>
#include <linux/export.h>
#include <linux/seq_file.h>
#if defined(__ia64__)
#include <linux/efi.h>
#include <linux/slab.h>
#endif
#include <asm/pgtable.h>
#include "drm_legacy.h"

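/*
 * Bookkeeping entry for a VMA that maps DRM memory; one per open mapping,
 * linked on drm_device::vmalist.
 */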
struct drm_vma_entry {
	struct list_head head;
	struct vm_area_struct *vma;
	pid_t pid;
};

static void drm_vm_open(struct vm_area_struct *vma);
static void drm_vm_close(struct vm_area_struct *vma);

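/*
 * Compute the page protection for register/framebuffer mappings, applying
 * the architecture-specific caching attributes (uncached or write-combined).
 */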
static pgprot_t drm_io_prot(struct drm_local_map *map,
			    struct vm_area_struct *vma)
{
	pgprot_t tmp = vm_get_page_prot(vma->vm_flags);

#if defined(__i386__) || defined(__x86_64__) || defined(__powerpc__)
	if (map->type == _DRM_REGISTERS && !(map->flags & _DRM_WRITE_COMBINING))
		tmp = pgprot_noncached(tmp);
	else
		tmp = pgprot_writecombine(tmp);
#elif defined(__ia64__)
	if (efi_range_is_wc(vma->vm_start, vma->vm_end -
			    vma->vm_start))
		tmp = pgprot_writecombine(tmp);
	else
		tmp = pgprot_noncached(tmp);
#elif defined(__sparc__) || defined(__arm__) || defined(__mips__)
	tmp = pgprot_noncached(tmp);
#endif
	return tmp;
}

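/*
 * Compute the page protection for DMA mappings; non-coherent PowerPC needs
 * the pages marked uncacheable.
 */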
static pgprot_t drm_dma_prot(uint32_t map_type, struct vm_area_struct *vma)
{
	pgprot_t tmp = vm_get_page_prot(vma->vm_flags);

#if defined(__powerpc__) && defined(CONFIG_NOT_COHERENT_CACHE)
	tmp |= _PAGE_NO_CACHE;
#endif
	return tmp;
}

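/**
 * \c fault method for AGP virtual memory.
 *
 * \param vma virtual memory area.
 * \param vmf fault information, including the faulting address.
 * \return zero on success, or VM_FAULT_SIGBUS on failure.
 *
 * Find the right map and if it's AGP memory find the real physical page to
 * map, get the page, increment the use count and return it.
 */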
#if __OS_HAS_AGP
static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_local_map *map = NULL;
	struct drm_map_list *r_list;
	struct drm_hash_item *hash;

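	/*
	 * Find the right map
	 */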
	if (!dev->agp || !dev->agp->cant_use_aperture)
		goto vm_fault_error;

	if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash))
		goto vm_fault_error;

	r_list = drm_hash_entry(hash, struct drm_map_list, hash);
	map = r_list->map;

	if (map && map->type == _DRM_AGP) {
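		/*
		 * Using vm_pgoff as a selector forces us to use this unusual
		 * addressing scheme.
		 */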
		resource_size_t offset = (unsigned long)vmf->virtual_address -
			vma->vm_start;
		resource_size_t baddr = map->offset + offset;
		struct drm_agp_mem *agpmem;
		struct page *page;

#ifdef __alpha__
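		/*
		 * Adjust to the bus-relative address
		 */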
		baddr -= dev->hose->mem_space->start;
#endif

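		/*
		 * It's AGP memory - find the real physical page to map
		 */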
		list_for_each_entry(agpmem, &dev->agp->memory, head) {
			if (agpmem->bound <= baddr &&
			    agpmem->bound + agpmem->pages * PAGE_SIZE > baddr)
				break;
		}

		if (&agpmem->head == &dev->agp->memory)
			goto vm_fault_error;

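		/*
		 * Get the page, inc the use count, and return it
		 */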
		offset = (baddr - agpmem->bound) >> PAGE_SHIFT;
		page = agpmem->memory->pages[offset];
		get_page(page);
		vmf->page = page;

		DRM_DEBUG
		    ("baddr = 0x%llx page = 0x%p, offset = 0x%llx, count=%d\n",
		     (unsigned long long)baddr,
		     agpmem->memory->pages[offset],
		     (unsigned long long)offset,
		     page_count(page));
		return 0;
	}
vm_fault_error:
	return VM_FAULT_SIGBUS;
}
#else
static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}
#endif

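/**
 * \c fault method for shared virtual memory.
 *
 * \param vma virtual memory area.
 * \param vmf fault information.
 * \return zero on success, or VM_FAULT_SIGBUS on failure.
 *
 * Get the mapping, find the real physical page to map, get the page, and
 * return it.
 */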
static int drm_do_vm_shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_local_map *map = vma->vm_private_data;
	unsigned long offset;
	unsigned long i;
	struct page *page;

	if (!map)
		return VM_FAULT_SIGBUS;

	offset = (unsigned long)vmf->virtual_address - vma->vm_start;
	i = (unsigned long)map->handle + offset;
	page = vmalloc_to_page((void *)i);
	if (!page)
		return VM_FAULT_SIGBUS;
	get_page(page);
	vmf->page = page;

	DRM_DEBUG("shm_fault 0x%lx\n", offset);
	return 0;
}

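/**
 * \c close method for shared virtual memory.
 *
 * \param vma virtual memory area.
 *
 * Deletes map information if we are the last
 * person to close a mapping and it's not in the global maplist.
 */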
static void drm_vm_shm_close(struct vm_area_struct *vma)
{
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_vma_entry *pt, *temp;
	struct drm_local_map *map;
	struct drm_map_list *r_list;
	int found_maps = 0;

	DRM_DEBUG("0x%08lx,0x%08lx\n",
		  vma->vm_start, vma->vm_end - vma->vm_start);

	map = vma->vm_private_data;

	mutex_lock(&dev->struct_mutex);
	list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
		if (pt->vma->vm_private_data == map)
			found_maps++;
		if (pt->vma == vma) {
			list_del(&pt->head);
			kfree(pt);
		}
	}

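	/* We were the only map that was found */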
	if (found_maps == 1 && map->flags & _DRM_REMOVABLE) {
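		/* Check to see if we are in the maplist; if we are not, then
		 * we delete this mapping's information.
		 */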
		found_maps = 0;
		list_for_each_entry(r_list, &dev->maplist, head) {
			if (r_list->map == map)
				found_maps++;
		}

		if (!found_maps) {
			drm_dma_handle_t dmah;

			switch (map->type) {
			case _DRM_REGISTERS:
			case _DRM_FRAME_BUFFER:
				arch_phys_wc_del(map->mtrr);
				iounmap(map->handle);
				break;
			case _DRM_SHM:
				vfree(map->handle);
				break;
			case _DRM_AGP:
			case _DRM_SCATTER_GATHER:
				break;
			case _DRM_CONSISTENT:
				dmah.vaddr = map->handle;
				dmah.busaddr = map->offset;
				dmah.size = map->size;
				__drm_legacy_pci_free(dev, &dmah);
				break;
			}
			kfree(map);
		}
	}
	mutex_unlock(&dev->struct_mutex);
}

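/**
 * \c fault method for DMA virtual memory.
 *
 * \param vma virtual memory area.
 * \param vmf fault information.
 * \return zero on success, or VM_FAULT_SIGBUS on failure.
 *
 * Determine the page number from the page offset and get it from
 * drm_device_dma::pagelist.
 */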
static int drm_do_vm_dma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_device_dma *dma = dev->dma;
	unsigned long offset;
	unsigned long page_nr;
	struct page *page;

	if (!dma)
		return VM_FAULT_SIGBUS;
	if (!dma->pagelist)
		return VM_FAULT_SIGBUS;

	offset = (unsigned long)vmf->virtual_address - vma->vm_start;
	page_nr = offset >> PAGE_SHIFT;
	page = virt_to_page((void *)dma->pagelist[page_nr]);

	get_page(page);
	vmf->page = page;

	DRM_DEBUG("dma_fault 0x%lx (page %lu)\n", offset, page_nr);
	return 0;
}

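/**
 * \c fault method for scatter-gather virtual memory.
 *
 * \param vma virtual memory area.
 * \param vmf fault information.
 * \return zero on success, or VM_FAULT_SIGBUS on failure.
 *
 * Determine the map offset from the page offset and get it from
 * drm_sg_mem::pagelist.
 */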
static int drm_do_vm_sg_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_local_map *map = vma->vm_private_data;
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_sg_mem *entry = dev->sg;
	unsigned long offset;
	unsigned long map_offset;
	unsigned long page_offset;
	struct page *page;

	if (!entry)
		return VM_FAULT_SIGBUS;
	if (!entry->pagelist)
		return VM_FAULT_SIGBUS;

	offset = (unsigned long)vmf->virtual_address - vma->vm_start;
	map_offset = map->offset - (unsigned long)dev->sg->virtual;
	page_offset = (offset >> PAGE_SHIFT) + (map_offset >> PAGE_SHIFT);
	page = entry->pagelist[page_offset];
	get_page(page);
	vmf->page = page;

	return 0;
}

static int drm_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return drm_do_vm_fault(vma, vmf);
}

static int drm_vm_shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return drm_do_vm_shm_fault(vma, vmf);
}

static int drm_vm_dma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return drm_do_vm_dma_fault(vma, vmf);
}

static int drm_vm_sg_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return drm_do_vm_sg_fault(vma, vmf);
}

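/** AGP virtual memory operations */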
static const struct vm_operations_struct drm_vm_ops = {
	.fault = drm_vm_fault,
	.open = drm_vm_open,
	.close = drm_vm_close,
};

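/** Shared virtual memory operations */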
static const struct vm_operations_struct drm_vm_shm_ops = {
	.fault = drm_vm_shm_fault,
	.open = drm_vm_open,
	.close = drm_vm_shm_close,
};

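/** DMA virtual memory operations */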
static const struct vm_operations_struct drm_vm_dma_ops = {
	.fault = drm_vm_dma_fault,
	.open = drm_vm_open,
	.close = drm_vm_close,
};

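/** Scatter-gather virtual memory operations */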
static const struct vm_operations_struct drm_vm_sg_ops = {
	.fault = drm_vm_sg_fault,
	.open = drm_vm_open,
	.close = drm_vm_close,
};

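/**
 * \c open method for shared virtual memory.
 *
 * \param dev DRM device.
 * \param vma virtual memory area.
 *
 * Create a new drm_vma_entry structure as the \p vma private data entry and
 * add it to drm_device::vmalist. Caller must hold dev->struct_mutex.
 */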
void drm_vm_open_locked(struct drm_device *dev,
			struct vm_area_struct *vma)
{
	struct drm_vma_entry *vma_entry;

	DRM_DEBUG("0x%08lx,0x%08lx\n",
		  vma->vm_start, vma->vm_end - vma->vm_start);

	vma_entry = kmalloc(sizeof(*vma_entry), GFP_KERNEL);
	if (vma_entry) {
		vma_entry->vma = vma;
		vma_entry->pid = current->pid;
		list_add(&vma_entry->head, &dev->vmalist);
	}
}

static void drm_vm_open(struct vm_area_struct *vma)
{
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;

	mutex_lock(&dev->struct_mutex);
	drm_vm_open_locked(dev, vma);
	mutex_unlock(&dev->struct_mutex);
}

void drm_vm_close_locked(struct drm_device *dev,
			 struct vm_area_struct *vma)
{
	struct drm_vma_entry *pt, *temp;

	DRM_DEBUG("0x%08lx,0x%08lx\n",
		  vma->vm_start, vma->vm_end - vma->vm_start);

	list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
		if (pt->vma == vma) {
			list_del(&pt->head);
			kfree(pt);
			break;
		}
	}
}

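/**
 * \c close method for all virtual memory types.
 *
 * \param vma virtual memory area.
 *
 * Search the \p vma private data entry in drm_device::vmalist, unlink it, and
 * free it.
 */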
static void drm_vm_close(struct vm_area_struct *vma)
{
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;

	mutex_lock(&dev->struct_mutex);
	drm_vm_close_locked(dev, vma);
	mutex_unlock(&dev->struct_mutex);
}

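/**
 * mmap DMA memory.
 *
 * \param filp file pointer.
 * \param vma virtual memory area.
 * \return zero on success or a negative number on failure.
 *
 * Sets the virtual memory area operations structure to vm_dma_ops, the file
 * pointer, and calls vm_open().
 */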
static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev;
	struct drm_device_dma *dma;
	unsigned long length = vma->vm_end - vma->vm_start;

	dev = priv->minor->dev;
	dma = dev->dma;
	DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
		  vma->vm_start, vma->vm_end, vma->vm_pgoff);

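	/* Length must match exact page count */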
	if (!dma || (length >> PAGE_SHIFT) != dma->page_count) {
		return -EINVAL;
	}

	if (!capable(CAP_SYS_ADMIN) &&
	    (dma->flags & _DRM_DMA_USE_PCI_RO)) {
		vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
#if defined(__i386__) || defined(__x86_64__)
		pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
#else
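		/* On architectures without an x86-style _PAGE_RW bit, clear
		 * the write permission directly in the PTE protection bits.
		 */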
		vma->vm_page_prot =
		    __pgprot(pte_val
			     (pte_wrprotect
			      (__pte(pgprot_val(vma->vm_page_prot)))));
#endif
	}

	vma->vm_ops = &drm_vm_dma_ops;

	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;

	drm_vm_open_locked(dev, vma);
	return 0;
}

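/*
 * Register offset correction: on Alpha the bus address space is offset by
 * the hose's dense memory base; everywhere else no correction is needed.
 */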
static resource_size_t drm_core_get_reg_ofs(struct drm_device *dev)
{
#ifdef __alpha__
	return dev->hose->dense_mem_base;
#else
	return 0;
#endif
}

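/**
 * mmap a DRM memory map.
 *
 * \param filp file pointer.
 * \param vma virtual memory area.
 * \return zero on success or a negative number on failure.
 *
 * If the virtual memory area has no offset associated with it then it's a DMA
 * area, so calls mmap_dma(). Otherwise searches the map in drm_device::maplist,
 * checks that the restricted flag is not set, sets the virtual memory operations
 * according to the map type, and remaps the pages. Finally sets the file pointer
 * and calls vm_open().
 */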
static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_local_map *map = NULL;
	resource_size_t offset = 0;
	struct drm_hash_item *hash;

	DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
		  vma->vm_start, vma->vm_end, vma->vm_pgoff);

	if (!priv->authenticated)
		return -EACCES;

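	/* We check for "dma". On Apple's UniNorth, it's valid to have
	 * the AGP mapped at physical address 0
	 * --BenH.
	 */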
	if (!vma->vm_pgoff
#if __OS_HAS_AGP
	    && (!dev->agp
		|| dev->agp->agp_info.device->vendor != PCI_VENDOR_ID_APPLE)
#endif
	    )
		return drm_mmap_dma(filp, vma);

	if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash)) {
		DRM_ERROR("Could not find map\n");
		return -EINVAL;
	}

	map = drm_hash_entry(hash, struct drm_map_list, hash)->map;
	if (!map || ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN)))
		return -EPERM;

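	/* Check for valid size. */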
	if (map->size < vma->vm_end - vma->vm_start)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN) && (map->flags & _DRM_READ_ONLY)) {
		vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
#if defined(__i386__) || defined(__x86_64__)
		pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
#else
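		/* On architectures without an x86-style _PAGE_RW bit, clear
		 * the write permission directly in the PTE protection bits.
		 */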
		vma->vm_page_prot =
		    __pgprot(pte_val
			     (pte_wrprotect
			      (__pte(pgprot_val(vma->vm_page_prot)))));
#endif
	}

	switch (map->type) {
#if !defined(__arm__)
	case _DRM_AGP:
		if (dev->agp && dev->agp->cant_use_aperture) {
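			/*
			 * On some platforms we can't talk to bus dma address from the CPU, so for
			 * memory of type DRM_AGP, we'll deal with sorting out the real physical
			 * pages and mappings in fault()
			 */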
#if defined(__powerpc__)
			pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
#endif
			vma->vm_ops = &drm_vm_ops;
			break;
		}
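		/* fall through to _DRM_FRAME_BUFFER... */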
#endif
	case _DRM_FRAME_BUFFER:
	case _DRM_REGISTERS:
		offset = drm_core_get_reg_ofs(dev);
		vma->vm_page_prot = drm_io_prot(map, vma);
		if (io_remap_pfn_range(vma, vma->vm_start,
				       (map->offset + offset) >> PAGE_SHIFT,
				       vma->vm_end - vma->vm_start,
				       vma->vm_page_prot))
			return -EAGAIN;
		DRM_DEBUG("   Type = %d; start = 0x%lx, end = 0x%lx,"
			  " offset = 0x%llx\n",
			  map->type,
			  vma->vm_start, vma->vm_end, (unsigned long long)(map->offset + offset));

		vma->vm_ops = &drm_vm_ops;
		break;
	case _DRM_CONSISTENT:
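		/* Consistent memory is really like shared memory. But
		 * it's allocated in a different way, so avoid fault */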
		if (remap_pfn_range(vma, vma->vm_start,
				    page_to_pfn(virt_to_page(map->handle)),
				    vma->vm_end - vma->vm_start, vma->vm_page_prot))
			return -EAGAIN;
		vma->vm_page_prot = drm_dma_prot(map->type, vma);
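	/* fall through to _DRM_SHM */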
	case _DRM_SHM:
		vma->vm_ops = &drm_vm_shm_ops;
		vma->vm_private_data = (void *)map;
		break;
	case _DRM_SCATTER_GATHER:
		vma->vm_ops = &drm_vm_sg_ops;
		vma->vm_private_data = (void *)map;
		vma->vm_page_prot = drm_dma_prot(map->type, vma);
		break;
	default:
		return -EINVAL;
	}
	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;

	drm_vm_open_locked(dev, vma);
	return 0;
}

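/**
 * drm_legacy_mmap - mmap entry point for legacy DRM drivers
 * @filp: file pointer
 * @vma: virtual memory area
 *
 * Takes dev->struct_mutex and hands off to drm_mmap_locked(). Legacy drivers
 * point the mmap member of their struct file_operations at this function.
 *
 * Return: zero on success or a negative error code on failure.
 */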
int drm_legacy_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	int ret;

	if (drm_device_is_unplugged(dev))
		return -ENODEV;

	mutex_lock(&dev->struct_mutex);
	ret = drm_mmap_locked(filp, vma);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
EXPORT_SYMBOL(drm_legacy_mmap);

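/* Remove any leftover vma entries at device teardown. */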
void drm_legacy_vma_flush(struct drm_device *dev)
{
	struct drm_vma_entry *vma, *vma_temp;

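	/* Clear vma list (only needed for legacy drivers) */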
	list_for_each_entry_safe(vma, vma_temp, &dev->vmalist, head) {
		list_del(&vma->head);
		kfree(vma);
	}
}

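/* debugfs/proc helper: dump the list of VMAs currently mapping the device. */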
int drm_vma_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_vma_entry *pt;
	struct vm_area_struct *vma;
	unsigned long vma_count = 0;
#if defined(__i386__)
	unsigned int pgprot;
#endif

	mutex_lock(&dev->struct_mutex);
	list_for_each_entry(pt, &dev->vmalist, head)
		vma_count++;

	seq_printf(m, "vma use count: %lu, high_memory = %pK, 0x%pK\n",
		   vma_count, high_memory,
		   (void *)(unsigned long)virt_to_phys(high_memory));

	list_for_each_entry(pt, &dev->vmalist, head) {
		vma = pt->vma;
		if (!vma)
			continue;
		seq_printf(m,
			   "\n%5d 0x%pK-0x%pK %c%c%c%c%c%c 0x%08lx000",
			   pt->pid,
			   (void *)vma->vm_start, (void *)vma->vm_end,
			   vma->vm_flags & VM_READ ? 'r' : '-',
			   vma->vm_flags & VM_WRITE ? 'w' : '-',
			   vma->vm_flags & VM_EXEC ? 'x' : '-',
			   vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
			   vma->vm_flags & VM_LOCKED ? 'l' : '-',
			   vma->vm_flags & VM_IO ? 'i' : '-',
			   vma->vm_pgoff);

#if defined(__i386__)
		pgprot = pgprot_val(vma->vm_page_prot);
		seq_printf(m, " %c%c%c%c%c%c%c%c%c",
			   pgprot & _PAGE_PRESENT ? 'p' : '-',
			   pgprot & _PAGE_RW ? 'w' : 'r',
			   pgprot & _PAGE_USER ? 'u' : 's',
			   pgprot & _PAGE_PWT ? 't' : 'b',
			   pgprot & _PAGE_PCD ? 'u' : 'c',
			   pgprot & _PAGE_ACCESSED ? 'a' : '-',
			   pgprot & _PAGE_DIRTY ? 'd' : '-',
			   pgprot & _PAGE_PSE ? 'm' : 'k',
			   pgprot & _PAGE_GLOBAL ? 'g' : 'l');
#endif
		seq_printf(m, "\n");
	}
	mutex_unlock(&dev->struct_mutex);
	return 0;
}