#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <drm/drmP.h>
#include <drm/drm_vma_manager.h>
#include <drm/drm_gem.h>
#include "drm_internal.h"

/**
 * DOC: overview
 *
 * This file provides some of the base ioctls and library routines for the
 * graphics memory manager implemented by each device driver: allocating and
 * freeing driver-agnostic buffer objects, translating between per-file
 * handles, global flink names and object pointers, and setting up the fake
 * mmap offsets through which objects are mapped into userspace. Because
 * devices differ in their synchronization and migration requirements, those
 * policies are left to the individual drivers.
 */
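
/*
 * A minimal sketch of the typical driver flow built on these helpers,
 * assuming a hypothetical mydrv_bo wrapper struct; this is illustration,
 * not taken from a real driver:
 *
 *	struct mydrv_bo {
 *		struct drm_gem_object base;
 *	};
 *
 *	int mydrv_bo_create(struct drm_device *dev, struct drm_file *file,
 *			    size_t size, u32 *handle)
 *	{
 *		struct mydrv_bo *bo = kzalloc(sizeof(*bo), GFP_KERNEL);
 *		int ret;
 *
 *		if (!bo)
 *			return -ENOMEM;
 *		ret = drm_gem_object_init(dev, &bo->base, PAGE_ALIGN(size));
 *		if (ret) {
 *			kfree(bo);
 *			return ret;
 *		}
 *		ret = drm_gem_handle_create(file, &bo->base, handle);
 *		drm_gem_object_unreference_unlocked(&bo->base);
 *		return ret;
 *	}
 *
 * drm_gem_handle_create() takes its own reference, so the initial reference
 * is dropped once the handle exists; the handle then keeps the object alive.
 */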

/*
 * We make up offsets for buffer objects so we can recognize them at
 * mmap time. pgoff in mmap is an unsigned long, so we need to make sure
 * that the faked up offset will fit.
 */
#if BITS_PER_LONG == 64
#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFFUL >> PAGE_SHIFT) + 1)
#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFFUL >> PAGE_SHIFT) * 16)
#else
#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFUL >> PAGE_SHIFT) + 1)
#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFUL >> PAGE_SHIFT) * 16)
#endif

/**
 * drm_gem_init - initialize the GEM core for a DRM device
 * @dev: drm_device to initialize
 */
int
drm_gem_init(struct drm_device *dev)
{
	struct drm_vma_offset_manager *vma_offset_manager;

	mutex_init(&dev->object_name_lock);
	idr_init(&dev->object_name_idr);

	vma_offset_manager = kzalloc(sizeof(*vma_offset_manager), GFP_KERNEL);
	if (!vma_offset_manager) {
		DRM_ERROR("out of memory\n");
		return -ENOMEM;
	}

	dev->vma_offset_manager = vma_offset_manager;
	drm_vma_offset_manager_init(vma_offset_manager,
				    DRM_FILE_PAGE_OFFSET_START,
				    DRM_FILE_PAGE_OFFSET_SIZE);

	return 0;
}

void
drm_gem_destroy(struct drm_device *dev)
{
	drm_vma_offset_manager_destroy(dev->vma_offset_manager);
	kfree(dev->vma_offset_manager);
	dev->vma_offset_manager = NULL;
}

/**
 * drm_gem_object_init - initialize an allocated shmem-backed GEM object
 * @dev: drm_device the object should be initialized for
 * @obj: drm_gem_object to initialize
 * @size: object size
 *
 * Initialize an already allocated GEM object of the specified size with
 * shmfs backing store.
 */
int drm_gem_object_init(struct drm_device *dev,
			struct drm_gem_object *obj, size_t size)
{
	struct file *filp;

	drm_gem_private_object_init(dev, obj, size);

	filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
	if (IS_ERR(filp))
		return PTR_ERR(filp);

	obj->filp = filp;

	return 0;
}
EXPORT_SYMBOL(drm_gem_object_init);

/**
 * drm_gem_private_object_init - initialize an allocated private GEM object
 * @dev: drm_device the object should be initialized for
 * @obj: drm_gem_object to initialize
 * @size: object size
 *
 * Initialize an already allocated GEM object of the specified size with no
 * GEM-provided backing store. Instead the caller is responsible for backing
 * the object and handling it.
 */
void drm_gem_private_object_init(struct drm_device *dev,
				 struct drm_gem_object *obj, size_t size)
{
	BUG_ON((size & (PAGE_SIZE - 1)) != 0);

	obj->dev = dev;
	obj->filp = NULL;

	kref_init(&obj->refcount);
	obj->handle_count = 0;
	obj->size = size;
	drm_vma_node_reset(&obj->vma_node);
}
EXPORT_SYMBOL(drm_gem_private_object_init);
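
/*
 * A minimal sketch of the import path this helper serves, assuming the same
 * hypothetical mydrv_bo wrapper as above: objects backed by memory GEM does
 * not own, such as an imported dma-buf, use the private variant so that no
 * shmem file is created, and the driver keeps its own backing (here an
 * imported sg table):
 *
 *	drm_gem_private_object_init(dev, &bo->base,
 *				    PAGE_ALIGN(attach->dmabuf->size));
 *	bo->sgt = sgt;
 */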

static void
drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp)
{
	/*
	 * Note: obj->dma_buf can't disappear as long as we still hold a
	 * handle reference in obj->handle_count.
	 */
	mutex_lock(&filp->prime.lock);
	if (obj->dma_buf) {
		drm_prime_remove_buf_handle_locked(&filp->prime,
						   obj->dma_buf);
	}
	mutex_unlock(&filp->prime.lock);
}

/**
 * drm_gem_object_handle_free - release resources bound to userspace handles
 * @obj: GEM object to clean up.
 *
 * Called after the last handle to the object has been closed.
 *
 * Removes any name for the object. Note that this must be called before
 * drm_gem_object_free() or we'll be touching freed memory.
 */
static void drm_gem_object_handle_free(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;

	/* Remove any name for this object */
	if (obj->name) {
		idr_remove(&dev->object_name_idr, obj->name);
		obj->name = 0;
	}
}

static void drm_gem_object_exported_dma_buf_free(struct drm_gem_object *obj)
{
	/* Unbreak the reference cycle if we have an exported dma_buf. */
	if (obj->dma_buf) {
		dma_buf_put(obj->dma_buf);
		obj->dma_buf = NULL;
	}
}

static void
drm_gem_object_handle_unreference_unlocked(struct drm_gem_object *obj)
{
	if (WARN_ON(obj->handle_count == 0))
		return;

	/*
	 * The handle count is protected by dev->object_name_lock so that
	 * the flink name and any exported dma_buf are torn down together
	 * with the last handle.
	 */
	mutex_lock(&obj->dev->object_name_lock);
	if (--obj->handle_count == 0) {
		drm_gem_object_handle_free(obj);
		drm_gem_object_exported_dma_buf_free(obj);
	}
	mutex_unlock(&obj->dev->object_name_lock);

	drm_gem_object_unreference_unlocked(obj);
}

/**
 * drm_gem_handle_delete - deletes the given file-private handle
 * @filp: drm file-private structure to use for the handle look up
 * @handle: userspace handle to delete
 *
 * Removes the GEM handle from the @filp lookup table which has been added
 * with drm_gem_handle_create(). If this is the last handle also cleans up
 * linked resources like GEM names.
 */
int
drm_gem_handle_delete(struct drm_file *filp, u32 handle)
{
	struct drm_device *dev;
	struct drm_gem_object *obj;

	/* This is gross. The idr system doesn't let us try a delete and
	 * return an error code.  It just spews if you fail at deleting.
	 * So, we have to grab a lock around finding the object and then
	 * doing the delete on it and dropping the refcount, or the user
	 * could race us to double-decrement the refcount and cause a
	 * use-after-free later.  Given the frequency of our handle lookups,
	 * we may want to use ida for number allocation and a hash table
	 * for the pointers, anyway.
	 */
	spin_lock(&filp->table_lock);

	/* Check if we currently have a reference on the object */
	obj = idr_find(&filp->object_idr, handle);
	if (obj == NULL) {
		spin_unlock(&filp->table_lock);
		return -EINVAL;
	}
	dev = obj->dev;

	/* Release reference and decrement refcount. */
	idr_remove(&filp->object_idr, handle);
	spin_unlock(&filp->table_lock);

	if (drm_core_check_feature(dev, DRIVER_PRIME))
		drm_gem_remove_prime_handles(obj, filp);
	drm_vma_node_revoke(&obj->vma_node, filp->filp);

	if (dev->driver->gem_close_object)
		dev->driver->gem_close_object(obj, filp);
	drm_gem_object_handle_unreference_unlocked(obj);

	return 0;
}
EXPORT_SYMBOL(drm_gem_handle_delete);

/**
 * drm_gem_dumb_destroy - dumb fb callback helper for gem based drivers
 * @file: drm file-private structure to remove the dumb handle from
 * @dev: corresponding drm_device
 * @handle: the dumb handle to remove
 *
 * This implements the ->dumb_destroy kms driver callback for drivers which
 * use gem to manage their backing storage.
 */
int drm_gem_dumb_destroy(struct drm_file *file,
			 struct drm_device *dev,
			 uint32_t handle)
{
	return drm_gem_handle_delete(file, handle);
}
EXPORT_SYMBOL(drm_gem_dumb_destroy);

/**
 * drm_gem_handle_create_tail - internal function to create a handle
 * @file_priv: drm file-private structure to register the handle for
 * @obj: object to register
 * @handlep: pointer to return the created handle to the caller
 *
 * This expects the dev->object_name_lock to be held already and will drop it
 * before returning. Used to avoid races in establishing new handles when
 * importing an object from either an flink name or a dma-buf.
 */
int
drm_gem_handle_create_tail(struct drm_file *file_priv,
			   struct drm_gem_object *obj,
			   u32 *handlep)
{
	struct drm_device *dev = obj->dev;
	int ret;

	WARN_ON(!mutex_is_locked(&dev->object_name_lock));

	/*
	 * Get the user-visible handle using idr.  Preload and perform
	 * allocation under our spinlock.
	 */
	idr_preload(GFP_KERNEL);
	spin_lock(&file_priv->table_lock);

	ret = idr_alloc(&file_priv->object_idr, obj, 1, 0, GFP_NOWAIT);
	drm_gem_object_reference(obj);
	obj->handle_count++;
	spin_unlock(&file_priv->table_lock);
	idr_preload_end();
	mutex_unlock(&dev->object_name_lock);
	if (ret < 0) {
		drm_gem_object_handle_unreference_unlocked(obj);
		return ret;
	}
	*handlep = ret;

	ret = drm_vma_node_allow(&obj->vma_node, file_priv->filp);
	if (ret) {
		drm_gem_handle_delete(file_priv, *handlep);
		return ret;
	}

	if (dev->driver->gem_open_object) {
		ret = dev->driver->gem_open_object(obj, file_priv);
		if (ret) {
			drm_gem_handle_delete(file_priv, *handlep);
			return ret;
		}
	}

	return 0;
}

/**
 * drm_gem_handle_create - create a gem handle for an object
 * @file_priv: drm file-private structure to register the handle for
 * @obj: object to register
 * @handlep: pointer to return the created handle to the caller
 *
 * Create a handle for this object. This adds a handle reference
 * to the object, which includes a regular reference count. Callers
 * will likely want to dereference the object afterwards.
 */
int drm_gem_handle_create(struct drm_file *file_priv,
			  struct drm_gem_object *obj,
			  u32 *handlep)
{
	mutex_lock(&obj->dev->object_name_lock);

	return drm_gem_handle_create_tail(file_priv, obj, handlep);
}
EXPORT_SYMBOL(drm_gem_handle_create);

/**
 * drm_gem_free_mmap_offset - release a fake mmap offset for an object
 * @obj: obj in question
 *
 * This routine frees fake offsets allocated by drm_gem_create_mmap_offset().
 */
void
drm_gem_free_mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;

	drm_vma_offset_remove(dev->vma_offset_manager, &obj->vma_node);
}
EXPORT_SYMBOL(drm_gem_free_mmap_offset);

/**
 * drm_gem_create_mmap_offset_size - create a fake mmap offset for an object
 * @obj: obj in question
 * @size: the virtual size
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call.  The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj, in cases where
 * the virtual size differs from the physical size (i.e. obj->size).
 * Otherwise just use drm_gem_create_mmap_offset().
 */
int
drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size)
{
	struct drm_device *dev = obj->dev;

	return drm_vma_offset_add(dev->vma_offset_manager, &obj->vma_node,
				  size / PAGE_SIZE);
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset_size);

/**
 * drm_gem_create_mmap_offset - create a fake mmap offset for an object
 * @obj: obj in question
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call.  The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj.
 */
int drm_gem_create_mmap_offset(struct drm_gem_object *obj)
{
	return drm_gem_create_mmap_offset_size(obj, obj->size);
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset);
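
/*
 * A minimal sketch of how a driver hands the fake offset back to userspace,
 * e.g. from a hypothetical mydrv "map" ioctl (the offset helpers are real,
 * the surrounding ioctl is not):
 *
 *	int ret = drm_gem_create_mmap_offset(obj);
 *
 *	if (ret)
 *		return ret;
 *	args->offset = drm_vma_node_offset_addr(&obj->vma_node);
 *
 * Userspace then passes args->offset as the offset argument of mmap(2) on
 * the DRM fd, which ends up in drm_gem_mmap() below.
 */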

/**
 * drm_gem_get_pages - helper to allocate backing pages for a GEM object
 * from shmem
 * @obj: obj in question
 *
 * This reads the page-array of the shmem-backing storage of the given gem
 * object. An array of pages is returned. If a page is not allocated or
 * swapped-out, this will allocate/swap-in the required pages. Note that the
 * whole object is covered by the page-array and pinned in memory.
 *
 * Use drm_gem_put_pages() to release the array and unpin all pages.
 *
 * This uses the GFP-mask set on the shmem-mapping (see
 * mapping_set_gfp_mask()). If you require other GFP-masks, you have to set
 * those manually on the underlying shmem mapping.
 */
struct page **drm_gem_get_pages(struct drm_gem_object *obj)
{
	struct address_space *mapping;
	struct page *p, **pages;
	int i, npages;

	/* This is the shared memory object that backs the GEM resource */
	mapping = file_inode(obj->filp)->i_mapping;

	/* We already BUG_ON() for non-page-aligned sizes in
	 * drm_gem_object_init(), so we should never hit this unless
	 * driver author is doing something really wrong:
	 */
	WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);

	npages = obj->size >> PAGE_SHIFT;

	pages = drm_malloc_ab(npages, sizeof(struct page *));
	if (pages == NULL)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < npages; i++) {
		p = shmem_read_mapping_page(mapping, i);
		if (IS_ERR(p))
			goto fail;
		pages[i] = p;

		/* Make sure shmem keeps __GFP_DMA32 allocated pages in the
		 * correct region during swapin. Note that this requires
		 * __GFP_DMA32 to be set in mapping_gfp_mask(inode->i_mapping),
		 * so shmem can relocate pages during swapin if required.
		 */
		BUG_ON(mapping_gfp_constraint(mapping, __GFP_DMA32) &&
				(page_to_pfn(p) >= 0x00100000UL));
	}

	return pages;

fail:
	while (i--)
		page_cache_release(pages[i]);

	drm_free_large(pages);
	return ERR_CAST(p);
}
EXPORT_SYMBOL(drm_gem_get_pages);

/**
 * drm_gem_put_pages - helper to free backing pages for a GEM object
 * @obj: obj in question
 * @pages: pages to free
 * @dirty: if true, pages will be marked as dirty
 * @accessed: if true, the pages will be marked as accessed
 */
void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
		       bool dirty, bool accessed)
{
	int i, npages;

	/* We already BUG_ON() for non-page-aligned sizes in
	 * drm_gem_object_init(), so we should never hit this unless
	 * driver author is doing something really wrong:
	 */
	WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);

	npages = obj->size >> PAGE_SHIFT;

	for (i = 0; i < npages; i++) {
		if (dirty)
			set_page_dirty(pages[i]);

		if (accessed)
			mark_page_accessed(pages[i]);

		/* Undo the reference we took when populating the table */
		page_cache_release(pages[i]);
	}

	drm_free_large(pages);
}
EXPORT_SYMBOL(drm_gem_put_pages);
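
/*
 * A minimal sketch of pairing the two page helpers when a driver needs the
 * backing pages pinned, for example to build a scatterlist (error handling
 * trimmed; drm_prime_pages_to_sg() is a real helper, the surrounding code
 * is hypothetical):
 *
 *	struct page **pages = drm_gem_get_pages(obj);
 *
 *	if (IS_ERR(pages))
 *		return PTR_ERR(pages);
 *	sgt = drm_prime_pages_to_sg(pages, obj->size >> PAGE_SHIFT);
 *	...
 *	drm_gem_put_pages(obj, pages, false, false);
 */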

/** Returns a reference to the object named by the handle. */
struct drm_gem_object *
drm_gem_object_lookup(struct drm_device *dev, struct drm_file *filp,
		      u32 handle)
{
	struct drm_gem_object *obj;

	spin_lock(&filp->table_lock);

	/* Check if we currently have a reference on the object */
	obj = idr_find(&filp->object_idr, handle);
	if (obj == NULL) {
		spin_unlock(&filp->table_lock);
		return NULL;
	}

	drm_gem_object_reference(obj);

	spin_unlock(&filp->table_lock);

	return obj;
}
EXPORT_SYMBOL(drm_gem_object_lookup);

/**
 * drm_gem_close_ioctl - implementation of the GEM_CLOSE ioctl
 * @dev: drm_device
 * @data: ioctl data
 * @file_priv: drm file-private structure
 *
 * Releases the handle to an mm object.
 */
int
drm_gem_close_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_gem_close *args = data;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_GEM))
		return -ENODEV;

	ret = drm_gem_handle_delete(file_priv, args->handle);

	return ret;
}

/**
 * drm_gem_flink_ioctl - implementation of the GEM_FLINK ioctl
 * @dev: drm_device
 * @data: ioctl data
 * @file_priv: drm file-private structure
 *
 * Create a global name for an object, returning the name.
 *
 * Note that the name does not hold a reference; when the object
 * is freed, the name goes away.
 */
int
drm_gem_flink_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_gem_flink *args = data;
	struct drm_gem_object *obj;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_GEM))
		return -ENODEV;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -ENOENT;

	mutex_lock(&dev->object_name_lock);
	idr_preload(GFP_KERNEL);
	/* prevent races with concurrent gem_close. */
	if (obj->handle_count == 0) {
		ret = -ENOENT;
		goto err;
	}

	if (!obj->name) {
		ret = idr_alloc(&dev->object_name_idr, obj, 1, 0, GFP_NOWAIT);
		if (ret < 0)
			goto err;

		obj->name = ret;
	}

	args->name = (uint64_t) obj->name;
	ret = 0;

err:
	idr_preload_end();
	mutex_unlock(&dev->object_name_lock);
	drm_gem_object_unreference_unlocked(obj);
	return ret;
}

/**
 * drm_gem_open_ioctl - implementation of the GEM_OPEN ioctl
 * @dev: drm_device
 * @data: ioctl data
 * @file_priv: drm file-private structure
 *
 * Open an object using the global name, returning a handle and the size.
 *
 * This handle (of course) holds a reference to the object, so the object
 * will not go away until the handle is deleted.
 */
int
drm_gem_open_ioctl(struct drm_device *dev, void *data,
		   struct drm_file *file_priv)
{
	struct drm_gem_open *args = data;
	struct drm_gem_object *obj;
	int ret;
	u32 handle;

	if (!drm_core_check_feature(dev, DRIVER_GEM))
		return -ENODEV;

	mutex_lock(&dev->object_name_lock);
	obj = idr_find(&dev->object_name_idr, (int) args->name);
	if (obj) {
		drm_gem_object_reference(obj);
	} else {
		mutex_unlock(&dev->object_name_lock);
		return -ENOENT;
	}

	/* drm_gem_handle_create_tail unlocks dev->object_name_lock. */
	ret = drm_gem_handle_create_tail(file_priv, obj, &handle);
	drm_gem_object_unreference_unlocked(obj);
	if (ret)
		return ret;

	args->handle = handle;
	args->size = obj->size;

	return 0;
}

/**
 * drm_gem_open - initialize GEM file-private structures at devnode open time
 * @dev: drm_device which is being opened by userspace
 * @file_private: drm file-private structure to set up
 *
 * Called at device open time, sets up the structure for handling refcounting
 * of mm objects.
 */
void
drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
{
	idr_init(&file_private->object_idr);
	spin_lock_init(&file_private->table_lock);
}

/*
 * Called at device close to release the file's
 * handle references on objects.
 */
static int
drm_gem_object_release_handle(int id, void *ptr, void *data)
{
	struct drm_file *file_priv = data;
	struct drm_gem_object *obj = ptr;
	struct drm_device *dev = obj->dev;

	if (drm_core_check_feature(dev, DRIVER_PRIME))
		drm_gem_remove_prime_handles(obj, file_priv);
	drm_vma_node_revoke(&obj->vma_node, file_priv->filp);

	if (dev->driver->gem_close_object)
		dev->driver->gem_close_object(obj, file_priv);

	drm_gem_object_handle_unreference_unlocked(obj);

	return 0;
}

/**
 * drm_gem_release - release file-private GEM resources
 * @dev: drm_device which is being closed by userspace
 * @file_private: drm file-private structure to clean up
 *
 * Called at close time when the filp is going away.
 *
 * Releases any remaining references on objects by this filp.
 */
void
drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
{
	idr_for_each(&file_private->object_idr,
		     &drm_gem_object_release_handle, file_private);
	idr_destroy(&file_private->object_idr);
}

void
drm_gem_object_release(struct drm_gem_object *obj)
{
	WARN_ON(obj->dma_buf);

	if (obj->filp)
		fput(obj->filp);

	drm_gem_free_mmap_offset(obj);
}
EXPORT_SYMBOL(drm_gem_object_release);

/**
 * drm_gem_object_free - free a GEM object
 * @kref: kref of the object to free
 *
 * Called after the last reference to the object has been lost.
 * Must be called holding dev->struct_mutex.
 *
 * Frees the object.
 */
void
drm_gem_object_free(struct kref *kref)
{
	struct drm_gem_object *obj =
		container_of(kref, struct drm_gem_object, refcount);
	struct drm_device *dev = obj->dev;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	if (dev->driver->gem_free_object != NULL)
		dev->driver->gem_free_object(obj);
}
EXPORT_SYMBOL(drm_gem_object_free);

void drm_gem_vm_open(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;

	drm_gem_object_reference(obj);
}
EXPORT_SYMBOL(drm_gem_vm_open);

void drm_gem_vm_close(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;

	drm_gem_object_unreference_unlocked(obj);
}
EXPORT_SYMBOL(drm_gem_vm_close);

/**
 * drm_gem_mmap_obj - memory map a GEM object
 * @obj: the GEM object to map
 * @obj_size: the object size to be mapped, in bytes
 * @vma: VMA for the area to be mapped
 *
 * Set up the VMA to prepare mapping of the GEM object using the gem_vm_ops
 * provided by the driver. Depending on their requirements, drivers can either
 * provide a fault handler in their gem_vm_ops (in which case any accesses to
 * the object will be trapped, to perform migration, GTT binding, surface
 * register allocation, or performance monitoring), or mmap the buffer memory
 * synchronously after calling drm_gem_mmap_obj.
 *
 * This function is mainly intended to implement the DMABUF mmap operation,
 * when the GEM object is not looked up based on its fake offset. To implement
 * the DRM mmap operation, drivers should use the drm_gem_mmap() function.
 *
 * drm_gem_mmap_obj() assumes the user is granted access to the buffer while
 * drm_gem_mmap() prevents unprivileged users from mapping random objects. So
 * callers must verify access restrictions before calling this helper.
 *
 * Return 0 on success, -EINVAL if the object size is smaller than the VMA
 * size or if no gem_vm_ops are provided.
 */
int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
		     struct vm_area_struct *vma)
{
	struct drm_device *dev = obj->dev;

	/* Check for valid size. */
	if (obj_size < vma->vm_end - vma->vm_start)
		return -EINVAL;

	if (!dev->driver->gem_vm_ops)
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_ops = dev->driver->gem_vm_ops;
	vma->vm_private_data = obj;
	vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));

	/* Take a ref for this mapping of the object, so that the object
	 * doesn't get freed as the refcount drops from the handle->obj
	 * mapping until userspace unmaps the object. The vm_open hook is
	 * not called for the initial mmap, so this reference is dropped
	 * only by drm_gem_vm_close().
	 */
	drm_gem_object_reference(obj);

	return 0;
}
EXPORT_SYMBOL(drm_gem_mmap_obj);

/**
 * drm_gem_mmap - memory map routine for GEM objects
 * @filp: DRM file pointer
 * @vma: VMA for the area to be mapped
 *
 * If a driver supports GEM object mapping, mmap calls on the DRM file
 * descriptor will end up here.
 *
 * Look up the GEM object based on the offset passed in (vma->vm_pgoff will
 * contain the fake offset we created when the GTT map ioctl was called on
 * the object) and map it with a call to drm_gem_mmap_obj().
 *
 * If the caller is not granted access to the buffer object, the mmap will
 * fail with EACCES. Please see the vma manager for more information.
 */
int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_gem_object *obj = NULL;
	struct drm_vma_offset_node *node;
	int ret;

	if (drm_device_is_unplugged(dev))
		return -ENODEV;

	drm_vma_offset_lock_lookup(dev->vma_offset_manager);
	node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
						  vma->vm_pgoff,
						  vma_pages(vma));
	if (likely(node)) {
		obj = container_of(node, struct drm_gem_object, vma_node);
		/*
		 * When the object is being freed, after it hits 0-refcnt it
		 * proceeds to tear down the object. In the process it will
		 * attempt to remove the VMA offset and so acquire this
		 * mgr->vm_lock.  Therefore if we find an object with a
		 * 0-refcnt that matches our range, we know it is in the
		 * process of being destroyed and will be freed as soon as
		 * we release the lock - so we have to check for the 0-refcnt
		 * object and treat it as invalid.
		 */
		if (!kref_get_unless_zero(&obj->refcount))
			obj = NULL;
	}
	drm_vma_offset_unlock_lookup(dev->vma_offset_manager);

	if (!obj)
		return -EINVAL;

	if (!drm_vma_node_is_allowed(node, filp)) {
		drm_gem_object_unreference_unlocked(obj);
		return -EACCES;
	}

	ret = drm_gem_mmap_obj(obj, drm_vma_node_size(node) << PAGE_SHIFT,
			       vma);

	drm_gem_object_unreference_unlocked(obj);

	return ret;
}
EXPORT_SYMBOL(drm_gem_mmap);
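
/*
 * A minimal sketch of how drm_gem_mmap() is typically wired up: the driver
 * points its file_operations at the helper and provides gem_vm_ops with a
 * fault handler (mydrv_gem_fault is a hypothetical driver function; the
 * remaining entries are real DRM core helpers):
 *
 *	static const struct vm_operations_struct mydrv_gem_vm_ops = {
 *		.fault = mydrv_gem_fault,
 *		.open = drm_gem_vm_open,
 *		.close = drm_gem_vm_close,
 *	};
 *
 *	static const struct file_operations mydrv_fops = {
 *		.owner = THIS_MODULE,
 *		.open = drm_open,
 *		.release = drm_release,
 *		.unlocked_ioctl = drm_ioctl,
 *		.mmap = drm_gem_mmap,
 *	};
 */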