#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <linux/mem_encrypt.h>
#include <linux/pagevec.h>

#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_gem.h>
#include <drm/drm_print.h>
#include <drm/drm_vma_manager.h>

#include "drm_internal.h"

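/**
 * DOC: overview
 *
 * This file provides the core of the Graphics Execution Manager (GEM):
 * buffer object initialization and release, per-file handle tables,
 * global names (flink), fake mmap offsets, and the common mmap, lookup,
 * reservation-locking and fence helpers shared by GEM-based drivers.
 */

/**
 * drm_gem_init - Initialize the GEM device fields
 * @dev: drm_device structure to initialize
 */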
int
drm_gem_init(struct drm_device *dev)
{
        struct drm_vma_offset_manager *vma_offset_manager;

        mutex_init(&dev->object_name_lock);
        idr_init_base(&dev->object_name_idr, 1);

        vma_offset_manager = kzalloc(sizeof(*vma_offset_manager), GFP_KERNEL);
        if (!vma_offset_manager) {
                DRM_ERROR("out of memory\n");
                return -ENOMEM;
        }

        dev->vma_offset_manager = vma_offset_manager;
        drm_vma_offset_manager_init(vma_offset_manager,
                                    DRM_FILE_PAGE_OFFSET_START,
                                    DRM_FILE_PAGE_OFFSET_SIZE);

        return 0;
}

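/**
 * drm_gem_destroy - uninitialize the GEM device fields
 * @dev: drm_device which is being unloaded
 */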
void
drm_gem_destroy(struct drm_device *dev)
{
        drm_vma_offset_manager_destroy(dev->vma_offset_manager);
        kfree(dev->vma_offset_manager);
        dev->vma_offset_manager = NULL;
}

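/**
 * drm_gem_object_init - initialize an allocated shmem-backed GEM object
 * @dev: drm_device the object should be initialized for
 * @obj: drm_gem_object to initialize
 * @size: object size
 *
 * Initialize an already allocated GEM object of the specified size with
 * shmfs backing store.
 */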
int drm_gem_object_init(struct drm_device *dev,
                        struct drm_gem_object *obj, size_t size)
{
        struct file *filp;

        drm_gem_private_object_init(dev, obj, size);

        filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
        if (IS_ERR(filp))
                return PTR_ERR(filp);

        obj->filp = filp;

        return 0;
}
EXPORT_SYMBOL(drm_gem_object_init);

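/**
 * drm_gem_private_object_init - initialize an allocated private GEM object
 * @dev: drm_device the object should be initialized for
 * @obj: drm_gem_object to initialize
 * @size: object size
 *
 * Initialize an already allocated GEM object of the specified size with
 * no GEM-provided backing store. Instead the caller is responsible for
 * backing the object and handling it.
 */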
void drm_gem_private_object_init(struct drm_device *dev,
                                 struct drm_gem_object *obj, size_t size)
{
        BUG_ON((size & (PAGE_SIZE - 1)) != 0);

        obj->dev = dev;
        obj->filp = NULL;

        kref_init(&obj->refcount);
        obj->handle_count = 0;
        obj->size = size;
        reservation_object_init(&obj->_resv);
        if (!obj->resv)
                obj->resv = &obj->_resv;

        drm_vma_node_reset(&obj->vma_node);
}
EXPORT_SYMBOL(drm_gem_private_object_init);

static void
drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp)
{
        /*
         * Note: obj->dma_buf can't disappear as long as we still hold a
         * handle reference in obj->handle_count.
         */
        mutex_lock(&filp->prime.lock);
        if (obj->dma_buf) {
                drm_prime_remove_buf_handle_locked(&filp->prime,
                                                   obj->dma_buf);
        }
        mutex_unlock(&filp->prime.lock);
}

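/*
 * Called after the last handle to the object has been released.
 * Removes any name for the object.
 */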
static void drm_gem_object_handle_free(struct drm_gem_object *obj)
{
        struct drm_device *dev = obj->dev;

        /* Remove any name for this object */
        if (obj->name) {
                idr_remove(&dev->object_name_idr, obj->name);
                obj->name = 0;
        }
}

static void drm_gem_object_exported_dma_buf_free(struct drm_gem_object *obj)
{
        /* Unbreak the reference cycle if we have an exported dma_buf. */
        if (obj->dma_buf) {
                dma_buf_put(obj->dma_buf);
                obj->dma_buf = NULL;
        }
}

static void
drm_gem_object_handle_put_unlocked(struct drm_gem_object *obj)
{
        struct drm_device *dev = obj->dev;
        bool final = false;

        if (WARN_ON(obj->handle_count == 0))
                return;

        /*
         * dev->object_name_lock protects handle_count against concurrent
         * flink/open: only once the count has dropped to zero under the
         * lock is it safe to release the name and the exported dma_buf
         * reference. The handle's object reference is dropped outside
         * the lock.
         */

        mutex_lock(&dev->object_name_lock);
        if (--obj->handle_count == 0) {
                drm_gem_object_handle_free(obj);
                drm_gem_object_exported_dma_buf_free(obj);
                final = true;
        }
        mutex_unlock(&dev->object_name_lock);

        if (final)
                drm_gem_object_put_unlocked(obj);
}

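/*
 * Called at device or object close to release the file's
 * handle references on objects.
 */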
static int
drm_gem_object_release_handle(int id, void *ptr, void *data)
{
        struct drm_file *file_priv = data;
        struct drm_gem_object *obj = ptr;
        struct drm_device *dev = obj->dev;

        if (obj->funcs && obj->funcs->close)
                obj->funcs->close(obj, file_priv);
        else if (dev->driver->gem_close_object)
                dev->driver->gem_close_object(obj, file_priv);

        if (drm_core_check_feature(dev, DRIVER_PRIME))
                drm_gem_remove_prime_handles(obj, file_priv);
        drm_vma_node_revoke(&obj->vma_node, file_priv);

        drm_gem_object_handle_put_unlocked(obj);

        return 0;
}

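/**
 * drm_gem_handle_delete - deletes the given file-private handle
 * @filp: drm file-private structure to use for the handle look up
 * @handle: userspace handle to delete
 *
 * Removes the GEM handle from the @filp lookup table which has been added with
 * drm_gem_handle_create(). If this is the last handle also cleans up linked
 * resources like GEM names.
 */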
int
drm_gem_handle_delete(struct drm_file *filp, u32 handle)
{
        struct drm_gem_object *obj;

        spin_lock(&filp->table_lock);

        /* Check if we currently have a reference on the object */
        obj = idr_replace(&filp->object_idr, NULL, handle);
        spin_unlock(&filp->table_lock);
        if (IS_ERR_OR_NULL(obj))
                return -EINVAL;

        /* Release driver's reference and decrement refcount */
        drm_gem_object_release_handle(handle, obj, filp);

        /* And finally make the handle available for future allocations. */
        spin_lock(&filp->table_lock);
        idr_remove(&filp->object_idr, handle);
        spin_unlock(&filp->table_lock);

        return 0;
}
EXPORT_SYMBOL(drm_gem_handle_delete);

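/**
 * drm_gem_dumb_map_offset - return the fake mmap offset for a gem object
 * @file: drm file-private structure containing the gem object
 * @dev: corresponding drm_device
 * @handle: gem object handle
 * @offset: return location for the fake mmap offset
 *
 * This implements the &drm_driver.dumb_map_offset kms driver callback for
 * drivers which use gem to manage their backing storage.
 */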
int drm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
                            u32 handle, u64 *offset)
{
        struct drm_gem_object *obj;
        int ret;

        obj = drm_gem_object_lookup(file, handle);
        if (!obj)
                return -ENOENT;

        /* Don't allow imported objects to be mapped */
        if (obj->import_attach) {
                ret = -EINVAL;
                goto out;
        }

        ret = drm_gem_create_mmap_offset(obj);
        if (ret)
                goto out;

        *offset = drm_vma_node_offset_addr(&obj->vma_node);
out:
        drm_gem_object_put_unlocked(obj);

        return ret;
}
EXPORT_SYMBOL_GPL(drm_gem_dumb_map_offset);

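/**
 * drm_gem_dumb_destroy - dumb fb callback helper for gem based drivers
 * @file: drm file-private structure to remove the dumb handle from
 * @dev: corresponding drm_device
 * @handle: the dumb handle to remove
 *
 * This implements the &drm_driver.dumb_destroy kms driver callback for
 * drivers which use gem to manage their backing storage.
 */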
int drm_gem_dumb_destroy(struct drm_file *file,
                         struct drm_device *dev,
                         uint32_t handle)
{
        return drm_gem_handle_delete(file, handle);
}
EXPORT_SYMBOL(drm_gem_dumb_destroy);

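/**
 * drm_gem_handle_create_tail - internal function to create a handle
 * @file_priv: drm file-private structure to register the handle for
 * @obj: object to register
 * @handlep: pointer to return the created handle to the caller
 *
 * This expects the &drm_device.object_name_lock to be held already and will
 * drop it before returning. Used to avoid races in establishing new handles
 * when importing an object from either an flink name or a dma-buf.
 *
 * Handles must be released again through drm_gem_handle_delete(). This is done
 * when userspace closes @file_priv for all attached handles, or through the
 * GEM_CLOSE ioctl for individual handles.
 */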
int
drm_gem_handle_create_tail(struct drm_file *file_priv,
                           struct drm_gem_object *obj,
                           u32 *handlep)
{
        struct drm_device *dev = obj->dev;
        u32 handle;
        int ret;

        WARN_ON(!mutex_is_locked(&dev->object_name_lock));
        if (obj->handle_count++ == 0)
                drm_gem_object_get(obj);

        /*
         * Get the user-visible handle using idr. Preload and perform
         * allocation under our spinlock.
         */
        idr_preload(GFP_KERNEL);
        spin_lock(&file_priv->table_lock);

        ret = idr_alloc(&file_priv->object_idr, obj, 1, 0, GFP_NOWAIT);

        spin_unlock(&file_priv->table_lock);
        idr_preload_end();

        mutex_unlock(&dev->object_name_lock);
        if (ret < 0)
                goto err_unref;

        handle = ret;

        ret = drm_vma_node_allow(&obj->vma_node, file_priv);
        if (ret)
                goto err_remove;

        if (obj->funcs && obj->funcs->open) {
                ret = obj->funcs->open(obj, file_priv);
                if (ret)
                        goto err_revoke;
        } else if (dev->driver->gem_open_object) {
                ret = dev->driver->gem_open_object(obj, file_priv);
                if (ret)
                        goto err_revoke;
        }

        *handlep = handle;
        return 0;

err_revoke:
        drm_vma_node_revoke(&obj->vma_node, file_priv);
err_remove:
        spin_lock(&file_priv->table_lock);
        idr_remove(&file_priv->object_idr, handle);
        spin_unlock(&file_priv->table_lock);
err_unref:
        drm_gem_object_handle_put_unlocked(obj);
        return ret;
}

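/**
 * drm_gem_handle_create - create a gem handle for an object
 * @file_priv: drm file-private structure to register the handle for
 * @obj: object to register
 * @handlep: pointer to return the created handle to the caller
 *
 * Create a handle for this object. This adds a handle reference to the object,
 * which includes a regular reference count. Callers will likely want to
 * dereference the object afterwards.
 */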
int drm_gem_handle_create(struct drm_file *file_priv,
                          struct drm_gem_object *obj,
                          u32 *handlep)
{
        mutex_lock(&obj->dev->object_name_lock);

        return drm_gem_handle_create_tail(file_priv, obj, handlep);
}
EXPORT_SYMBOL(drm_gem_handle_create);

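/**
 * drm_gem_free_mmap_offset - release a fake mmap offset for an object
 * @obj: obj in question
 *
 * This routine frees fake offsets allocated by drm_gem_create_mmap_offset().
 *
 * Note that drm_gem_object_release() already calls this function, so drivers
 * don't have to take care of releasing the mmap offset themselves when freeing
 * the GEM object.
 */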
void
drm_gem_free_mmap_offset(struct drm_gem_object *obj)
{
        struct drm_device *dev = obj->dev;

        drm_vma_offset_remove(dev->vma_offset_manager, &obj->vma_node);
}
EXPORT_SYMBOL(drm_gem_free_mmap_offset);

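/**
 * drm_gem_create_mmap_offset_size - create a fake mmap offset for an object
 * @obj: obj in question
 * @size: the virtual size
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call. The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj, in cases where
 * the virtual size differs from the physical size (i.e. &drm_gem_object.size).
 * Otherwise just use drm_gem_create_mmap_offset().
 */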
int
drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size)
{
        struct drm_device *dev = obj->dev;

        return drm_vma_offset_add(dev->vma_offset_manager, &obj->vma_node,
                                  size / PAGE_SIZE);
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset_size);

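/**
 * drm_gem_create_mmap_offset - create a fake mmap offset for an object
 * @obj: obj in question
 *
 * This routine allocates and attaches a fake offset for @obj covering the
 * whole object size.
 *
 * Drivers can call drm_gem_free_mmap_offset() before freeing @obj to release
 * the fake offset again.
 */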
int drm_gem_create_mmap_offset(struct drm_gem_object *obj)
{
        return drm_gem_create_mmap_offset_size(obj, obj->size);
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset);

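/*
 * Move pages to the appropriate LRU list and release the pagevec,
 * dropping the page references taken by drm_gem_get_pages().
 */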
static void drm_gem_check_release_pagevec(struct pagevec *pvec)
{
        check_move_unevictable_pages(pvec);
        __pagevec_release(pvec);
        cond_resched();
}

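/**
 * drm_gem_get_pages - helper to allocate backing pages for a GEM object
 * from shmem
 * @obj: obj in question
 *
 * This reads the page-array of the shmem-backing storage of the given gem
 * object. An array of pages is returned. If a page is not allocated or
 * swapped-out, this will allocate/swap-in the required pages. Note that the
 * whole object is covered by the page-array and pinned in memory.
 *
 * Use drm_gem_put_pages() to release the array and unpin all pages.
 */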
struct page **drm_gem_get_pages(struct drm_gem_object *obj)
{
        struct address_space *mapping;
        struct page *p, **pages;
        struct pagevec pvec;
        int i, npages;

        /* This is the shared memory object that backs the GEM resource */
        mapping = obj->filp->f_mapping;

        /* We already BUG_ON() for non-page-aligned sizes in
         * drm_gem_object_init(), so we should never hit this unless
         * driver author is doing something really wrong:
         */
        WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);

        npages = obj->size >> PAGE_SHIFT;

        pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
        if (pages == NULL)
                return ERR_PTR(-ENOMEM);

        mapping_set_unevictable(mapping);

        for (i = 0; i < npages; i++) {
                p = shmem_read_mapping_page(mapping, i);
                if (IS_ERR(p))
                        goto fail;
                pages[i] = p;

                /* Make sure shmem keeps __GFP_DMA32 allocated pages in the
                 * correct region during swapin. Note that this requires
                 * __GFP_DMA32 to be set in mapping_gfp_mask(inode->i_mapping)
                 * so shmem can relocate pages during swapin if required.
                 */
                BUG_ON(mapping_gfp_constraint(mapping, __GFP_DMA32) &&
                       (page_to_pfn(p) >= 0x00100000UL));
        }

        return pages;

fail:
        mapping_clear_unevictable(mapping);
        pagevec_init(&pvec);
        while (i--) {
                if (!pagevec_add(&pvec, pages[i]))
                        drm_gem_check_release_pagevec(&pvec);
        }
        if (pagevec_count(&pvec))
                drm_gem_check_release_pagevec(&pvec);

        kvfree(pages);
        return ERR_CAST(p);
}
EXPORT_SYMBOL(drm_gem_get_pages);

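/**
 * drm_gem_put_pages - helper to free backing pages for a GEM object
 * @obj: obj in question
 * @pages: pages to free
 * @dirty: if true, pages will be marked as dirty
 * @accessed: if true, the pages will be marked as accessed
 */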
void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
                       bool dirty, bool accessed)
{
        int i, npages;
        struct address_space *mapping;
        struct pagevec pvec;

        mapping = file_inode(obj->filp)->i_mapping;
        mapping_clear_unevictable(mapping);

        /* We already BUG_ON() for non-page-aligned sizes in
         * drm_gem_object_init(), so we should never hit this unless
         * driver author is doing something really wrong:
         */
        WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);

        npages = obj->size >> PAGE_SHIFT;

        pagevec_init(&pvec);
        for (i = 0; i < npages; i++) {
                if (dirty)
                        set_page_dirty(pages[i]);

                if (accessed)
                        mark_page_accessed(pages[i]);

                /* Undo the reference we took when populating the table */
                if (!pagevec_add(&pvec, pages[i]))
                        drm_gem_check_release_pagevec(&pvec);
        }
        if (pagevec_count(&pvec))
                drm_gem_check_release_pagevec(&pvec);

        kvfree(pages);
}
EXPORT_SYMBOL(drm_gem_put_pages);

static int objects_lookup(struct drm_file *filp, u32 *handle, int count,
                          struct drm_gem_object **objs)
{
        int i, ret = 0;
        struct drm_gem_object *obj;

        spin_lock(&filp->table_lock);

        for (i = 0; i < count; i++) {
                /* Check if we currently have a reference on the object */
                obj = idr_find(&filp->object_idr, handle[i]);
                if (!obj) {
                        ret = -ENOENT;
                        break;
                }
                drm_gem_object_get(obj);
                objs[i] = obj;
        }
        spin_unlock(&filp->table_lock);

        return ret;
}

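/**
 * drm_gem_objects_lookup - look up GEM objects from an array of handles
 * @filp: DRM file private data
 * @bo_handles: user pointer to an array of userspace handles
 * @count: size of the handle array
 * @objs_out: returned pointer to a newly allocated array of drm_gem_object
 * pointers
 *
 * Takes an array of userspace handles and returns a newly allocated array of
 * GEM objects. For a single handle lookup, use drm_gem_object_lookup().
 *
 * Returns:
 * 0 on success, -ENOENT if any handle lookup fails (already looked-up objects
 * are still returned in @objs_out and must be released), or another negative
 * errno on allocation or copy failure. Returned GEM objects need to be
 * released with drm_gem_object_put_unlocked().
 */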
int drm_gem_objects_lookup(struct drm_file *filp, void __user *bo_handles,
                           int count, struct drm_gem_object ***objs_out)
{
        int ret;
        u32 *handles;
        struct drm_gem_object **objs;

        if (!count)
                return 0;

        objs = kvmalloc_array(count, sizeof(struct drm_gem_object *),
                              GFP_KERNEL | __GFP_ZERO);
        if (!objs)
                return -ENOMEM;

        handles = kvmalloc_array(count, sizeof(u32), GFP_KERNEL);
        if (!handles) {
                ret = -ENOMEM;
                goto out;
        }

        if (copy_from_user(handles, bo_handles, count * sizeof(u32))) {
                ret = -EFAULT;
                DRM_DEBUG("Failed to copy in GEM handles\n");
                goto out;
        }

        ret = objects_lookup(filp, handles, count, objs);
        *objs_out = objs;

out:
        kvfree(handles);
        return ret;
}
EXPORT_SYMBOL(drm_gem_objects_lookup);

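/**
 * drm_gem_object_lookup - look up a GEM object from its handle
 * @filp: DRM file private data
 * @handle: userspace handle
 *
 * Returns:
 * A reference to the object named by the handle if such exists on @filp, NULL
 * otherwise.
 *
 * If looking up an array of handles, use drm_gem_objects_lookup().
 */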
struct drm_gem_object *
drm_gem_object_lookup(struct drm_file *filp, u32 handle)
{
        struct drm_gem_object *obj = NULL;

        objects_lookup(filp, &handle, 1, &obj);
        return obj;
}
EXPORT_SYMBOL(drm_gem_object_lookup);

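/**
 * drm_gem_reservation_object_wait - wait on a GEM object's reservation fences
 * @filep: DRM file private data
 * @handle: userspace handle of the GEM object to wait on
 * @wait_all: if true, wait on all fences, else wait on just the exclusive fence
 * @timeout: timeout value in jiffies or zero to return immediately
 *
 * Returns:
 * 0 on success, -ETIME if the wait timed out, -EINVAL if the handle could not
 * be looked up, or another negative errno on failure.
 */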
long drm_gem_reservation_object_wait(struct drm_file *filep, u32 handle,
                                     bool wait_all, unsigned long timeout)
{
        long ret;
        struct drm_gem_object *obj;

        obj = drm_gem_object_lookup(filep, handle);
        if (!obj) {
                DRM_DEBUG("Failed to look up GEM BO %d\n", handle);
                return -EINVAL;
        }

        ret = reservation_object_wait_timeout_rcu(obj->resv, wait_all,
                                                  true, timeout);
        if (ret == 0)
                ret = -ETIME;
        else if (ret > 0)
                ret = 0;

        drm_gem_object_put_unlocked(obj);

        return ret;
}
EXPORT_SYMBOL(drm_gem_reservation_object_wait);

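/**
 * drm_gem_close_ioctl - implementation of the GEM_CLOSE ioctl
 * @dev: drm_device
 * @data: ioctl data
 * @file_priv: drm file-private structure
 *
 * Releases the handle to an mm object.
 */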
int
drm_gem_close_ioctl(struct drm_device *dev, void *data,
                    struct drm_file *file_priv)
{
        struct drm_gem_close *args = data;
        int ret;

        if (!drm_core_check_feature(dev, DRIVER_GEM))
                return -EOPNOTSUPP;

        ret = drm_gem_handle_delete(file_priv, args->handle);

        return ret;
}

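/**
 * drm_gem_flink_ioctl - implementation of the GEM_FLINK ioctl
 * @dev: drm_device
 * @data: ioctl data
 * @file_priv: drm file-private structure
 *
 * Create a global name for an object, returning the name.
 *
 * Note that the name does not hold a reference; when the object
 * is freed, the name goes away.
 */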
int
drm_gem_flink_ioctl(struct drm_device *dev, void *data,
                    struct drm_file *file_priv)
{
        struct drm_gem_flink *args = data;
        struct drm_gem_object *obj;
        int ret;

        if (!drm_core_check_feature(dev, DRIVER_GEM))
                return -EOPNOTSUPP;

        obj = drm_gem_object_lookup(file_priv, args->handle);
        if (obj == NULL)
                return -ENOENT;

        mutex_lock(&dev->object_name_lock);
        /* prevent races with concurrent gem_close. */
        if (obj->handle_count == 0) {
                ret = -ENOENT;
                goto err;
        }

        if (!obj->name) {
                ret = idr_alloc(&dev->object_name_idr, obj, 1, 0, GFP_KERNEL);
                if (ret < 0)
                        goto err;

                obj->name = ret;
        }

        args->name = (uint64_t) obj->name;
        ret = 0;

err:
        mutex_unlock(&dev->object_name_lock);
        drm_gem_object_put_unlocked(obj);
        return ret;
}

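/**
 * drm_gem_open_ioctl - implementation of the GEM_OPEN ioctl
 * @dev: drm_device
 * @data: ioctl data
 * @file_priv: drm file-private structure
 *
 * Open an object using the global name, returning a handle and the size.
 */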
int
drm_gem_open_ioctl(struct drm_device *dev, void *data,
                   struct drm_file *file_priv)
{
        struct drm_gem_open *args = data;
        struct drm_gem_object *obj;
        int ret;
        u32 handle;

        if (!drm_core_check_feature(dev, DRIVER_GEM))
                return -EOPNOTSUPP;

        mutex_lock(&dev->object_name_lock);
        obj = idr_find(&dev->object_name_idr, (int) args->name);
        if (obj) {
                drm_gem_object_get(obj);
        } else {
                mutex_unlock(&dev->object_name_lock);
                return -ENOENT;
        }

        /* drm_gem_handle_create_tail unlocks dev->object_name_lock. */
        ret = drm_gem_handle_create_tail(file_priv, obj, &handle);
        drm_gem_object_put_unlocked(obj);
        if (ret)
                return ret;

        args->handle = handle;
        args->size = obj->size;

        return 0;
}

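/**
 * drm_gem_open - initializes GEM file-private structures at devnode open time
 * @dev: drm_device which is being opened by userspace
 * @file_private: drm file-private structure to set up
 *
 * Called at device open time, sets up the structure for handling refcounting
 * of mm objects.
 */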
void
drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
{
        idr_init_base(&file_private->object_idr, 1);
        spin_lock_init(&file_private->table_lock);
}

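/**
 * drm_gem_release - release file-private GEM resources
 * @dev: drm_device which is being closed by userspace
 * @file_private: drm file-private structure to clean up
 *
 * Called at close time when the filp is going away.
 *
 * Releases any remaining references on objects by this filp.
 */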
void
drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
{
        idr_for_each(&file_private->object_idr,
                     &drm_gem_object_release_handle, file_private);
        idr_destroy(&file_private->object_idr);
}

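/**
 * drm_gem_object_release - release GEM buffer object resources
 * @obj: GEM buffer object
 *
 * This releases any structures and resources used by @obj and is the inverse
 * of drm_gem_object_init().
 */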
void
drm_gem_object_release(struct drm_gem_object *obj)
{
        WARN_ON(obj->dma_buf);

        if (obj->filp)
                fput(obj->filp);

        reservation_object_fini(&obj->_resv);
        drm_gem_free_mmap_offset(obj);
}
EXPORT_SYMBOL(drm_gem_object_release);

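/**
 * drm_gem_object_free - free a GEM object
 * @kref: kref of the object to free
 *
 * Called after the last reference to the object has been lost. If the driver
 * still uses the legacy &drm_driver.gem_free_object callback, this must be
 * called holding &drm_device.struct_mutex.
 *
 * Frees the object.
 */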
void
drm_gem_object_free(struct kref *kref)
{
        struct drm_gem_object *obj =
                container_of(kref, struct drm_gem_object, refcount);
        struct drm_device *dev = obj->dev;

        if (obj->funcs) {
                obj->funcs->free(obj);
        } else if (dev->driver->gem_free_object_unlocked) {
                dev->driver->gem_free_object_unlocked(obj);
        } else if (dev->driver->gem_free_object) {
                WARN_ON(!mutex_is_locked(&dev->struct_mutex));

                dev->driver->gem_free_object(obj);
        }
}
EXPORT_SYMBOL(drm_gem_object_free);

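/**
 * drm_gem_object_put_unlocked - drop a GEM buffer object reference
 * @obj: GEM buffer object
 *
 * This releases a reference to @obj. Callers must not hold the
 * &drm_device.struct_mutex lock when calling this function.
 */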
void
drm_gem_object_put_unlocked(struct drm_gem_object *obj)
{
        struct drm_device *dev;

        if (!obj)
                return;

        dev = obj->dev;

        if (dev->driver->gem_free_object) {
                might_lock(&dev->struct_mutex);
                if (kref_put_mutex(&obj->refcount, drm_gem_object_free,
                                   &dev->struct_mutex))
                        mutex_unlock(&dev->struct_mutex);
        } else {
                kref_put(&obj->refcount, drm_gem_object_free);
        }
}
EXPORT_SYMBOL(drm_gem_object_put_unlocked);

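/**
 * drm_gem_object_put - release a GEM buffer object reference
 * @obj: GEM buffer object
 *
 * This releases a reference to @obj. Callers must hold the
 * &drm_device.struct_mutex lock when calling this function.
 *
 * For drivers not encumbered with legacy locking, use
 * drm_gem_object_put_unlocked() instead.
 */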
void
drm_gem_object_put(struct drm_gem_object *obj)
{
        if (obj) {
                WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));

                kref_put(&obj->refcount, drm_gem_object_free);
        }
}
EXPORT_SYMBOL(drm_gem_object_put);

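/**
 * drm_gem_vm_open - vma->ops->open implementation for GEM
 * @vma: VM area structure
 *
 * This function implements the #vm_operations_struct open() callback for GEM
 * drivers. This must be used together with drm_gem_vm_close().
 */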
void drm_gem_vm_open(struct vm_area_struct *vma)
{
        struct drm_gem_object *obj = vma->vm_private_data;

        drm_gem_object_get(obj);
}
EXPORT_SYMBOL(drm_gem_vm_open);

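/**
 * drm_gem_vm_close - vma->ops->close implementation for GEM
 * @vma: VM area structure
 *
 * This function implements the #vm_operations_struct close() callback for GEM
 * drivers. This must be used together with drm_gem_vm_open().
 */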
void drm_gem_vm_close(struct vm_area_struct *vma)
{
        struct drm_gem_object *obj = vma->vm_private_data;

        drm_gem_object_put_unlocked(obj);
}
EXPORT_SYMBOL(drm_gem_vm_close);

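/**
 * drm_gem_mmap_obj - memory map a GEM object
 * @obj: the GEM object to map
 * @obj_size: the object size to be mapped, in bytes
 * @vma: VMA for the area to be mapped
 *
 * Set up the VMA to prepare mapping of the GEM object using the vm_ops
 * provided by the driver. Depending on their requirements, drivers can either
 * provide a fault handler in their vm_ops (in which case any accesses to the
 * object will be trapped, to perform migration, GTT binding, surface register
 * allocation, or performance monitoring), or mmap the buffer memory
 * synchronously after calling drm_gem_mmap_obj.
 *
 * This function is mainly intended to implement the DMABUF mmap operation,
 * when the GEM object is not looked up based on its fake offset. To implement
 * the DRM mmap operation, drivers should use the drm_gem_mmap() function.
 *
 * drm_gem_mmap_obj() assumes the user is granted access to the buffer while
 * drm_gem_mmap() prevents unprivileged users from mapping random objects. So
 * callers must verify access restrictions before calling this helper.
 *
 * Return 0 on success or -EINVAL if the object size is smaller than the VMA
 * size, or if no vm_ops are provided.
 */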
int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
                     struct vm_area_struct *vma)
{
        struct drm_device *dev = obj->dev;

        /* Check for valid size. */
        if (obj_size < vma->vm_end - vma->vm_start)
                return -EINVAL;

        if (obj->funcs && obj->funcs->vm_ops)
                vma->vm_ops = obj->funcs->vm_ops;
        else if (dev->driver->gem_vm_ops)
                vma->vm_ops = dev->driver->gem_vm_ops;
        else
                return -EINVAL;

        vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
        vma->vm_private_data = obj;
        vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
        vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);

        /* Take a ref for this mapping of the object, so that the object
         * doesn't get freed as the mapping goes away. The caller is expected
         * to hold a reference to the object, which it'll drop after the
         * mapping is complete. The vm_open/vm_close handlers take and drop
         * further references as VMAs are duplicated or torn down.
         */
        drm_gem_object_get(obj);

        return 0;
}
EXPORT_SYMBOL(drm_gem_mmap_obj);

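/**
 * drm_gem_mmap - memory map routine for GEM objects
 * @filp: DRM file pointer
 * @vma: VMA for the area to be mapped
 *
 * If a driver supports GEM object mapping, mmap calls on the DRM file
 * descriptor will end up here.
 *
 * Look up the GEM object based on the offset passed in (vma->vm_pgoff will
 * contain the fake offset we created when the GTT map ioctl was called on
 * the object) and map it with a call to drm_gem_mmap_obj().
 *
 * If the caller is not granted access to the buffer object, the mmap will fail
 * with EACCES. Please see the vma manager for more information.
 */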
int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
        struct drm_file *priv = filp->private_data;
        struct drm_device *dev = priv->minor->dev;
        struct drm_gem_object *obj = NULL;
        struct drm_vma_offset_node *node;
        int ret;

        if (drm_dev_is_unplugged(dev))
                return -ENODEV;

        drm_vma_offset_lock_lookup(dev->vma_offset_manager);
        node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
                                                  vma->vm_pgoff,
                                                  vma_pages(vma));
        if (likely(node)) {
                obj = container_of(node, struct drm_gem_object, vma_node);
                /*
                 * When the object is being freed, after it hits 0-refcnt it
                 * proceeds to tear down the object. In the process it will
                 * attempt to remove the VMA offset and so acquire this
                 * mgr->vm_lock. Therefore if we find an object with a 0-refcnt
                 * that matches our range, we know it is in the process of being
                 * destroyed and will be freed as soon as we release the lock -
                 * so we have to check for the 0-refcnted object and treat it as
                 * invalid.
                 */
                if (!kref_get_unless_zero(&obj->refcount))
                        obj = NULL;
        }
        drm_vma_offset_unlock_lookup(dev->vma_offset_manager);

        if (!obj)
                return -EINVAL;

        if (!drm_vma_node_is_allowed(node, priv)) {
                drm_gem_object_put_unlocked(obj);
                return -EACCES;
        }

        if (node->readonly) {
                if (vma->vm_flags & VM_WRITE) {
                        drm_gem_object_put_unlocked(obj);
                        return -EINVAL;
                }

                vma->vm_flags &= ~VM_MAYWRITE;
        }

        ret = drm_gem_mmap_obj(obj, drm_vma_node_size(node) << PAGE_SHIFT,
                               vma);

        drm_gem_object_put_unlocked(obj);

        return ret;
}
EXPORT_SYMBOL(drm_gem_mmap);

void drm_gem_print_info(struct drm_printer *p, unsigned int indent,
                        const struct drm_gem_object *obj)
{
        drm_printf_indent(p, indent, "name=%d\n", obj->name);
        drm_printf_indent(p, indent, "refcount=%u\n",
                          kref_read(&obj->refcount));
        drm_printf_indent(p, indent, "start=%08lx\n",
                          drm_vma_node_start(&obj->vma_node));
        drm_printf_indent(p, indent, "size=%zu\n", obj->size);
        drm_printf_indent(p, indent, "imported=%s\n",
                          obj->import_attach ? "yes" : "no");

        if (obj->funcs && obj->funcs->print_info)
                obj->funcs->print_info(p, indent, obj);
        else if (obj->dev->driver->gem_print_info)
                obj->dev->driver->gem_print_info(p, indent, obj);
}

int drm_gem_pin(struct drm_gem_object *obj)
{
        if (obj->funcs && obj->funcs->pin)
                return obj->funcs->pin(obj);
        else if (obj->dev->driver->gem_prime_pin)
                return obj->dev->driver->gem_prime_pin(obj);
        else
                return 0;
}

void drm_gem_unpin(struct drm_gem_object *obj)
{
        if (obj->funcs && obj->funcs->unpin)
                obj->funcs->unpin(obj);
        else if (obj->dev->driver->gem_prime_unpin)
                obj->dev->driver->gem_prime_unpin(obj);
}

void *drm_gem_vmap(struct drm_gem_object *obj)
{
        void *vaddr;

        if (obj->funcs && obj->funcs->vmap)
                vaddr = obj->funcs->vmap(obj);
        else if (obj->dev->driver->gem_prime_vmap)
                vaddr = obj->dev->driver->gem_prime_vmap(obj);
        else
                vaddr = ERR_PTR(-EOPNOTSUPP);

        if (!vaddr)
                vaddr = ERR_PTR(-ENOMEM);

        return vaddr;
}

void drm_gem_vunmap(struct drm_gem_object *obj, void *vaddr)
{
        if (!vaddr)
                return;

        if (obj->funcs && obj->funcs->vunmap)
                obj->funcs->vunmap(obj, vaddr);
        else if (obj->dev->driver->gem_prime_vunmap)
                obj->dev->driver->gem_prime_vunmap(obj, vaddr);
}

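/**
 * drm_gem_lock_reservations - Sets up the ww context and acquires
 * the lock on an array of GEM objects.
 * @objs: drm_gem_objects to lock
 * @count: Number of objects in @objs
 * @acquire_ctx: struct ww_acquire_ctx that will get initialized as
 * part of tracking this set of locked reservations.
 *
 * Once you've locked your reservations, you'll want to set up space
 * for your shared fences (if applicable), submit your job, then
 * drm_gem_unlock_reservations().
 */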
int
drm_gem_lock_reservations(struct drm_gem_object **objs, int count,
                          struct ww_acquire_ctx *acquire_ctx)
{
        int contended = -1;
        int i, ret;

        ww_acquire_init(acquire_ctx, &reservation_ww_class);

retry:
        if (contended != -1) {
                struct drm_gem_object *obj = objs[contended];

                ret = ww_mutex_lock_slow_interruptible(&obj->resv->lock,
                                                       acquire_ctx);
                if (ret) {
                        ww_acquire_done(acquire_ctx);
                        return ret;
                }
        }

        for (i = 0; i < count; i++) {
                if (i == contended)
                        continue;

                ret = ww_mutex_lock_interruptible(&objs[i]->resv->lock,
                                                  acquire_ctx);
                if (ret) {
                        int j;

                        for (j = 0; j < i; j++)
                                ww_mutex_unlock(&objs[j]->resv->lock);

                        if (contended != -1 && contended >= i)
                                ww_mutex_unlock(&objs[contended]->resv->lock);

                        if (ret == -EDEADLK) {
                                contended = i;
                                goto retry;
                        }

                        ww_acquire_done(acquire_ctx);
                        return ret;
                }
        }

        ww_acquire_done(acquire_ctx);

        return 0;
}
EXPORT_SYMBOL(drm_gem_lock_reservations);

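/**
 * drm_gem_unlock_reservations - Drops the reservation locks and releases the
 * ww acquire context taken by drm_gem_lock_reservations().
 * @objs: drm_gem_objects to unlock
 * @count: Number of objects in @objs
 * @acquire_ctx: the ww_acquire_ctx used in drm_gem_lock_reservations()
 */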
void
drm_gem_unlock_reservations(struct drm_gem_object **objs, int count,
                            struct ww_acquire_ctx *acquire_ctx)
{
        int i;

        for (i = 0; i < count; i++)
                ww_mutex_unlock(&objs[i]->resv->lock);

        ww_acquire_fini(acquire_ctx);
}
EXPORT_SYMBOL(drm_gem_unlock_reservations);

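/**
 * drm_gem_fence_array_add - Adds the fence to an array of fences to be
 * waited on, deduplicating fences from the same context.
 * @fence_array: array of dma_fence * for the job to block on.
 * @fence: the dma_fence to add to the list of dependencies.
 *
 * Returns:
 * 0 on success, or an error on failing to expand the array.
 */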
int drm_gem_fence_array_add(struct xarray *fence_array,
                            struct dma_fence *fence)
{
        struct dma_fence *entry;
        unsigned long index;
        u32 id = 0;
        int ret;

        if (!fence)
                return 0;

        /* Deduplicate if we already depend on a fence from the same context.
         * This lets the size of the array of deps scale with the number of
         * engines involved, rather than the number of BOs.
         */
        xa_for_each(fence_array, index, entry) {
                if (entry->context != fence->context)
                        continue;

                if (dma_fence_is_later(fence, entry)) {
                        dma_fence_put(entry);
                        xa_store(fence_array, index, fence, GFP_KERNEL);
                } else {
                        dma_fence_put(fence);
                }
                return 0;
        }

        ret = xa_alloc(fence_array, &id, fence, xa_limit_32b, GFP_KERNEL);
        if (ret != 0)
                dma_fence_put(fence);

        return ret;
}
EXPORT_SYMBOL(drm_gem_fence_array_add);

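/**
 * drm_gem_fence_array_add_implicit - Adds the implicit dependencies tracked
 * in the GEM object's reservation object to an array of dma_fences for use in
 * scheduling a rendering job.
 * @fence_array: array of dma_fence * for the job to block on.
 * @obj: the gem object to add new dependencies from.
 * @write: whether the job might write the object (so we need to depend on
 * shared fences in the reservation object).
 *
 * This should be called after drm_gem_lock_reservations() on your array of
 * GEM objects used in the job but before updating the reservations with your
 * own fences.
 */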
int drm_gem_fence_array_add_implicit(struct xarray *fence_array,
                                     struct drm_gem_object *obj,
                                     bool write)
{
        int ret;
        struct dma_fence **fences;
        unsigned int i, fence_count;

        if (!write) {
                struct dma_fence *fence =
                        reservation_object_get_excl_rcu(obj->resv);

                return drm_gem_fence_array_add(fence_array, fence);
        }

        ret = reservation_object_get_fences_rcu(obj->resv, NULL,
                                                &fence_count, &fences);
        if (ret || !fence_count)
                return ret;

        for (i = 0; i < fence_count; i++) {
                ret = drm_gem_fence_array_add(fence_array, fences[i]);
                if (ret)
                        break;
        }

        for (; i < fence_count; i++)
                dma_fence_put(fences[i]);
        kfree(fences);
        return ret;
}
EXPORT_SYMBOL(drm_gem_fence_array_add_implicit);