1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34#include <linux/anon_inodes.h>
35#include <linux/dma-fence.h>
36#include <linux/file.h>
37#include <linux/module.h>
38#include <linux/pci.h>
39#include <linux/poll.h>
40#include <linux/slab.h>
41
42#include <drm/drm_client.h>
43#include <drm/drm_drv.h>
44#include <drm/drm_file.h>
45#include <drm/drm_print.h>
46
47#include "drm_crtc_internal.h"
48#include "drm_internal.h"
49#include "drm_legacy.h"
50
51#if defined(CONFIG_MMU) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
52#include <uapi/asm/mman.h>
53#include <drm/drm_vma_manager.h>
54#endif
55
56
/* Serializes open/close for drivers that still rely on BKL-style locking. */
DEFINE_MUTEX(drm_global_mutex);
58
59bool drm_dev_needs_global_mutex(struct drm_device *dev)
60{
61
62
63
64
65
66 if (drm_core_check_feature(dev, DRIVER_LEGACY))
67 return true;
68
69
70
71
72
73
74
75 if (dev->driver->load || dev->driver->unload)
76 return true;
77
78
79
80
81
82
83
84 if (dev->driver->lastclose)
85 return true;
86
87 return false;
88}
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
/**
 * drm_file_alloc - allocate file context
 * @minor: minor to allocate on
 *
 * This allocates a new DRM file context, initializing its per-file state
 * (events, GEM, syncobj, prime) and calling the driver's ->open() hook.
 * The new file is not yet linked onto the device's file list.
 *
 * RETURNS:
 * Pointer to newly allocated struct drm_file, ERR_PTR on failure.
 */
struct drm_file *drm_file_alloc(struct drm_minor *minor)
{
	struct drm_device *dev = minor->dev;
	struct drm_file *file;
	int ret;

	file = kzalloc(sizeof(*file), GFP_KERNEL);
	if (!file)
		return ERR_PTR(-ENOMEM);

	file->pid = get_pid(task_pid(current));
	file->minor = minor;

	/* for compatibility root is always authenticated */
	file->authenticated = capable(CAP_SYS_ADMIN);

	INIT_LIST_HEAD(&file->lhead);
	INIT_LIST_HEAD(&file->fbs);
	mutex_init(&file->fbs_lock);
	INIT_LIST_HEAD(&file->blobs);
	INIT_LIST_HEAD(&file->pending_event_list);
	INIT_LIST_HEAD(&file->event_list);
	init_waitqueue_head(&file->event_wait);
	file->event_space = 4096; /* set aside 4k for event buffer */

	mutex_init(&file->event_read_lock);

	if (drm_core_check_feature(dev, DRIVER_GEM))
		drm_gem_open(dev, file);

	if (drm_core_check_feature(dev, DRIVER_SYNCOBJ))
		drm_syncobj_open(file);

	drm_prime_init_file_private(&file->prime);

	if (dev->driver->open) {
		ret = dev->driver->open(dev, file);
		if (ret < 0)
			goto out_prime_destroy;
	}

	return file;

out_prime_destroy:
	/* Unwind in reverse order of the setup above. */
	drm_prime_destroy_file_private(&file->prime);
	if (drm_core_check_feature(dev, DRIVER_SYNCOBJ))
		drm_syncobj_release(file);
	if (drm_core_check_feature(dev, DRIVER_GEM))
		drm_gem_release(dev, file);
	put_pid(file->pid);
	kfree(file);

	return ERR_PTR(ret);
}
208
/*
 * drm_events_release - disconnect all events from a closing drm_file
 * @file_priv: file context being torn down
 *
 * Unlinks not-yet-signalled events (pending_event_list) so that their
 * eventual completion sees file_priv == NULL and frees them instead of
 * queueing them, and frees events already waiting to be read
 * (event_list). Both lists are protected by &drm_device.event_lock.
 */
static void drm_events_release(struct drm_file *file_priv)
{
	struct drm_device *dev = file_priv->minor->dev;
	struct drm_pending_event *e, *et;
	unsigned long flags;

	spin_lock_irqsave(&dev->event_lock, flags);

	/* Unlink pending events: mark them as orphaned. */
	list_for_each_entry_safe(e, et, &file_priv->pending_event_list,
				 pending_link) {
		list_del(&e->pending_link);
		e->file_priv = NULL;
	}

	/* Remove unconsumed events: nobody can read them any more. */
	list_for_each_entry_safe(e, et, &file_priv->event_list, link) {
		list_del(&e->link);
		kfree(e);
	}

	spin_unlock_irqrestore(&dev->event_lock, flags);
}
232
233
234
235
236
237
238
239
240
241
242
/**
 * drm_file_free - free file context
 * @file: context to free, or NULL
 *
 * Releases all per-file resources, roughly in the reverse order they were
 * set up by drm_file_alloc() and drm_open_helper(). The caller must have
 * already unlinked @file from the device's file list.
 */
void drm_file_free(struct drm_file *file)
{
	struct drm_device *dev;

	if (!file)
		return;

	dev = file->minor->dev;

	DRM_DEBUG("comm=\"%s\", pid=%d, dev=0x%lx, open_count=%d\n",
		  current->comm, task_pid_nr(current),
		  (long)old_encode_dev(file->minor->kdev->devt),
		  atomic_read(&dev->open_count));

#ifdef CONFIG_DRM_LEGACY
	/* The deprecated ->preclose() hook runs before any core teardown. */
	if (drm_core_check_feature(dev, DRIVER_LEGACY) &&
	    dev->driver->preclose)
		dev->driver->preclose(dev, file);
#endif

	if (drm_core_check_feature(dev, DRIVER_LEGACY))
		drm_legacy_lock_release(dev, file->filp);

	if (drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		drm_legacy_reclaim_buffers(dev, file);

	/* Disconnect pending events and free queued ones. */
	drm_events_release(file);

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		drm_fb_release(file);
		drm_property_destroy_user_blobs(dev, file);
	}

	if (drm_core_check_feature(dev, DRIVER_SYNCOBJ))
		drm_syncobj_release(file);

	if (drm_core_check_feature(dev, DRIVER_GEM))
		drm_gem_release(dev, file);

	drm_legacy_ctxbitmap_flush(dev, file);

	if (drm_is_primary_client(file))
		drm_master_release(file);

	/* Driver's ->postclose() runs after the core state is torn down. */
	if (dev->driver->postclose)
		dev->driver->postclose(dev, file);

	drm_prime_destroy_file_private(&file->prime);

	/* drm_events_release() must have emptied the delivery list. */
	WARN_ON(!list_empty(&file->event_list));

	put_pid(file->pid);
	kfree(file);
}
297
298static void drm_close_helper(struct file *filp)
299{
300 struct drm_file *file_priv = filp->private_data;
301 struct drm_device *dev = file_priv->minor->dev;
302
303 mutex_lock(&dev->filelist_mutex);
304 list_del(&file_priv->lhead);
305 mutex_unlock(&dev->filelist_mutex);
306
307 drm_file_free(file_priv);
308}
309
310
311
312
313
314
/*
 * Check whether DRM can run on this CPU.
 *
 * 32-bit sparc is the only architecture ruled out; everything else is
 * accepted.
 */
static int drm_cpu_valid(void)
{
#if defined(__sparc__) && !defined(__sparc_v9__)
	return 0;	/* No cmpxchg before v9 sparc. */
#else
	return 1;
#endif
}
322
323
324
325
326
327
328
329
330
331
332
/*
 * Called whenever a process opens a device node backed by @minor.
 *
 * Validates the open, allocates the drm_file, sets up master state for
 * primary nodes, stores the context in filp->private_data and links it
 * onto the device's file list.
 *
 * RETURNS:
 * 0 on success or a negative error number on failure.
 */
static int drm_open_helper(struct file *filp, struct drm_minor *minor)
{
	struct drm_device *dev = minor->dev;
	struct drm_file *priv;
	int ret;

	/* Exclusive opens are not supported on DRM nodes. */
	if (filp->f_flags & O_EXCL)
		return -EBUSY;
	if (!drm_cpu_valid())
		return -EINVAL;
	/* Refuse opens while the device is switched away (e.g. vga_switcheroo). */
	if (dev->switch_power_state != DRM_SWITCH_POWER_ON &&
	    dev->switch_power_state != DRM_SWITCH_POWER_DYNAMIC_OFF)
		return -EINVAL;

	DRM_DEBUG("comm=\"%s\", pid=%d, minor=%d\n", current->comm,
		  task_pid_nr(current), minor->index);

	priv = drm_file_alloc(minor);
	if (IS_ERR(priv))
		return PTR_ERR(priv);

	if (drm_is_primary_client(priv)) {
		ret = drm_master_open(priv);
		if (ret) {
			drm_file_free(priv);
			return ret;
		}
	}

	filp->private_data = priv;
	filp->f_mode |= FMODE_UNSIGNED_OFFSET;
	priv->filp = filp;

	mutex_lock(&dev->filelist_mutex);
	list_add(&priv->lhead, &dev->filelist);
	mutex_unlock(&dev->filelist_mutex);

#ifdef CONFIG_DRM_LEGACY
#ifdef __alpha__
	/*
	 * Default the hose on alpha: pick the first VGA device's PCI
	 * controller, falling back to the first root bus.
	 */
	if (!dev->hose) {
		struct pci_dev *pci_dev;

		pci_dev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, NULL);
		if (pci_dev) {
			dev->hose = pci_dev->sysdata;
			pci_dev_put(pci_dev);
		}
		if (!dev->hose) {
			struct pci_bus *b = list_entry(pci_root_buses.next,
						       struct pci_bus, node);
			if (b)
				dev->hose = b->sysdata;
		}
	}
#endif
#endif

	return 0;
}
395
396
397
398
399
400
401
402
403
404
405
406
407
408
/**
 * drm_open - open method for DRM file
 * @inode: device inode
 * @filp: file pointer.
 *
 * This function must be used by drivers as their &file_operations.open method.
 * It looks up the correct DRM device and instantiates all the per-file
 * resources for it. It also calls the &drm_driver.open driver callback.
 *
 * RETURNS:
 * 0 on success or negative errno value on failure.
 */
int drm_open(struct inode *inode, struct file *filp)
{
	struct drm_device *dev;
	struct drm_minor *minor;
	int retcode;
	int need_setup = 0;

	minor = drm_minor_acquire(iminor(inode));
	if (IS_ERR(minor))
		return PTR_ERR(minor);

	dev = minor->dev;
	if (drm_dev_needs_global_mutex(dev))
		mutex_lock(&drm_global_mutex);

	/* First open on this device triggers legacy one-time setup below. */
	if (!atomic_fetch_inc(&dev->open_count))
		need_setup = 1;

	/* Share the anon inode's address_space across all nodes of the device. */
	filp->f_mapping = dev->anon_inode->i_mapping;

	retcode = drm_open_helper(filp, minor);
	if (retcode)
		goto err_undo;
	if (need_setup) {
		retcode = drm_legacy_setup(dev);
		if (retcode) {
			drm_close_helper(filp);
			goto err_undo;
		}
	}

	if (drm_dev_needs_global_mutex(dev))
		mutex_unlock(&drm_global_mutex);

	return 0;

err_undo:
	/* Undo the open_count increment and the minor reference. */
	atomic_dec(&dev->open_count);
	if (drm_dev_needs_global_mutex(dev))
		mutex_unlock(&drm_global_mutex);
	drm_minor_release(minor);
	return retcode;
}
EXPORT_SYMBOL(drm_open);
454
/*
 * drm_lastclose - run last-close cleanup on @dev
 * @dev: DRM device
 *
 * Called when the last open file on the device is released: invokes the
 * driver's ->lastclose() callback, reinitializes legacy state, and lets
 * in-kernel DRM clients (e.g. the fbdev emulation) restore their state.
 */
void drm_lastclose(struct drm_device * dev)
{
	DRM_DEBUG("\n");

	if (dev->driver->lastclose)
		dev->driver->lastclose(dev);
	DRM_DEBUG("driver lastclose completed\n");

	if (drm_core_check_feature(dev, DRIVER_LEGACY))
		drm_legacy_dev_reinit(dev);

	drm_client_dev_restore(dev);
}
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
/**
 * drm_release - release method for DRM file
 * @inode: device inode
 * @filp: file pointer.
 *
 * This function must be used by drivers as their &file_operations.release
 * method. It frees any resources associated with the open file. If this
 * was the last open file of the DRM device it also proceeds to call the
 * drm_lastclose() logic.
 *
 * RETURNS:
 * Always succeeds and returns 0.
 */
int drm_release(struct inode *inode, struct file *filp)
{
	struct drm_file *file_priv = filp->private_data;
	struct drm_minor *minor = file_priv->minor;
	struct drm_device *dev = minor->dev;

	if (drm_dev_needs_global_mutex(dev))
		mutex_lock(&drm_global_mutex);

	DRM_DEBUG("open_count = %d\n", atomic_read(&dev->open_count));

	drm_close_helper(filp);

	if (atomic_dec_and_test(&dev->open_count))
		drm_lastclose(dev);

	if (drm_dev_needs_global_mutex(dev))
		mutex_unlock(&drm_global_mutex);

	/* Drop the reference taken by drm_minor_acquire() in drm_open(). */
	drm_minor_release(minor);

	return 0;
}
EXPORT_SYMBOL(drm_release);
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
/**
 * drm_release_noglobal - release method for DRM file
 * @inode: device inode
 * @filp: file pointer.
 *
 * This function may be used by drivers as their &file_operations.release
 * method. It is identical to drm_release(), but skips taking
 * drm_global_mutex up front: the open-count decrement and the possible
 * drm_lastclose() are made atomic through atomic_dec_and_mutex_lock()
 * instead.
 *
 * RETURNS:
 * Always succeeds and returns 0.
 */
int drm_release_noglobal(struct inode *inode, struct file *filp)
{
	struct drm_file *file_priv = filp->private_data;
	struct drm_minor *minor = file_priv->minor;
	struct drm_device *dev = minor->dev;

	drm_close_helper(filp);

	/* Only take the mutex when this is really the last close. */
	if (atomic_dec_and_mutex_lock(&dev->open_count, &drm_global_mutex)) {
		drm_lastclose(dev);
		mutex_unlock(&drm_global_mutex);
	}

	drm_minor_release(minor);

	return 0;
}
EXPORT_SYMBOL(drm_release_noglobal);
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
/**
 * drm_read - read method for DRM file
 * @filp: file pointer
 * @buffer: userspace destination pointer for the read
 * @count: count in bytes to read
 * @offset: offset to read (unused)
 *
 * This function must be used by drivers as their &file_operations.read
 * method if they use DRM events for asynchronous signalling to userspace.
 * @offset is ignored — DRM events are read like a pipe. Polling support is
 * provided by drm_poll().
 *
 * Events are never split: if the next event does not fit into the space
 * remaining in @buffer it is put back on the queue and only the bytes
 * copied so far are returned.
 *
 * RETURNS:
 * Number of bytes read (may be less than @count) or a negative error code
 * on failure.
 */
ssize_t drm_read(struct file *filp, char __user *buffer,
		 size_t count, loff_t *offset)
{
	struct drm_file *file_priv = filp->private_data;
	struct drm_device *dev = file_priv->minor->dev;
	ssize_t ret;

	ret = mutex_lock_interruptible(&file_priv->event_read_lock);
	if (ret)
		return ret;

	for (;;) {
		struct drm_pending_event *e = NULL;

		/* Dequeue the next event, reclaiming its reserved space. */
		spin_lock_irq(&dev->event_lock);
		if (!list_empty(&file_priv->event_list)) {
			e = list_first_entry(&file_priv->event_list,
					     struct drm_pending_event, link);
			file_priv->event_space += e->event->length;
			list_del(&e->link);
		}
		spin_unlock_irq(&dev->event_lock);

		if (e == NULL) {
			/* ret != 0 means some events were already copied
			 * out; return that partial read instead of blocking. */
			if (ret)
				break;

			if (filp->f_flags & O_NONBLOCK) {
				ret = -EAGAIN;
				break;
			}

			/* Drop the read lock across the sleep so other
			 * readers are not blocked while we wait. */
			mutex_unlock(&file_priv->event_read_lock);
			ret = wait_event_interruptible(file_priv->event_wait,
						       !list_empty(&file_priv->event_list));
			if (ret >= 0)
				ret = mutex_lock_interruptible(&file_priv->event_read_lock);
			if (ret)
				return ret;
		} else {
			unsigned length = e->event->length;

			if (length > count - ret) {
put_back_event:
				/* Event does not fit (or the copy faulted):
				 * requeue it at the head and re-reserve its
				 * space. */
				spin_lock_irq(&dev->event_lock);
				file_priv->event_space -= length;
				list_add(&e->link, &file_priv->event_list);
				spin_unlock_irq(&dev->event_lock);
				/* The list is non-empty again; wake pollers. */
				wake_up_interruptible_poll(&file_priv->event_wait,
							   EPOLLIN | EPOLLRDNORM);
				break;
			}

			if (copy_to_user(buffer + ret, e->event, length)) {
				if (ret == 0)
					ret = -EFAULT;
				goto put_back_event;
			}

			ret += length;
			kfree(e);
		}
	}
	mutex_unlock(&file_priv->event_read_lock);

	return ret;
}
EXPORT_SYMBOL(drm_read);
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653__poll_t drm_poll(struct file *filp, struct poll_table_struct *wait)
654{
655 struct drm_file *file_priv = filp->private_data;
656 __poll_t mask = 0;
657
658 poll_wait(filp, &file_priv->event_wait, wait);
659
660 if (!list_empty(&file_priv->event_list))
661 mask |= EPOLLIN | EPOLLRDNORM;
662
663 return mask;
664}
665EXPORT_SYMBOL(drm_poll);
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
/**
 * drm_event_reserve_init_locked - init a DRM event and reserve space for it
 * @dev: DRM device
 * @file_priv: DRM file private data
 * @p: tracking structure for the pending event
 * @e: actual event data to deliver to userspace
 *
 * This function prepares the passed-in event for eventual delivery. If the
 * event doesn't get delivered (because the IOCTL fails later on, before
 * queuing up anything) then the event must be cancelled and freed using
 * drm_event_cancel_free().
 *
 * Callers which already hold &drm_device.event_lock should use this
 * function instead of drm_event_reserve_init().
 *
 * RETURNS:
 * 0 on success or -ENOMEM if @file_priv's remaining event space cannot
 * cover @e.
 */
int drm_event_reserve_init_locked(struct drm_device *dev,
				  struct drm_file *file_priv,
				  struct drm_pending_event *p,
				  struct drm_event *e)
{
	if (file_priv->event_space < e->length)
		return -ENOMEM;

	/* Reserved space is returned on read, cancel, or file release. */
	file_priv->event_space -= e->length;

	p->event = e;
	list_add(&p->pending_link, &file_priv->pending_event_list);
	p->file_priv = file_priv;

	return 0;
}
EXPORT_SYMBOL(drm_event_reserve_init_locked);
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733int drm_event_reserve_init(struct drm_device *dev,
734 struct drm_file *file_priv,
735 struct drm_pending_event *p,
736 struct drm_event *e)
737{
738 unsigned long flags;
739 int ret;
740
741 spin_lock_irqsave(&dev->event_lock, flags);
742 ret = drm_event_reserve_init_locked(dev, file_priv, p, e);
743 spin_unlock_irqrestore(&dev->event_lock, flags);
744
745 return ret;
746}
747EXPORT_SYMBOL(drm_event_reserve_init);
748
749
750
751
752
753
754
755
756
757
758void drm_event_cancel_free(struct drm_device *dev,
759 struct drm_pending_event *p)
760{
761 unsigned long flags;
762
763 spin_lock_irqsave(&dev->event_lock, flags);
764 if (p->file_priv) {
765 p->file_priv->event_space += p->event->length;
766 list_del(&p->pending_link);
767 }
768 spin_unlock_irqrestore(&dev->event_lock, flags);
769
770 if (p->fence)
771 dma_fence_put(p->fence);
772
773 kfree(p);
774}
775EXPORT_SYMBOL(drm_event_cancel_free);
776
777
778
779
780
781
782
783
784
785
786
787
788
/*
 * drm_send_event_helper - send the event @e to its file's event list
 * @dev: DRM device
 * @e: DRM event to deliver
 * @timestamp: when non-zero, passed to dma_fence_signal_timestamp() for the
 * attached fence; otherwise dma_fence_signal() is used
 *
 * Caller must hold &drm_device.event_lock (asserted below).
 */
void drm_send_event_helper(struct drm_device *dev,
			   struct drm_pending_event *e, ktime_t timestamp)
{
	assert_spin_locked(&dev->event_lock);

	/* Signal and release the completion, if the caller armed one. */
	if (e->completion) {
		complete_all(e->completion);
		e->completion_release(e->completion);
		e->completion = NULL;
	}

	/* Signal the attached fence, with @timestamp when one was given. */
	if (e->fence) {
		if (timestamp)
			dma_fence_signal_timestamp(e->fence, timestamp);
		else
			dma_fence_signal(e->fence);
		dma_fence_put(e->fence);
	}

	/*
	 * file_priv == NULL means the file was closed and the event was
	 * orphaned by drm_events_release(): nobody can read it, free it.
	 */
	if (!e->file_priv) {
		kfree(e);
		return;
	}

	/* Move from the pending list to the readable event list and wake
	 * any reader/poller. */
	list_del(&e->pending_link);
	list_add_tail(&e->link,
		      &e->file_priv->event_list);
	wake_up_interruptible_poll(&e->file_priv->event_wait,
				   EPOLLIN | EPOLLRDNORM);
}
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
/**
 * drm_send_event_timestamp_locked - send DRM event to file descriptor
 * @dev: DRM device
 * @e: DRM event to deliver
 * @timestamp: timestamp to attach when signalling the event's fence (see
 * dma_fence_signal_timestamp())
 *
 * This function sends the event @e, initialized with
 * drm_event_reserve_init(), to its associated userspace DRM file. Callers
 * must already hold &drm_device.event_lock.
 *
 * Note that the core will take care of unlinking and disarming events when
 * the corresponding DRM file is closed.
 */
void drm_send_event_timestamp_locked(struct drm_device *dev,
				     struct drm_pending_event *e, ktime_t timestamp)
{
	drm_send_event_helper(dev, e, timestamp);
}
EXPORT_SYMBOL(drm_send_event_timestamp_locked);
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
/**
 * drm_send_event_locked - send DRM event to file descriptor
 * @dev: DRM device
 * @e: DRM event to deliver
 *
 * Like drm_send_event_timestamp_locked() but without an explicit fence
 * timestamp. Callers must already hold &drm_device.event_lock.
 */
void drm_send_event_locked(struct drm_device *dev, struct drm_pending_event *e)
{
	drm_send_event_helper(dev, e, 0);
}
EXPORT_SYMBOL(drm_send_event_locked);
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878void drm_send_event(struct drm_device *dev, struct drm_pending_event *e)
879{
880 unsigned long irqflags;
881
882 spin_lock_irqsave(&dev->event_lock, irqflags);
883 drm_send_event_helper(dev, e, 0);
884 spin_unlock_irqrestore(&dev->event_lock, irqflags);
885}
886EXPORT_SYMBOL(drm_send_event);
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
/**
 * mock_drm_getfile - create an anonymous struct file for a DRM minor
 * @minor: DRM minor to open
 * @flags: file creation flags (O_RDWR etc)
 *
 * Allocates a drm_file for @minor and wraps it in an anon-inode file,
 * bypassing the character device node. Takes an extra drm_device
 * reference for the file's lifetime. Exported for tests only.
 *
 * RETURNS:
 * Pointer to the newly created file, or an ERR_PTR on failure.
 */
struct file *mock_drm_getfile(struct drm_minor *minor, unsigned int flags)
{
	struct drm_device *dev = minor->dev;
	struct drm_file *priv;
	struct file *file;

	priv = drm_file_alloc(minor);
	if (IS_ERR(priv))
		return ERR_CAST(priv);

	file = anon_inode_getfile("drm", dev->driver->fops, priv, flags);
	if (IS_ERR(file)) {
		drm_file_free(priv);
		return file;
	}

	/* Everyone shares a single global address space, see drm_open(). */
	file->f_mapping = dev->anon_inode->i_mapping;

	drm_dev_get(dev);
	priv->filp = file;

	return file;
}
EXPORT_SYMBOL_FOR_TESTS_ONLY(mock_drm_getfile);
927
928#ifdef CONFIG_MMU
929#ifdef CONFIG_TRANSPARENT_HUGEPAGE
930
931
932
933
934
/*
 * drm_addr_inflate() attempts to construct an aligned area by inflating
 * the requested area size and then skipping the unaligned start of the
 * area, so that the start ends up at the same offset within a huge page
 * of size @huge_size as @pgoff is within the file. Adapted from
 * shmem_get_unmapped_area(). Returns the original @addr whenever
 * inflation is unnecessary or fails.
 */
static unsigned long drm_addr_inflate(unsigned long addr,
				      unsigned long len,
				      unsigned long pgoff,
				      unsigned long flags,
				      unsigned long huge_size)
{
	unsigned long offset, inflated_len;
	unsigned long inflated_addr;
	unsigned long inflated_offset;

	/* Offset of @pgoff within a huge page. */
	offset = (pgoff << PAGE_SHIFT) & (huge_size - 1);
	/* Not worth the effort unless at least one full huge page fits. */
	if (offset && offset + len < 2 * huge_size)
		return addr;
	/* Already has the alignment we need. */
	if ((addr & (huge_size - 1)) == offset)
		return addr;

	/* Inflate so the start can be slid up to the matching alignment. */
	inflated_len = len + huge_size - PAGE_SIZE;
	if (inflated_len > TASK_SIZE)
		return addr;
	if (inflated_len < len)	/* overflow */
		return addr;

	inflated_addr = current->mm->get_unmapped_area(NULL, 0, inflated_len,
						       0, flags);
	if (IS_ERR_VALUE(inflated_addr))
		return addr;
	if (inflated_addr & ~PAGE_MASK)
		return addr;

	/* Advance to the next address with the alignment of @offset. */
	inflated_offset = inflated_addr & (huge_size - 1);
	inflated_addr += offset - inflated_offset;
	if (inflated_offset > offset)
		inflated_addr += huge_size;

	if (inflated_addr > TASK_SIZE - len)
		return addr;

	return inflated_addr;
}
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
/**
 * drm_get_unmapped_area - Get an unused user-space virtual memory area
 * suitable for huge page table entries.
 * @file: The struct file representing the address space being mmap()'d.
 * @uaddr: Start address suggested by user-space.
 * @len: Length of the area.
 * @pgoff: The page offset into the address space.
 * @flags: mmap flags
 * @mgr: The address space manager used by the drm driver.
 *
 * This function attempts to find an unused user-space virtual memory area
 * that can accommodate the size we want to map, and that is properly
 * aligned to facilitate huge page table entries matching huge-page-aligned
 * memory in buffer objects.
 *
 * Return: aligned user-space address, or an error value.
 */
unsigned long drm_get_unmapped_area(struct file *file,
				    unsigned long uaddr, unsigned long len,
				    unsigned long pgoff, unsigned long flags,
				    struct drm_vma_offset_manager *mgr)
{
	unsigned long addr;
	unsigned long inflated_addr;
	struct drm_vma_offset_node *node;

	if (len > TASK_SIZE)
		return -ENOMEM;

	/*
	 * @pgoff is the file page-offset; for DRM, huge-page boundaries are
	 * aligned with the offset from the buffer object start rather than
	 * with the file offset. So rebase @pgoff to be the offset from the
	 * start of the buffer object the mapping falls inside of.
	 */
	drm_vma_offset_lock_lookup(mgr);
	node = drm_vma_offset_lookup_locked(mgr, pgoff, 1);
	if (node)
		pgoff -= node->vm_node.start;
	drm_vma_offset_unlock_lookup(mgr);

	addr = current->mm->get_unmapped_area(file, uaddr, len, pgoff, flags);
	if (IS_ERR_VALUE(addr))
		return addr;
	if (addr & ~PAGE_MASK)
		return addr;
	if (addr > TASK_SIZE - len)
		return addr;

	/* No gain from inflating mappings smaller than a PMD huge page. */
	if (len < HPAGE_PMD_SIZE)
		return addr;
	if (flags & MAP_FIXED)
		return addr;

	/*
	 * Our priority is to support MAP_SHARED mapped hugely; honour the
	 * caller's address hint if one was given.
	 */
	if (uaddr)
		return addr;

	inflated_addr = drm_addr_inflate(addr, len, pgoff, flags,
					 HPAGE_PMD_SIZE);

	/* Try the larger PUD alignment as well where supported. */
	if (IS_ENABLED(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD) &&
	    len >= HPAGE_PUD_SIZE)
		inflated_addr = drm_addr_inflate(inflated_addr, len, pgoff,
						 flags, HPAGE_PUD_SIZE);
	return inflated_addr;
}
1052#else
/* Without THP support, simply defer to the architecture default. */
unsigned long drm_get_unmapped_area(struct file *file,
				    unsigned long uaddr, unsigned long len,
				    unsigned long pgoff, unsigned long flags,
				    struct drm_vma_offset_manager *mgr)
{
	return current->mm->get_unmapped_area(file, uaddr, len, pgoff, flags);
}
1060#endif
1061EXPORT_SYMBOL_GPL(drm_get_unmapped_area);
1062#endif
1063