/*
 * File operations for DRM
 *
 * Implements the open/release/read/poll file operations for /dev/dri
 * device files, plus the infrastructure for delivering asynchronous
 * DRM events to userspace.
 */
#include <linux/anon_inodes.h>
#include <linux/dma-fence.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/poll.h>
#include <linux/slab.h>

#include <drm/drm_client.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_print.h>

#include "drm_crtc_internal.h"
#include "drm_internal.h"
#include "drm_legacy.h"

#if defined(CONFIG_MMU) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
#include <uapi/asm/mman.h>
#include <drm/drm_vma_manager.h>
#endif

/* from BKL pushdown */
DEFINE_MUTEX(drm_global_mutex);

bool drm_dev_needs_global_mutex(struct drm_device *dev)
{
	/*
	 * Legacy drivers rely on all kinds of BKL locking semantics, don't
	 * bother. They also still need BKL locking for their ioctls, so better
	 * safe than sorry.
	 */
	if (drm_core_check_feature(dev, DRIVER_LEGACY))
		return true;

	/*
	 * The deprecated ->load callback must be called after the driver is
	 * already registered. This means such drivers rely on the BKL to make
	 * sure an open can't proceed until the driver is actually fully set
	 * up. Similar hilarity holds for the unload callback.
	 */
	if (dev->driver->load || dev->driver->unload)
		return true;

	/*
	 * Drivers with the lastclose callback assume that it's synchronized
	 * against concurrent opens, which again needs the BKL. The proper fix
	 * is to use the drm_client infrastructure with proper locking for each
	 * client.
	 */
	if (dev->driver->lastclose)
		return true;

	return false;
}

/**
 * DOC: file operations
 *
 * Drivers must define the file operations structure that forms the DRM
 * userspace API entry point, even though most of those operations are
 * implemented in the DRM core. The resulting &struct file_operations must be
 * stored in the &drm_driver.fops field.
 *
 * In addition drm_read() and drm_poll() provide support for DRM events. DRM
 * events are a generic and extensible means to send asynchronous events to
 * userspace through the file descriptor. They are used to send vblank events
 * and page flip completions by the KMS API, but drivers can also use them for
 * their own needs, e.g. to signal completion of rendering. For the
 * driver-side event interface see drm_event_reserve_init() and
 * drm_send_event() as the starting points.
 *
 * The memory mapping implementation will vary depending on how the driver
 * manages memory; for GEM-based drivers this is drm_gem_mmap().
 */

/**
 * drm_file_alloc - allocate file context
 * @minor: minor to allocate on
 *
 * This allocates a new DRM file context. It is not linked into any context and
 * can be used by the caller freely. Note that the context keeps a pointer to
 * @minor, so it must be freed before @minor is.
 *
 * RETURNS:
 * Pointer to newly allocated context, ERR_PTR on failure.
 */
struct drm_file *drm_file_alloc(struct drm_minor *minor)
{
	struct drm_device *dev = minor->dev;
	struct drm_file *file;
	int ret;

	file = kzalloc(sizeof(*file), GFP_KERNEL);
	if (!file)
		return ERR_PTR(-ENOMEM);

	file->pid = get_pid(task_pid(current));
	file->minor = minor;

	/* for compatibility root is always authenticated */
	file->authenticated = capable(CAP_SYS_ADMIN);

	INIT_LIST_HEAD(&file->lhead);
	INIT_LIST_HEAD(&file->fbs);
	mutex_init(&file->fbs_lock);
	INIT_LIST_HEAD(&file->blobs);
	INIT_LIST_HEAD(&file->pending_event_list);
	INIT_LIST_HEAD(&file->event_list);
	init_waitqueue_head(&file->event_wait);
	file->event_space = 4096; /* set aside 4k for event buffer */

	spin_lock_init(&file->master_lookup_lock);
	mutex_init(&file->event_read_lock);

	if (drm_core_check_feature(dev, DRIVER_GEM))
		drm_gem_open(dev, file);

	if (drm_core_check_feature(dev, DRIVER_SYNCOBJ))
		drm_syncobj_open(file);

	drm_prime_init_file_private(&file->prime);

	if (dev->driver->open) {
		ret = dev->driver->open(dev, file);
		if (ret < 0)
			goto out_prime_destroy;
	}

	return file;

out_prime_destroy:
	drm_prime_destroy_file_private(&file->prime);
	if (drm_core_check_feature(dev, DRIVER_SYNCOBJ))
		drm_syncobj_release(file);
	if (drm_core_check_feature(dev, DRIVER_GEM))
		drm_gem_release(dev, file);
	put_pid(file->pid);
	kfree(file);

	return ERR_PTR(ret);
}

static void drm_events_release(struct drm_file *file_priv)
{
	struct drm_device *dev = file_priv->minor->dev;
	struct drm_pending_event *e, *et;
	unsigned long flags;

	spin_lock_irqsave(&dev->event_lock, flags);

	/* Unlink pending events */
	list_for_each_entry_safe(e, et, &file_priv->pending_event_list,
				 pending_link) {
		list_del(&e->pending_link);
		e->file_priv = NULL;
	}

	/* Remove unconsumed events */
	list_for_each_entry_safe(e, et, &file_priv->event_list, link) {
		list_del(&e->link);
		kfree(e);
	}

	spin_unlock_irqrestore(&dev->event_lock, flags);
}

/**
 * drm_file_free - free file context
 * @file: context to free, or NULL
 *
 * This destroys and deallocates a DRM file context previously allocated via
 * drm_file_alloc(). The caller must make sure to unlink it from any contexts
 * before calling this.
 *
 * If NULL is passed, this is a no-op.
 */
void drm_file_free(struct drm_file *file)
{
	struct drm_device *dev;

	if (!file)
		return;

	dev = file->minor->dev;

	DRM_DEBUG("comm=\"%s\", pid=%d, dev=0x%lx, open_count=%d\n",
		  current->comm, task_pid_nr(current),
		  (long)old_encode_dev(file->minor->kdev->devt),
		  atomic_read(&dev->open_count));

#ifdef CONFIG_DRM_LEGACY
	if (drm_core_check_feature(dev, DRIVER_LEGACY) &&
	    dev->driver->preclose)
		dev->driver->preclose(dev, file);
#endif

	if (drm_core_check_feature(dev, DRIVER_LEGACY))
		drm_legacy_lock_release(dev, file->filp);

	if (drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		drm_legacy_reclaim_buffers(dev, file);

	drm_events_release(file);

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		drm_fb_release(file);
		drm_property_destroy_user_blobs(dev, file);
	}

	if (drm_core_check_feature(dev, DRIVER_SYNCOBJ))
		drm_syncobj_release(file);

	if (drm_core_check_feature(dev, DRIVER_GEM))
		drm_gem_release(dev, file);

	drm_legacy_ctxbitmap_flush(dev, file);

	if (drm_is_primary_client(file))
		drm_master_release(file);

	if (dev->driver->postclose)
		dev->driver->postclose(dev, file);

	drm_prime_destroy_file_private(&file->prime);

	WARN_ON(!list_empty(&file->event_list));

	put_pid(file->pid);
	kfree(file);
}

static void drm_close_helper(struct file *filp)
{
	struct drm_file *file_priv = filp->private_data;
	struct drm_device *dev = file_priv->minor->dev;

	mutex_lock(&dev->filelist_mutex);
	list_del(&file_priv->lhead);
	mutex_unlock(&dev->filelist_mutex);

	drm_file_free(file_priv);
}

/*
 * Check whether DRI will run on this CPU.
 *
 * \return non-zero if the DRI will run on this CPU, or zero otherwise.
 */
static int drm_cpu_valid(void)
{
#if defined(__sparc__) && !defined(__sparc_v9__)
	return 0;		/* No cmpxchg before v9 sparc. */
#endif
	return 1;
}

/*
 * Called whenever a process opens a device.
 *
 * Creates and initializes a drm_file structure for the file private data in
 * \p filp and adds it into the double linked list in \p dev.
 */
static int drm_open_helper(struct file *filp, struct drm_minor *minor)
{
	struct drm_device *dev = minor->dev;
	struct drm_file *priv;
	int ret;

	if (filp->f_flags & O_EXCL)
		return -EBUSY;	/* No exclusive opens */
	if (!drm_cpu_valid())
		return -EINVAL;
	if (dev->switch_power_state != DRM_SWITCH_POWER_ON &&
	    dev->switch_power_state != DRM_SWITCH_POWER_DYNAMIC_OFF)
		return -EINVAL;

	DRM_DEBUG("comm=\"%s\", pid=%d, minor=%d\n", current->comm,
		  task_pid_nr(current), minor->index);

	priv = drm_file_alloc(minor);
	if (IS_ERR(priv))
		return PTR_ERR(priv);

	if (drm_is_primary_client(priv)) {
		ret = drm_master_open(priv);
		if (ret) {
			drm_file_free(priv);
			return ret;
		}
	}

	filp->private_data = priv;
	filp->f_mode |= FMODE_UNSIGNED_OFFSET;
	priv->filp = filp;

	mutex_lock(&dev->filelist_mutex);
	list_add(&priv->lhead, &dev->filelist);
	mutex_unlock(&dev->filelist_mutex);

#ifdef CONFIG_DRM_LEGACY
#ifdef __alpha__
	/*
	 * Default the hose
	 */
	if (!dev->hose) {
		struct pci_dev *pci_dev;

		pci_dev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, NULL);
		if (pci_dev) {
			dev->hose = pci_dev->sysdata;
			pci_dev_put(pci_dev);
		}
		if (!dev->hose) {
			struct pci_bus *b = list_entry(pci_root_buses.next,
						       struct pci_bus, node);
			if (b)
				dev->hose = b->sysdata;
		}
	}
#endif
#endif

	return 0;
}

/**
 * drm_open - open method for DRM file
 * @inode: device inode
 * @filp: file pointer.
 *
 * This function must be used by drivers as their &file_operations.open method.
 * It looks up the correct DRM device and instantiates all the per-file
 * resources for it. It also calls the &drm_driver.open driver callback.
 *
 * RETURNS:
 * 0 on success or negative errno value on failure.
 */
int drm_open(struct inode *inode, struct file *filp)
{
	struct drm_device *dev;
	struct drm_minor *minor;
	int retcode;
	int need_setup = 0;

	minor = drm_minor_acquire(iminor(inode));
	if (IS_ERR(minor))
		return PTR_ERR(minor);

	dev = minor->dev;
	if (drm_dev_needs_global_mutex(dev))
		mutex_lock(&drm_global_mutex);

	if (!atomic_fetch_inc(&dev->open_count))
		need_setup = 1;

	/* share address_space across all char-devs of a single device */
	filp->f_mapping = dev->anon_inode->i_mapping;

	retcode = drm_open_helper(filp, minor);
	if (retcode)
		goto err_undo;
	if (need_setup) {
		retcode = drm_legacy_setup(dev);
		if (retcode) {
			drm_close_helper(filp);
			goto err_undo;
		}
	}

	if (drm_dev_needs_global_mutex(dev))
		mutex_unlock(&drm_global_mutex);

	return 0;

err_undo:
	atomic_dec(&dev->open_count);
	if (drm_dev_needs_global_mutex(dev))
		mutex_unlock(&drm_global_mutex);
	drm_minor_release(minor);
	return retcode;
}
EXPORT_SYMBOL(drm_open);

void drm_lastclose(struct drm_device *dev)
{
	DRM_DEBUG("\n");

	if (dev->driver->lastclose)
		dev->driver->lastclose(dev);
	DRM_DEBUG("driver lastclose completed\n");

	if (drm_core_check_feature(dev, DRIVER_LEGACY))
		drm_legacy_dev_reinit(dev);

	drm_client_dev_restore(dev);
}

/**
 * drm_release - release method for DRM file
 * @inode: device inode
 * @filp: file pointer.
 *
 * This function must be used by drivers as their &file_operations.release
 * method. It frees any resources associated with the open file, and calls the
 * &drm_driver.postclose driver callback. If this is the last open file of the
 * DRM device it also proceeds to call the &drm_driver.lastclose driver
 * callback.
 *
 * RETURNS:
 * Always succeeds and returns 0.
 */
int drm_release(struct inode *inode, struct file *filp)
{
	struct drm_file *file_priv = filp->private_data;
	struct drm_minor *minor = file_priv->minor;
	struct drm_device *dev = minor->dev;

	if (drm_dev_needs_global_mutex(dev))
		mutex_lock(&drm_global_mutex);

	DRM_DEBUG("open_count = %d\n", atomic_read(&dev->open_count));

	drm_close_helper(filp);

	if (atomic_dec_and_test(&dev->open_count))
		drm_lastclose(dev);

	if (drm_dev_needs_global_mutex(dev))
		mutex_unlock(&drm_global_mutex);

	drm_minor_release(minor);

	return 0;
}
EXPORT_SYMBOL(drm_release);
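
/*
 * Illustrative sketch (not part of the original file): a GEM-based driver
 * typically wires the helpers in this file into its &file_operations, either
 * by hand or via the DEFINE_DRM_GEM_FOPS() macro from <drm/drm_gem.h>, which
 * expands to roughly the following ("example_fops" is a placeholder name):
 *
 *	static const struct file_operations example_fops = {
 *		.owner		= THIS_MODULE,
 *		.open		= drm_open,
 *		.release	= drm_release,
 *		.unlocked_ioctl	= drm_ioctl,
 *		.compat_ioctl	= drm_compat_ioctl,
 *		.poll		= drm_poll,
 *		.read		= drm_read,
 *		.llseek		= noop_llseek,
 *		.mmap		= drm_gem_mmap,
 *	};
 *
 * The structure is then assigned to &drm_driver.fops.
 */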

/**
 * drm_release_noglobal - release method for DRM file
 * @inode: device inode
 * @filp: file pointer.
 *
 * This function may be used by drivers as their &file_operations.release
 * method. It is identical to drm_release(), except that it avoids taking
 * drm_global_mutex in the common case: the open count is decremented and the
 * mutex acquired in one atomic step via atomic_dec_and_mutex_lock(), so the
 * mutex is only held for the final close that needs to run drm_lastclose().
 *
 * RETURNS:
 * Always succeeds and returns 0.
 */
int drm_release_noglobal(struct inode *inode, struct file *filp)
{
	struct drm_file *file_priv = filp->private_data;
	struct drm_minor *minor = file_priv->minor;
	struct drm_device *dev = minor->dev;

	drm_close_helper(filp);

	if (atomic_dec_and_mutex_lock(&dev->open_count, &drm_global_mutex)) {
		drm_lastclose(dev);
		mutex_unlock(&drm_global_mutex);
	}

	drm_minor_release(minor);

	return 0;
}
EXPORT_SYMBOL(drm_release_noglobal);

/**
 * drm_read - read method for DRM file
 * @filp: file pointer
 * @buffer: userspace destination pointer for the read
 * @count: count in bytes to read
 * @offset: offset to read (unused, DRM events are read like a pipe)
 *
 * This function must be used by drivers as their &file_operations.read
 * method if they use DRM events for asynchronous signalling to userspace.
 * Since events are used by the KMS API for vblank and page flip completion
 * this means all modern display drivers must use it.
 *
 * @offset is ignored. Polling support is provided by drm_poll().
 *
 * This function will only ever read a full event. Therefore userspace must
 * supply a big enough buffer to fit any event to ensure forward progress.
 * Since the maximum event space is currently 4K it's recommended to just use
 * that for safety.
 *
 * RETURNS:
 * Number of bytes read (always aligned to full events, and can be 0) or a
 * negative error code on failure.
 */
ssize_t drm_read(struct file *filp, char __user *buffer,
		 size_t count, loff_t *offset)
{
	struct drm_file *file_priv = filp->private_data;
	struct drm_device *dev = file_priv->minor->dev;
	ssize_t ret;

	ret = mutex_lock_interruptible(&file_priv->event_read_lock);
	if (ret)
		return ret;

	for (;;) {
		struct drm_pending_event *e = NULL;

		spin_lock_irq(&dev->event_lock);
		if (!list_empty(&file_priv->event_list)) {
			e = list_first_entry(&file_priv->event_list,
					     struct drm_pending_event, link);
			file_priv->event_space += e->event->length;
			list_del(&e->link);
		}
		spin_unlock_irq(&dev->event_lock);

		if (e == NULL) {
			if (ret)
				break;

			if (filp->f_flags & O_NONBLOCK) {
				ret = -EAGAIN;
				break;
			}

			mutex_unlock(&file_priv->event_read_lock);
			ret = wait_event_interruptible(file_priv->event_wait,
						       !list_empty(&file_priv->event_list));
			if (ret >= 0)
				ret = mutex_lock_interruptible(&file_priv->event_read_lock);
			if (ret)
				return ret;
		} else {
			unsigned length = e->event->length;

			/*
			 * The event doesn't fit into the remaining buffer
			 * space (or copy_to_user() faulted below): put it back
			 * on the list so a later read() can retry, and wake
			 * any other waiters.
			 */
			if (length > count - ret) {
put_back_event:
				spin_lock_irq(&dev->event_lock);
				file_priv->event_space -= length;
				list_add(&e->link, &file_priv->event_list);
				spin_unlock_irq(&dev->event_lock);
				wake_up_interruptible_poll(&file_priv->event_wait,
							   EPOLLIN | EPOLLRDNORM);
				break;
			}

			if (copy_to_user(buffer + ret, e->event, length)) {
				if (ret == 0)
					ret = -EFAULT;
				goto put_back_event;
			}

			ret += length;
			kfree(e);
		}
	}
	mutex_unlock(&file_priv->event_read_lock);

	return ret;
}
EXPORT_SYMBOL(drm_read);
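
/*
 * Illustrative sketch (not part of the original file): userspace consumes
 * events by read()ing whole &struct drm_event records from the DRM fd and
 * walking them by their self-describing length, e.g. (error handling elided;
 * 4096 matches the per-file event space reserved in drm_file_alloc()):
 *
 *	char buf[4096];
 *	ssize_t n = read(drm_fd, buf, sizeof(buf));
 *
 *	for (ssize_t off = 0; off + sizeof(struct drm_event) <= n;) {
 *		struct drm_event *e = (struct drm_event *)&buf[off];
 *
 *		switch (e->type) {
 *		case DRM_EVENT_VBLANK:
 *		case DRM_EVENT_FLIP_COMPLETE:
 *			// dispatch to the relevant handler
 *			break;
 *		}
 *		off += e->length;
 *	}
 *
 * libdrm's drmHandleEvent() implements this loop for the core event types.
 */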

/**
 * drm_poll - poll method for DRM file
 * @filp: file pointer
 * @wait: poll waiter table
 *
 * This function must be used by drivers as their &file_operations.poll method
 * if they use DRM events for asynchronous signalling to userspace. Since
 * events are used by the KMS API for vblank and page flip completion this
 * means all modern display drivers must use it.
 *
 * See also drm_read().
 *
 * RETURNS:
 * Mask of POLL flags indicating the current status of the file.
 */
__poll_t drm_poll(struct file *filp, struct poll_table_struct *wait)
{
	struct drm_file *file_priv = filp->private_data;
	__poll_t mask = 0;

	poll_wait(filp, &file_priv->event_wait, wait);

	if (!list_empty(&file_priv->event_list))
		mask |= EPOLLIN | EPOLLRDNORM;

	return mask;
}
EXPORT_SYMBOL(drm_poll);

/**
 * drm_event_reserve_init_locked - init a DRM event and reserve space for it
 * @dev: DRM device
 * @file_priv: DRM file private data
 * @p: tracking structure for the pending event
 * @e: actual event data to deliver to userspace
 *
 * This function prepares the passed in event for eventual delivery. If the
 * event doesn't get delivered (because the IOCTL fails later on, before
 * queuing up anything) then the event must be cancelled and freed using
 * drm_event_cancel_free(). Successfully initialized events should be sent out
 * using drm_send_event() or drm_send_event_locked() to signal completion of
 * the asynchronous event to userspace.
 *
 * If callers embedded @p into a larger structure it must be allocated with
 * kmalloc and @p must be the first member element.
 *
 * This is the locked version of drm_event_reserve_init() for callers which
 * already hold &drm_device.event_lock.
 *
 * RETURNS:
 * 0 on success or a negative error code on failure.
 */
int drm_event_reserve_init_locked(struct drm_device *dev,
				  struct drm_file *file_priv,
				  struct drm_pending_event *p,
				  struct drm_event *e)
{
	if (file_priv->event_space < e->length)
		return -ENOMEM;

	file_priv->event_space -= e->length;

	p->event = e;
	list_add(&p->pending_link, &file_priv->pending_event_list);
	p->file_priv = file_priv;

	return 0;
}
EXPORT_SYMBOL(drm_event_reserve_init_locked);

/**
 * drm_event_reserve_init - init a DRM event and reserve space for it
 * @dev: DRM device
 * @file_priv: DRM file private data
 * @p: tracking structure for the pending event
 * @e: actual event data to deliver to userspace
 *
 * This function prepares the passed in event for eventual delivery. If the
 * event doesn't get delivered (because the IOCTL fails later on, before
 * queuing up anything) then the event must be cancelled and freed using
 * drm_event_cancel_free(). Successfully initialized events should be sent out
 * using drm_send_event() or drm_send_event_locked() to signal completion of
 * the asynchronous event to userspace.
 *
 * If callers embedded @p into a larger structure it must be allocated with
 * kmalloc and @p must be the first member element.
 *
 * Callers which already hold &drm_device.event_lock should use
 * drm_event_reserve_init_locked() instead.
 *
 * RETURNS:
 * 0 on success or a negative error code on failure.
 */
int drm_event_reserve_init(struct drm_device *dev,
			   struct drm_file *file_priv,
			   struct drm_pending_event *p,
			   struct drm_event *e)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&dev->event_lock, flags);
	ret = drm_event_reserve_init_locked(dev, file_priv, p, e);
	spin_unlock_irqrestore(&dev->event_lock, flags);

	return ret;
}
EXPORT_SYMBOL(drm_event_reserve_init);
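
/*
 * Illustrative sketch (not part of the original file) of the event life cycle
 * around drm_event_reserve_init(); here "e" embeds a &struct drm_pending_event
 * as its first member and queue_hw_work() is a hypothetical driver function:
 *
 *	e = kzalloc(sizeof(*e), GFP_KERNEL);
 *	if (!e)
 *		return -ENOMEM;
 *	e->event.base.type = DRM_EVENT_FLIP_COMPLETE;
 *	e->event.base.length = sizeof(e->event);
 *
 *	ret = drm_event_reserve_init(dev, file_priv, &e->base, &e->event.base);
 *	if (ret) {
 *		kfree(e);		// nothing was reserved, plain free
 *		return ret;
 *	}
 *
 *	if (queue_hw_work(e)) {
 *		drm_event_cancel_free(dev, &e->base);	// undo the reservation
 *		return -EIO;
 *	}
 *	// ...later, from the completion path:
 *	drm_send_event(dev, &e->base);	// deliver to userspace
 */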

/**
 * drm_event_cancel_free - free a DRM event and release its space
 * @dev: DRM device
 * @p: tracking structure for the pending event
 *
 * This function frees the event @p initialized with drm_event_reserve_init()
 * and releases any allocated space. It is used to cancel an event when the
 * nonblocking operation could not be submitted and needed to be aborted.
 */
void drm_event_cancel_free(struct drm_device *dev,
			   struct drm_pending_event *p)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->event_lock, flags);
	if (p->file_priv) {
		p->file_priv->event_space += p->event->length;
		list_del(&p->pending_link);
	}
	spin_unlock_irqrestore(&dev->event_lock, flags);

	if (p->fence)
		dma_fence_put(p->fence);

	kfree(p);
}
EXPORT_SYMBOL(drm_event_cancel_free);

static void drm_send_event_helper(struct drm_device *dev,
				  struct drm_pending_event *e, ktime_t timestamp)
{
	assert_spin_locked(&dev->event_lock);

	if (e->completion) {
		complete_all(e->completion);
		e->completion_release(e->completion);
		e->completion = NULL;
	}

	if (e->fence) {
		if (timestamp)
			dma_fence_signal_timestamp(e->fence, timestamp);
		else
			dma_fence_signal(e->fence);
		dma_fence_put(e->fence);
	}

	if (!e->file_priv) {
		kfree(e);
		return;
	}

	list_del(&e->pending_link);
	list_add_tail(&e->link,
		      &e->file_priv->event_list);
	wake_up_interruptible_poll(&e->file_priv->event_wait,
				   EPOLLIN | EPOLLRDNORM);
}

/**
 * drm_send_event_timestamp_locked - send DRM event to file descriptor
 * @dev: DRM device
 * @e: DRM event to deliver
 * @timestamp: timestamp to set for the fence event in kernel's CLOCK_MONOTONIC
 * time domain
 *
 * This function sends the event @e, initialized with drm_event_reserve_init(),
 * to its associated userspace DRM file. Callers must already hold
 * &drm_device.event_lock.
 *
 * Note that the core will take care of unlinking and disarming events when the
 * corresponding DRM file is closed. Drivers need not worry about whether the
 * DRM file for this event still exists and can call this function upon
 * completion of the asynchronous work unconditionally.
 */
void drm_send_event_timestamp_locked(struct drm_device *dev,
				     struct drm_pending_event *e, ktime_t timestamp)
{
	drm_send_event_helper(dev, e, timestamp);
}
EXPORT_SYMBOL(drm_send_event_timestamp_locked);

/**
 * drm_send_event_locked - send DRM event to file descriptor
 * @dev: DRM device
 * @e: DRM event to deliver
 *
 * This function sends the event @e, initialized with drm_event_reserve_init(),
 * to its associated userspace DRM file. Callers must already hold
 * &drm_device.event_lock; see drm_send_event() for the unlocked version.
 *
 * Note that the core will take care of unlinking and disarming events when the
 * corresponding DRM file is closed. Drivers need not worry about whether the
 * DRM file for this event still exists and can call this function upon
 * completion of the asynchronous work unconditionally.
 */
void drm_send_event_locked(struct drm_device *dev, struct drm_pending_event *e)
{
	drm_send_event_helper(dev, e, 0);
}
EXPORT_SYMBOL(drm_send_event_locked);

/**
 * drm_send_event - send DRM event to file descriptor
 * @dev: DRM device
 * @e: DRM event to deliver
 *
 * This function sends the event @e, initialized with drm_event_reserve_init(),
 * to its associated userspace DRM file. This function acquires
 * &drm_device.event_lock; see drm_send_event_locked() for callers which
 * already hold this lock.
 *
 * Note that the core will take care of unlinking and disarming events when the
 * corresponding DRM file is closed. Drivers need not worry about whether the
 * DRM file for this event still exists and can call this function upon
 * completion of the asynchronous work unconditionally.
 */
void drm_send_event(struct drm_device *dev, struct drm_pending_event *e)
{
	unsigned long irqflags;

	spin_lock_irqsave(&dev->event_lock, irqflags);
	drm_send_event_helper(dev, e, 0);
	spin_unlock_irqrestore(&dev->event_lock, irqflags);
}
EXPORT_SYMBOL(drm_send_event);
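
/*
 * Illustrative sketch (not part of the original file): code that already holds
 * &drm_device.event_lock, e.g. a page-flip completion handler that must also
 * inspect pending-event state under the same lock, uses the _locked variant:
 *
 *	spin_lock_irqsave(&dev->event_lock, flags);
 *	// ...check and update driver state under the lock...
 *	drm_send_event_locked(dev, &e->base);
 *	spin_unlock_irqrestore(&dev->event_lock, flags);
 */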

/**
 * mock_drm_getfile - Create a new struct file for the drm device
 * @minor: drm minor to wrap (e.g. #drm_device.primary)
 * @flags: file creation mode (O_RDWR etc)
 *
 * This creates a new struct file that wraps a DRM file context around a
 * DRM minor. It mimicks userspace opening e.g. /dev/dri/card0, but without
 * going through the character device node. It is mainly intended for
 * selftests.
 *
 * RETURNS:
 * Pointer to newly created struct file, ERR_PTR on failure.
 */
struct file *mock_drm_getfile(struct drm_minor *minor, unsigned int flags)
{
	struct drm_device *dev = minor->dev;
	struct drm_file *priv;
	struct file *file;

	priv = drm_file_alloc(minor);
	if (IS_ERR(priv))
		return ERR_CAST(priv);

	file = anon_inode_getfile("drm", dev->driver->fops, priv, flags);
	if (IS_ERR(file)) {
		drm_file_free(priv);
		return file;
	}

	/* Everyone shares a single global address space */
	file->f_mapping = dev->anon_inode->i_mapping;

	drm_dev_get(dev);
	priv->filp = file;

	return file;
}
EXPORT_SYMBOL_FOR_TESTS_ONLY(mock_drm_getfile);

#ifdef CONFIG_MMU
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * drm_addr_inflate() attempts to construct an aligned area by inflating
 * the area size and skipping the unaligned start of the area.
 * adapted from shmem_get_unmapped_area()
 */
static unsigned long drm_addr_inflate(unsigned long addr,
				      unsigned long len,
				      unsigned long pgoff,
				      unsigned long flags,
				      unsigned long huge_size)
{
	unsigned long offset, inflated_len;
	unsigned long inflated_addr;
	unsigned long inflated_offset;

	offset = (pgoff << PAGE_SHIFT) & (huge_size - 1);
	if (offset && offset + len < 2 * huge_size)
		return addr;
	if ((addr & (huge_size - 1)) == offset)
		return addr;

	inflated_len = len + huge_size - PAGE_SIZE;
	if (inflated_len > TASK_SIZE)
		return addr;
	if (inflated_len < len)
		return addr;

	inflated_addr = current->mm->get_unmapped_area(NULL, 0, inflated_len,
						       0, flags);
	if (IS_ERR_VALUE(inflated_addr))
		return addr;
	if (inflated_addr & ~PAGE_MASK)
		return addr;

	inflated_offset = inflated_addr & (huge_size - 1);
	inflated_addr += offset - inflated_offset;
	if (inflated_offset > offset)
		inflated_addr += huge_size;

	if (inflated_addr > TASK_SIZE - len)
		return addr;

	return inflated_addr;
}

/**
 * drm_get_unmapped_area() - Get an unused user-space virtual memory area
 * suitable for huge page table entries.
 * @file: The struct file representing the address space being mmap()'d.
 * @uaddr: Start address suggested by user-space.
 * @len: Length of the area.
 * @pgoff: The page offset into the address space.
 * @flags: mmap flags
 * @mgr: The address space manager used by the drm driver. This argument can
 * probably be removed at some point when all drivers use the same
 * address space manager.
 *
 * This function attempts to find an unused user-space virtual memory area
 * that can accommodate the size we want to map, and that is properly
 * aligned to facilitate huge page table entries matching actual
 * huge pages or huge page aligned memory in buffer objects. Buffer objects
 * are assumed to start at huge page boundary pfns (io memory) or be backed
 * by huge pages aligned to the start of the buffer object
 * (system- or coherent memory). Adapted from shmem_get_unmapped_area.
 *
 * Return: aligned user-space address.
 */
unsigned long drm_get_unmapped_area(struct file *file,
				    unsigned long uaddr, unsigned long len,
				    unsigned long pgoff, unsigned long flags,
				    struct drm_vma_offset_manager *mgr)
{
	unsigned long addr;
	unsigned long inflated_addr;
	struct drm_vma_offset_node *node;

	if (len > TASK_SIZE)
		return -ENOMEM;

	/*
	 * @pgoff is the file page-offset the huge page boundaries of
	 * which typically aligns to physical address huge page boundaries.
	 * That's not true for DRM, however, where physical address huge
	 * page boundaries instead are aligned with the offset from
	 * buffer object start. So adjust @pgoff to be the offset from
	 * buffer object start.
	 */
	drm_vma_offset_lock_lookup(mgr);
	node = drm_vma_offset_lookup_locked(mgr, pgoff, 1);
	if (node)
		pgoff -= node->vm_node.start;
	drm_vma_offset_unlock_lookup(mgr);

	addr = current->mm->get_unmapped_area(file, uaddr, len, pgoff, flags);
	if (IS_ERR_VALUE(addr))
		return addr;
	if (addr & ~PAGE_MASK)
		return addr;
	if (addr > TASK_SIZE - len)
		return addr;

	if (len < HPAGE_PMD_SIZE)
		return addr;
	if (flags & MAP_FIXED)
		return addr;
	/*
	 * Our priority is to support MAP_SHARED mapped hugely;
	 * and support MAP_PRIVATE mapped hugely too, until it is COWed.
	 * But if caller specified an address hint, respect that as before.
	 */
	if (uaddr)
		return addr;

	inflated_addr = drm_addr_inflate(addr, len, pgoff, flags,
					 HPAGE_PMD_SIZE);

	if (IS_ENABLED(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD) &&
	    len >= HPAGE_PUD_SIZE)
		inflated_addr = drm_addr_inflate(inflated_addr, len, pgoff,
						 flags, HPAGE_PUD_SIZE);
	return inflated_addr;
}
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
unsigned long drm_get_unmapped_area(struct file *file,
				    unsigned long uaddr, unsigned long len,
				    unsigned long pgoff, unsigned long flags,
				    struct drm_vma_offset_manager *mgr)
{
	return current->mm->get_unmapped_area(file, uaddr, len, pgoff, flags);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
EXPORT_SYMBOL_GPL(drm_get_unmapped_area);
#endif /* CONFIG_MMU */