1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34#include <linux/anon_inodes.h>
35#include <linux/dma-fence.h>
36#include <linux/file.h>
37#include <linux/module.h>
38#include <linux/pci.h>
39#include <linux/poll.h>
40#include <linux/slab.h>
41
42#include <drm/drm_client.h>
43#include <drm/drm_drv.h>
44#include <drm/drm_file.h>
45#include <drm/drm_print.h>
46
47#include "drm_crtc_internal.h"
48#include "drm_internal.h"
49#include "drm_legacy.h"
50
51#if defined(CONFIG_MMU) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
52#include <uapi/asm/mman.h>
53#include <drm/drm_vma_manager.h>
54#endif
55
56
/* BKL-era lock serializing open/close for drivers that still need it. */
DEFINE_MUTEX(drm_global_mutex);
58
59bool drm_dev_needs_global_mutex(struct drm_device *dev)
60{
61
62
63
64
65
66 if (drm_core_check_feature(dev, DRIVER_LEGACY))
67 return true;
68
69
70
71
72
73
74
75 if (dev->driver->load || dev->driver->unload)
76 return true;
77
78
79
80
81
82
83
84 if (dev->driver->lastclose)
85 return true;
86
87 return false;
88}
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
/**
 * drm_file_alloc - allocate and set up a new file private for @minor
 * @minor: minor the new drm_file is opened on
 *
 * Allocates a &struct drm_file, initializes its lists, locks and event
 * bookkeeping, opens GEM/syncobj/prime state when the driver advertises
 * the corresponding feature, and finally calls the driver's ->open hook.
 *
 * RETURNS: the new drm_file on success, ERR_PTR on failure.  On failure
 * all partially initialized state is torn down again.
 */
struct drm_file *drm_file_alloc(struct drm_minor *minor)
{
	struct drm_device *dev = minor->dev;
	struct drm_file *file;
	int ret;

	file = kzalloc(sizeof(*file), GFP_KERNEL);
	if (!file)
		return ERR_PTR(-ENOMEM);

	file->pid = get_pid(task_pid(current));
	file->minor = minor;

	/* For compatibility root is always authenticated. */
	file->authenticated = capable(CAP_SYS_ADMIN);

	INIT_LIST_HEAD(&file->lhead);
	INIT_LIST_HEAD(&file->fbs);
	mutex_init(&file->fbs_lock);
	INIT_LIST_HEAD(&file->blobs);
	INIT_LIST_HEAD(&file->pending_event_list);
	INIT_LIST_HEAD(&file->event_list);
	init_waitqueue_head(&file->event_wait);
	/* Initial byte budget for events queued to this file. */
	file->event_space = 4096;

	mutex_init(&file->event_read_lock);

	if (drm_core_check_feature(dev, DRIVER_GEM))
		drm_gem_open(dev, file);

	if (drm_core_check_feature(dev, DRIVER_SYNCOBJ))
		drm_syncobj_open(file);

	drm_prime_init_file_private(&file->prime);

	if (dev->driver->open) {
		ret = dev->driver->open(dev, file);
		if (ret < 0)
			goto out_prime_destroy;
	}

	return file;

	/* Error unwind: release state in reverse order of setup. */
out_prime_destroy:
	drm_prime_destroy_file_private(&file->prime);
	if (drm_core_check_feature(dev, DRIVER_SYNCOBJ))
		drm_syncobj_release(file);
	if (drm_core_check_feature(dev, DRIVER_GEM))
		drm_gem_release(dev, file);
	put_pid(file->pid);
	kfree(file);

	return ERR_PTR(ret);
}
209
/* Detach and/or free all events still associated with @file_priv. */
static void drm_events_release(struct drm_file *file_priv)
{
	struct drm_device *dev = file_priv->minor->dev;
	struct drm_pending_event *e, *et;
	unsigned long flags;

	spin_lock_irqsave(&dev->event_lock, flags);

	/*
	 * Unlink pending (not yet sent) events; clearing ->file_priv
	 * tells a later drm_send_event_locked() to just free them.
	 */
	list_for_each_entry_safe(e, et, &file_priv->pending_event_list,
				 pending_link) {
		list_del(&e->pending_link);
		e->file_priv = NULL;
	}

	/* Events already delivered but never read can be freed outright. */
	list_for_each_entry_safe(e, et, &file_priv->event_list, link) {
		list_del(&e->link);
		kfree(e);
	}

	spin_unlock_irqrestore(&dev->event_lock, flags);
}
233
234
235
236
237
238
239
240
241
242
243
244
245
246
/**
 * drm_file_free - free file private data and the file itself
 * @file: file to free, may be NULL
 *
 * Tears down everything drm_file_alloc() set up, roughly in reverse
 * order, and additionally runs the legacy ->preclose hook, legacy
 * buffer/context cleanup, master release for primary clients and the
 * driver's ->postclose hook.
 */
void drm_file_free(struct drm_file *file)
{
	struct drm_device *dev;

	if (!file)
		return;

	dev = file->minor->dev;

	DRM_DEBUG("comm=\"%s\", pid=%d, dev=0x%lx, open_count=%d\n",
		  current->comm, task_pid_nr(current),
		  (long)old_encode_dev(file->minor->kdev->devt),
		  atomic_read(&dev->open_count));

#ifdef CONFIG_DRM_LEGACY
	/* Legacy-only hook, runs before any state is torn down. */
	if (drm_core_check_feature(dev, DRIVER_LEGACY) &&
	    dev->driver->preclose)
		dev->driver->preclose(dev, file);
#endif

	if (drm_core_check_feature(dev, DRIVER_LEGACY))
		drm_legacy_lock_release(dev, file->filp);

	if (drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		drm_legacy_reclaim_buffers(dev, file);

	drm_events_release(file);

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		drm_fb_release(file);
		drm_property_destroy_user_blobs(dev, file);
	}

	if (drm_core_check_feature(dev, DRIVER_SYNCOBJ))
		drm_syncobj_release(file);

	if (drm_core_check_feature(dev, DRIVER_GEM))
		drm_gem_release(dev, file);

	drm_legacy_ctxbitmap_flush(dev, file);

	if (drm_is_primary_client(file))
		drm_master_release(file);

	/* Driver hook runs before prime teardown, mirroring ->open. */
	if (dev->driver->postclose)
		dev->driver->postclose(dev, file);

	drm_prime_destroy_file_private(&file->prime);

	/* drm_events_release() must have emptied the event list by now. */
	WARN_ON(!list_empty(&file->event_list));

	put_pid(file->pid);
	kfree(file);
}
301
302static void drm_close_helper(struct file *filp)
303{
304 struct drm_file *file_priv = filp->private_data;
305 struct drm_device *dev = file_priv->minor->dev;
306
307 mutex_lock(&dev->filelist_mutex);
308 list_del(&file_priv->lhead);
309 mutex_unlock(&dev->filelist_mutex);
310
311 drm_file_free(file_priv);
312}
313
314
315
316
317
318
/*
 * Check whether DRM can run on this CPU at all; only pre-v9 sparc is
 * rejected.
 */
static int drm_cpu_valid(void)
{
#if defined(__sparc__) && !defined(__sparc_v9__)
	return 0;
#else
	return 1;
#endif
}
326
327
328
329
330
331
332
333
334
335
336
/*
 * Called whenever a process opens a device node through drm_open().
 *
 * Creates and initializes the drm_file for the process, opens the
 * master for primary nodes, and links the new file into the device's
 * filelist.
 *
 * RETURNS: 0 on success, negative errno on failure.
 */
static int drm_open_helper(struct file *filp, struct drm_minor *minor)
{
	struct drm_device *dev = minor->dev;
	struct drm_file *priv;
	int ret;

	/* Exclusive opens of a shared device node make no sense. */
	if (filp->f_flags & O_EXCL)
		return -EBUSY;
	if (!drm_cpu_valid())
		return -EINVAL;
	/* Refuse opens while the device is switched off. */
	if (dev->switch_power_state != DRM_SWITCH_POWER_ON &&
	    dev->switch_power_state != DRM_SWITCH_POWER_DYNAMIC_OFF)
		return -EINVAL;

	DRM_DEBUG("comm=\"%s\", pid=%d, minor=%d\n", current->comm,
		  task_pid_nr(current), minor->index);

	priv = drm_file_alloc(minor);
	if (IS_ERR(priv))
		return PTR_ERR(priv);

	if (drm_is_primary_client(priv)) {
		ret = drm_master_open(priv);
		if (ret) {
			drm_file_free(priv);
			return ret;
		}
	}

	filp->private_data = priv;
	filp->f_mode |= FMODE_UNSIGNED_OFFSET;
	priv->filp = filp;

	mutex_lock(&dev->filelist_mutex);
	list_add(&priv->lhead, &dev->filelist);
	mutex_unlock(&dev->filelist_mutex);

#ifdef __alpha__
	/*
	 * Default the hose: try the first PCI VGA device's sysdata, then
	 * fall back to the first PCI root bus, if none was set yet.
	 */
	if (!dev->hose) {
		struct pci_dev *pci_dev;

		pci_dev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, NULL);
		if (pci_dev) {
			dev->hose = pci_dev->sysdata;
			pci_dev_put(pci_dev);
		}
		if (!dev->hose) {
			struct pci_bus *b = list_entry(pci_root_buses.next,
						       struct pci_bus, node);
			if (b)
				dev->hose = b->sysdata;
		}
	}
#endif

	return 0;
}
397
398
399
400
401
402
403
404
405
406
407
408
409
410
/**
 * drm_open - open method for DRM file
 * @inode: device inode
 * @filp: file pointer
 *
 * Looks up the drm minor from the inode, bumps the device open count
 * and sets up the file private through drm_open_helper().  The first
 * open of the device additionally runs the legacy setup.
 *
 * RETURNS: 0 on success, negative errno on failure.
 */
int drm_open(struct inode *inode, struct file *filp)
{
	struct drm_device *dev;
	struct drm_minor *minor;
	int retcode;
	int need_setup = 0;

	minor = drm_minor_acquire(iminor(inode));
	if (IS_ERR(minor))
		return PTR_ERR(minor);

	dev = minor->dev;
	if (drm_dev_needs_global_mutex(dev))
		mutex_lock(&drm_global_mutex);

	/* Count was zero before this increment -> first open. */
	if (!atomic_fetch_inc(&dev->open_count))
		need_setup = 1;

	/* All opens share the device's anonymous inode mapping. */
	filp->f_mapping = dev->anon_inode->i_mapping;

	retcode = drm_open_helper(filp, minor);
	if (retcode)
		goto err_undo;
	if (need_setup) {
		retcode = drm_legacy_setup(dev);
		if (retcode) {
			drm_close_helper(filp);
			goto err_undo;
		}
	}

	if (drm_dev_needs_global_mutex(dev))
		mutex_unlock(&drm_global_mutex);

	return 0;

err_undo:
	/* Undo the open count bump and drop the minor reference. */
	atomic_dec(&dev->open_count);
	if (drm_dev_needs_global_mutex(dev))
		mutex_unlock(&drm_global_mutex);
	drm_minor_release(minor);
	return retcode;
}
EXPORT_SYMBOL(drm_open);
456
/*
 * Called when the last open file on @dev is closed: invokes the
 * driver's ->lastclose hook, reinitializes legacy device state, and
 * lets registered in-kernel DRM clients restore their configuration.
 */
void drm_lastclose(struct drm_device * dev)
{
	DRM_DEBUG("\n");

	if (dev->driver->lastclose)
		dev->driver->lastclose(dev);
	DRM_DEBUG("driver lastclose completed\n");

	if (drm_core_check_feature(dev, DRIVER_LEGACY))
		drm_legacy_dev_reinit(dev);

	drm_client_dev_restore(dev);
}
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
/**
 * drm_release - release method for DRM file
 * @inode: device inode
 * @filp: file pointer
 *
 * Removes and frees the drm_file attached to @filp, and runs
 * drm_lastclose() when this was the device's last open file.  Takes
 * drm_global_mutex for drivers that still need it.
 *
 * RETURNS: Always 0.
 */
int drm_release(struct inode *inode, struct file *filp)
{
	struct drm_file *file_priv = filp->private_data;
	struct drm_minor *minor = file_priv->minor;
	struct drm_device *dev = minor->dev;

	if (drm_dev_needs_global_mutex(dev))
		mutex_lock(&drm_global_mutex);

	DRM_DEBUG("open_count = %d\n", atomic_read(&dev->open_count));

	drm_close_helper(filp);

	/* Last file gone -> run the device's lastclose handling. */
	if (atomic_dec_and_test(&dev->open_count))
		drm_lastclose(dev);

	if (drm_dev_needs_global_mutex(dev))
		mutex_unlock(&drm_global_mutex);

	drm_minor_release(minor);

	return 0;
}
EXPORT_SYMBOL(drm_release);
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
/**
 * drm_release_noglobal - release method for DRM file, without the
 * global mutex around the close itself
 * @inode: device inode
 * @filp: file pointer
 *
 * Like drm_release(), but drm_global_mutex is only taken — via
 * atomic_dec_and_mutex_lock() — when the open count actually drops to
 * zero and drm_lastclose() must run.
 *
 * RETURNS: Always 0.
 */
int drm_release_noglobal(struct inode *inode, struct file *filp)
{
	struct drm_file *file_priv = filp->private_data;
	struct drm_minor *minor = file_priv->minor;
	struct drm_device *dev = minor->dev;

	drm_close_helper(filp);

	/* Lock is acquired only if the count reached zero. */
	if (atomic_dec_and_mutex_lock(&dev->open_count, &drm_global_mutex)) {
		drm_lastclose(dev);
		mutex_unlock(&drm_global_mutex);
	}

	drm_minor_release(minor);

	return 0;
}
EXPORT_SYMBOL(drm_release_noglobal);
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
/**
 * drm_read - read method for DRM file
 * @filp: file pointer
 * @buffer: userspace destination buffer
 * @count: size of @buffer in bytes
 * @offset: file offset (unused)
 *
 * Copies queued DRM events to userspace.  Only whole events are
 * copied; an event that no longer fits in the remaining buffer space
 * is put back on the list and the bytes copied so far are returned.
 * Without O_NONBLOCK the call sleeps until at least one event is
 * available.
 *
 * RETURNS: number of bytes read, or a negative errno.
 */
ssize_t drm_read(struct file *filp, char __user *buffer,
		 size_t count, loff_t *offset)
{
	struct drm_file *file_priv = filp->private_data;
	struct drm_device *dev = file_priv->minor->dev;
	ssize_t ret;

	ret = mutex_lock_interruptible(&file_priv->event_read_lock);
	if (ret)
		return ret;

	/* From here on, a non-negative ret is the running byte count. */
	for (;;) {
		struct drm_pending_event *e = NULL;

		/* Pop the oldest event, returning its event_space budget. */
		spin_lock_irq(&dev->event_lock);
		if (!list_empty(&file_priv->event_list)) {
			e = list_first_entry(&file_priv->event_list,
					struct drm_pending_event, link);
			file_priv->event_space += e->event->length;
			list_del(&e->link);
		}
		spin_unlock_irq(&dev->event_lock);

		if (e == NULL) {
			/* Already copied something: return that count. */
			if (ret)
				break;

			if (filp->f_flags & O_NONBLOCK) {
				ret = -EAGAIN;
				break;
			}

			/*
			 * Drop the read lock while sleeping so other
			 * readers are not blocked behind us.
			 */
			mutex_unlock(&file_priv->event_read_lock);
			ret = wait_event_interruptible(file_priv->event_wait,
						       !list_empty(&file_priv->event_list));
			if (ret >= 0)
				ret = mutex_lock_interruptible(&file_priv->event_read_lock);
			if (ret)
				return ret;
		} else {
			unsigned length = e->event->length;

			if (length > count - ret) {
				/*
				 * Event doesn't fit: put it back on the list
				 * and wake other waiters which may have room.
				 */
put_back_event:
				spin_lock_irq(&dev->event_lock);
				file_priv->event_space -= length;
				list_add(&e->link, &file_priv->event_list);
				spin_unlock_irq(&dev->event_lock);
				wake_up_interruptible_poll(&file_priv->event_wait,
					EPOLLIN | EPOLLRDNORM);
				break;
			}

			/* Failed copy after partial success reports the
			 * partial count; only a first failure is -EFAULT. */
			if (copy_to_user(buffer + ret, e->event, length)) {
				if (ret == 0)
					ret = -EFAULT;
				goto put_back_event;
			}

			ret += length;
			kfree(e);
		}
	}
	mutex_unlock(&file_priv->event_read_lock);

	return ret;
}
EXPORT_SYMBOL(drm_read);
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655__poll_t drm_poll(struct file *filp, struct poll_table_struct *wait)
656{
657 struct drm_file *file_priv = filp->private_data;
658 __poll_t mask = 0;
659
660 poll_wait(filp, &file_priv->event_wait, wait);
661
662 if (!list_empty(&file_priv->event_list))
663 mask |= EPOLLIN | EPOLLRDNORM;
664
665 return mask;
666}
667EXPORT_SYMBOL(drm_poll);
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693int drm_event_reserve_init_locked(struct drm_device *dev,
694 struct drm_file *file_priv,
695 struct drm_pending_event *p,
696 struct drm_event *e)
697{
698 if (file_priv->event_space < e->length)
699 return -ENOMEM;
700
701 file_priv->event_space -= e->length;
702
703 p->event = e;
704 list_add(&p->pending_link, &file_priv->pending_event_list);
705 p->file_priv = file_priv;
706
707 return 0;
708}
709EXPORT_SYMBOL(drm_event_reserve_init_locked);
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735int drm_event_reserve_init(struct drm_device *dev,
736 struct drm_file *file_priv,
737 struct drm_pending_event *p,
738 struct drm_event *e)
739{
740 unsigned long flags;
741 int ret;
742
743 spin_lock_irqsave(&dev->event_lock, flags);
744 ret = drm_event_reserve_init_locked(dev, file_priv, p, e);
745 spin_unlock_irqrestore(&dev->event_lock, flags);
746
747 return ret;
748}
749EXPORT_SYMBOL(drm_event_reserve_init);
750
751
752
753
754
755
756
757
758
759
760void drm_event_cancel_free(struct drm_device *dev,
761 struct drm_pending_event *p)
762{
763 unsigned long flags;
764
765 spin_lock_irqsave(&dev->event_lock, flags);
766 if (p->file_priv) {
767 p->file_priv->event_space += p->event->length;
768 list_del(&p->pending_link);
769 }
770 spin_unlock_irqrestore(&dev->event_lock, flags);
771
772 if (p->fence)
773 dma_fence_put(p->fence);
774
775 kfree(p);
776}
777EXPORT_SYMBOL(drm_event_cancel_free);
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
/**
 * drm_send_event_locked - send DRM event to a file descriptor
 * @dev: DRM device
 * @e: previously reserved DRM event to deliver
 *
 * Signals and releases the attached completion and fence (if any),
 * then moves @e from the file's pending list to its readable event
 * list and wakes readers/pollers.  If the destination file is already
 * gone (e->file_priv == NULL, cleared by drm_events_release()) the
 * event is simply freed.
 *
 * Callers must hold dev->event_lock.
 */
void drm_send_event_locked(struct drm_device *dev, struct drm_pending_event *e)
{
	assert_spin_locked(&dev->event_lock);

	/* Unblock any synchronous waiter on the event's completion. */
	if (e->completion) {
		complete_all(e->completion);
		e->completion_release(e->completion);
		e->completion = NULL;
	}

	if (e->fence) {
		dma_fence_signal(e->fence);
		dma_fence_put(e->fence);
	}

	/* Destination file already closed: nothing left to deliver to. */
	if (!e->file_priv) {
		kfree(e);
		return;
	}

	/* Move the event from the pending list to the readable list. */
	list_del(&e->pending_link);
	list_add_tail(&e->link,
		      &e->file_priv->event_list);
	wake_up_interruptible_poll(&e->file_priv->event_wait,
		EPOLLIN | EPOLLRDNORM);
}
EXPORT_SYMBOL(drm_send_event_locked);
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836void drm_send_event(struct drm_device *dev, struct drm_pending_event *e)
837{
838 unsigned long irqflags;
839
840 spin_lock_irqsave(&dev->event_lock, irqflags);
841 drm_send_event_locked(dev, e);
842 spin_unlock_irqrestore(&dev->event_lock, irqflags);
843}
844EXPORT_SYMBOL(drm_send_event);
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
/**
 * mock_drm_getfile - create an anonymous struct file for @minor
 * @minor: drm minor the file is opened on
 * @flags: file flags passed to anon_inode_getfile() (e.g. O_RDWR)
 *
 * Test-only helper: builds a drm_file and wraps it in an anonymous
 * struct file using the driver's fops, without going through the
 * character device node.
 *
 * RETURNS: the new file on success, ERR_PTR on failure.
 */
struct file *mock_drm_getfile(struct drm_minor *minor, unsigned int flags)
{
	struct drm_device *dev = minor->dev;
	struct drm_file *priv;
	struct file *file;

	priv = drm_file_alloc(minor);
	if (IS_ERR(priv))
		return ERR_CAST(priv);

	file = anon_inode_getfile("drm", dev->driver->fops, priv, flags);
	if (IS_ERR(file)) {
		drm_file_free(priv);
		return file;
	}

	/* Share the device's anonymous inode mapping, like drm_open(). */
	file->f_mapping = dev->anon_inode->i_mapping;

	/*
	 * The file takes a device reference here.  NOTE(review):
	 * presumably dropped by the fops release path — confirm.
	 */
	drm_dev_get(dev);
	priv->filp = file;

	return file;
}
EXPORT_SYMBOL_FOR_TESTS_ONLY(mock_drm_getfile);
885
886#ifdef CONFIG_MMU
887#ifdef CONFIG_TRANSPARENT_HUGEPAGE
888
889
890
891
892
/*
 * drm_addr_inflate() attempts to construct an address larger than or
 * equal to @addr at which a mapping of @len bytes starting at page
 * offset @pgoff is aligned to @huge_size, so that huge page table
 * entries can be used.  Returns @addr unchanged whenever no suitable
 * aligned address can be found.
 */
static unsigned long drm_addr_inflate(unsigned long addr,
				      unsigned long len,
				      unsigned long pgoff,
				      unsigned long flags,
				      unsigned long huge_size)
{
	unsigned long offset, inflated_len;
	unsigned long inflated_addr;
	unsigned long inflated_offset;

	/* Required offset of the mapping within a huge_size region. */
	offset = (pgoff << PAGE_SHIFT) & (huge_size - 1);
	/* Not worth it unless at least one full huge page would fit. */
	if (offset && offset + len < 2 * huge_size)
		return addr;
	/* Already aligned as required. */
	if ((addr & (huge_size - 1)) == offset)
		return addr;

	/* Over-allocate so an aligned start must exist inside the area. */
	inflated_len = len + huge_size - PAGE_SIZE;
	if (inflated_len > TASK_SIZE)
		return addr;
	/* Guard against arithmetic wrap-around. */
	if (inflated_len < len)
		return addr;

	inflated_addr = current->mm->get_unmapped_area(NULL, 0, inflated_len,
						       0, flags);
	if (IS_ERR_VALUE(inflated_addr))
		return addr;
	if (inflated_addr & ~PAGE_MASK)
		return addr;

	/* Round up inside the inflated area to the required offset. */
	inflated_offset = inflated_addr & (huge_size - 1);
	inflated_addr += offset - inflated_offset;
	if (inflated_offset > offset)
		inflated_addr += huge_size;

	if (inflated_addr > TASK_SIZE - len)
		return addr;

	return inflated_addr;
}
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
/**
 * drm_get_unmapped_area - get an unused user-space virtual memory area
 * suitable for huge page table entries
 * @file: the struct file being mmap()'d
 * @uaddr: start address suggested by user-space
 * @len: length of the requested area
 * @pgoff: page offset into the address space
 * @flags: mmap flags
 * @mgr: the drm driver's vma offset manager, used to translate the
 * fake mmap offset into an offset within the underlying object
 *
 * Picks an address like the mm default would, then — when the mapping
 * is large enough and no fixed/suggested address constrains us — tries
 * to inflate it to PMD (and, where supported, PUD) alignment via
 * drm_addr_inflate().
 *
 * RETURNS: a suitable address, or an error value from the underlying
 * get_unmapped_area().
 */
unsigned long drm_get_unmapped_area(struct file *file,
				    unsigned long uaddr, unsigned long len,
				    unsigned long pgoff, unsigned long flags,
				    struct drm_vma_offset_manager *mgr)
{
	unsigned long addr;
	unsigned long inflated_addr;
	struct drm_vma_offset_node *node;

	if (len > TASK_SIZE)
		return -ENOMEM;

	/*
	 * @pgoff is the fake mmap offset; translate it to the offset
	 * within the object so the alignment math below is meaningful.
	 */
	drm_vma_offset_lock_lookup(mgr);
	node = drm_vma_offset_lookup_locked(mgr, pgoff, 1);
	if (node)
		pgoff -= node->vm_node.start;
	drm_vma_offset_unlock_lookup(mgr);

	addr = current->mm->get_unmapped_area(file, uaddr, len, pgoff, flags);
	if (IS_ERR_VALUE(addr))
		return addr;
	if (addr & ~PAGE_MASK)
		return addr;
	if (addr > TASK_SIZE - len)
		return addr;

	/* Too small for even one PMD-sized huge page: nothing to gain. */
	if (len < HPAGE_PMD_SIZE)
		return addr;
	/* MAP_FIXED leaves no freedom to move the address. */
	if (flags & MAP_FIXED)
		return addr;

	/* Respect a caller-suggested address as-is. */
	if (uaddr)
		return addr;

	inflated_addr = drm_addr_inflate(addr, len, pgoff, flags,
					 HPAGE_PMD_SIZE);

	if (IS_ENABLED(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD) &&
	    len >= HPAGE_PUD_SIZE)
		inflated_addr = drm_addr_inflate(inflated_addr, len, pgoff,
						 flags, HPAGE_PUD_SIZE);
	return inflated_addr;
}
1010#else
/* Non-THP fallback: no huge-page alignment to chase, use the mm default. */
unsigned long drm_get_unmapped_area(struct file *file,
				    unsigned long uaddr, unsigned long len,
				    unsigned long pgoff, unsigned long flags,
				    struct drm_vma_offset_manager *mgr)
{
	return current->mm->get_unmapped_area(file, uaddr, len, pgoff, flags);
}
1018#endif
1019EXPORT_SYMBOL_GPL(drm_get_unmapped_area);
1020#endif
1021