1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34#include <linux/anon_inodes.h>
35#include <linux/dma-fence.h>
36#include <linux/file.h>
37#include <linux/module.h>
38#include <linux/pci.h>
39#include <linux/poll.h>
40#include <linux/slab.h>
41
42#include <drm/drm_client.h>
43#include <drm/drm_drv.h>
44#include <drm/drm_file.h>
45#include <drm/drm_print.h>
46
47#include "drm_crtc_internal.h"
48#include "drm_internal.h"
49#include "drm_legacy.h"
50
51#if defined(CONFIG_MMU) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
52#include <uapi/asm/mman.h>
53#include <drm/drm_vma_manager.h>
54#endif
55
56
/* Serializes open/release for legacy drivers and drivers with load/unload/lastclose hooks. */
DEFINE_MUTEX(drm_global_mutex);
58
59bool drm_dev_needs_global_mutex(struct drm_device *dev)
60{
61
62
63
64
65
66 if (drm_core_check_feature(dev, DRIVER_LEGACY))
67 return true;
68
69
70
71
72
73
74
75 if (dev->driver->load || dev->driver->unload)
76 return true;
77
78
79
80
81
82
83
84 if (dev->driver->lastclose)
85 return true;
86
87 return false;
88}
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
/*
 * drm_file_alloc - allocate and initialize a struct drm_file
 * @minor: the minor the new file is opened on
 *
 * Sets up the per-open file state: pid, lists, event bookkeeping, and the
 * GEM/syncobj/prime private data when the driver advertises those features,
 * then calls the driver's &drm_driver.open hook if present.
 *
 * Returns: the new drm_file, or an ERR_PTR() on failure.  The caller owns
 * the returned file and must release it with drm_file_free().
 */
struct drm_file *drm_file_alloc(struct drm_minor *minor)
{
	struct drm_device *dev = minor->dev;
	struct drm_file *file;
	int ret;

	file = kzalloc(sizeof(*file), GFP_KERNEL);
	if (!file)
		return ERR_PTR(-ENOMEM);

	file->pid = get_pid(task_pid(current));
	file->minor = minor;

	/* for compatibility root is always authenticated */
	file->authenticated = capable(CAP_SYS_ADMIN);

	INIT_LIST_HEAD(&file->lhead);
	INIT_LIST_HEAD(&file->fbs);
	mutex_init(&file->fbs_lock);
	INIT_LIST_HEAD(&file->blobs);
	INIT_LIST_HEAD(&file->pending_event_list);
	INIT_LIST_HEAD(&file->event_list);
	init_waitqueue_head(&file->event_wait);
	/* budget (in bytes) for events queued on this file; see drm_read() */
	file->event_space = 4096;

	mutex_init(&file->event_read_lock);

	if (drm_core_check_feature(dev, DRIVER_GEM))
		drm_gem_open(dev, file);

	if (drm_core_check_feature(dev, DRIVER_SYNCOBJ))
		drm_syncobj_open(file);

	drm_prime_init_file_private(&file->prime);

	if (dev->driver->open) {
		ret = dev->driver->open(dev, file);
		if (ret < 0)
			goto out_prime_destroy;
	}

	return file;

	/* unwind in reverse order of the setup above */
out_prime_destroy:
	drm_prime_destroy_file_private(&file->prime);
	if (drm_core_check_feature(dev, DRIVER_SYNCOBJ))
		drm_syncobj_release(file);
	if (drm_core_check_feature(dev, DRIVER_GEM))
		drm_gem_release(dev, file);
	put_pid(file->pid);
	kfree(file);

	return ERR_PTR(ret);
}
209
/*
 * drm_events_release - drop all events still associated with a drm_file
 * @file_priv: file being closed
 *
 * Called on file release with dev->event_lock held via irqsave.  Pending
 * (not yet sent) events are only unlinked and orphaned — their owners will
 * free them when they complete and see file_priv == NULL.  Events already
 * queued for reading are unlinked and freed here.
 */
static void drm_events_release(struct drm_file *file_priv)
{
	struct drm_device *dev = file_priv->minor->dev;
	struct drm_pending_event *e, *et;
	unsigned long flags;

	spin_lock_irqsave(&dev->event_lock, flags);

	/* Unlink pending events; drm_send_event_locked() frees orphans. */
	list_for_each_entry_safe(e, et, &file_priv->pending_event_list,
				 pending_link) {
		list_del(&e->pending_link);
		e->file_priv = NULL;
	}

	/* Remove unconsumed events — no reader will ever fetch them. */
	list_for_each_entry_safe(e, et, &file_priv->event_list, link) {
		list_del(&e->link);
		kfree(e);
	}

	spin_unlock_irqrestore(&dev->event_lock, flags);
}
233
234
235
236
237
238
239
240
241
242
243
244
245
246
/*
 * drm_file_free - free a drm_file allocated with drm_file_alloc()
 * @file: file to free, may be NULL (no-op)
 *
 * Tears down all per-open state in the reverse order it was set up:
 * driver preclose (legacy only), legacy locks/buffers, events, modeset
 * resources (fbs and property blobs), syncobjs, GEM handles, legacy
 * contexts, master state, driver postclose, and finally prime state.
 * The ordering is deliberate — later steps may rely on earlier ones.
 */
void drm_file_free(struct drm_file *file)
{
	struct drm_device *dev;

	if (!file)
		return;

	dev = file->minor->dev;

	DRM_DEBUG("comm=\"%s\", pid=%d, dev=0x%lx, open_count=%d\n",
		  current->comm, task_pid_nr(current),
		  (long)old_encode_dev(file->minor->kdev->devt),
		  atomic_read(&dev->open_count));

	if (drm_core_check_feature(dev, DRIVER_LEGACY) &&
	    dev->driver->preclose)
		dev->driver->preclose(dev, file);

	if (drm_core_check_feature(dev, DRIVER_LEGACY))
		drm_legacy_lock_release(dev, file->filp);

	if (drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		drm_legacy_reclaim_buffers(dev, file);

	drm_events_release(file);

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		drm_fb_release(file);
		drm_property_destroy_user_blobs(dev, file);
	}

	if (drm_core_check_feature(dev, DRIVER_SYNCOBJ))
		drm_syncobj_release(file);

	if (drm_core_check_feature(dev, DRIVER_GEM))
		drm_gem_release(dev, file);

	drm_legacy_ctxbitmap_flush(dev, file);

	if (drm_is_primary_client(file))
		drm_master_release(file);

	if (dev->driver->postclose)
		dev->driver->postclose(dev, file);

	drm_prime_destroy_file_private(&file->prime);

	/* drm_events_release() must have emptied this list */
	WARN_ON(!list_empty(&file->event_list));

	put_pid(file->pid);
	kfree(file);
}
299
300static void drm_close_helper(struct file *filp)
301{
302 struct drm_file *file_priv = filp->private_data;
303 struct drm_device *dev = file_priv->minor->dev;
304
305 mutex_lock(&dev->filelist_mutex);
306 list_del(&file_priv->lhead);
307 mutex_unlock(&dev->filelist_mutex);
308
309 drm_file_free(file_priv);
310}
311
312
313
314
315
316
/*
 * drm_cpu_valid - check whether DRM is supported on this CPU
 *
 * Returns: 0 on pre-v9 SPARC (AGP/DRM hardware was never supported there),
 * 1 everywhere else.
 */
static int drm_cpu_valid(void)
{
#if defined(__sparc__) && !defined(__sparc_v9__)
	return 0;		/* No cmpxchg before v9 sparc. */
#else
	return 1;
#endif
}
324
325
326
327
328
329
330
331
332
333
334
/*
 * drm_open_helper - allocate and link a drm_file for an open() call
 * @filp: VFS file
 * @minor: the minor node being opened
 *
 * Validates the open (no O_EXCL, supported CPU, device powered on or in
 * dynamic power-off), allocates the drm_file, opens the master for primary
 * clients, wires it into @filp and the device's file list.
 *
 * Returns: 0 on success, negative errno on failure.
 */
static int drm_open_helper(struct file *filp, struct drm_minor *minor)
{
	struct drm_device *dev = minor->dev;
	struct drm_file *priv;
	int ret;

	if (filp->f_flags & O_EXCL)
		return -EBUSY;	/* No exclusive opens */
	if (!drm_cpu_valid())
		return -EINVAL;
	if (dev->switch_power_state != DRM_SWITCH_POWER_ON &&
	    dev->switch_power_state != DRM_SWITCH_POWER_DYNAMIC_OFF)
		return -EINVAL;

	DRM_DEBUG("comm=\"%s\", pid=%d, minor=%d\n", current->comm,
		  task_pid_nr(current), minor->index);

	priv = drm_file_alloc(minor);
	if (IS_ERR(priv))
		return PTR_ERR(priv);

	if (drm_is_primary_client(priv)) {
		ret = drm_master_open(priv);
		if (ret) {
			drm_file_free(priv);
			return ret;
		}
	}

	filp->private_data = priv;
	filp->f_mode |= FMODE_UNSIGNED_OFFSET;
	priv->filp = filp;

	mutex_lock(&dev->filelist_mutex);
	list_add(&priv->lhead, &dev->filelist);
	mutex_unlock(&dev->filelist_mutex);

#ifdef __alpha__
	/*
	 * Default the hose to the first VGA device's PCI hose, falling back
	 * to the first root bus, so alpha userspace can map device memory.
	 */
	if (!dev->hose) {
		struct pci_dev *pci_dev;

		pci_dev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, NULL);
		if (pci_dev) {
			dev->hose = pci_dev->sysdata;
			pci_dev_put(pci_dev);
		}
		if (!dev->hose) {
			struct pci_bus *b = list_entry(pci_root_buses.next,
				struct pci_bus, node);
			if (b)
				dev->hose = b->sysdata;
		}
	}
#endif

	return 0;
}
395
396
397
398
399
400
401
402
403
404
405
406
407
408
/*
 * drm_open - open method for DRM files
 * @inode: device inode
 * @filp: file pointer
 *
 * Looks up the minor from the inode, bumps the device open count, and sets
 * up the drm_file via drm_open_helper().  On the very first open the legacy
 * setup is also run.  Devices that still need the global mutex (see
 * drm_dev_needs_global_mutex()) are serialized against each other here.
 *
 * Returns: 0 on success, negative errno on failure.
 */
int drm_open(struct inode *inode, struct file *filp)
{
	struct drm_device *dev;
	struct drm_minor *minor;
	int retcode;
	int need_setup = 0;

	minor = drm_minor_acquire(iminor(inode));
	if (IS_ERR(minor))
		return PTR_ERR(minor);

	dev = minor->dev;
	if (drm_dev_needs_global_mutex(dev))
		mutex_lock(&drm_global_mutex);

	/* first open of this device? */
	if (!atomic_fetch_inc(&dev->open_count))
		need_setup = 1;

	/* share the device's anonymous address space across all opens */
	filp->f_mapping = dev->anon_inode->i_mapping;

	retcode = drm_open_helper(filp, minor);
	if (retcode)
		goto err_undo;
	if (need_setup) {
		retcode = drm_legacy_setup(dev);
		if (retcode) {
			drm_close_helper(filp);
			goto err_undo;
		}
	}

	if (drm_dev_needs_global_mutex(dev))
		mutex_unlock(&drm_global_mutex);

	return 0;

err_undo:
	atomic_dec(&dev->open_count);
	if (drm_dev_needs_global_mutex(dev))
		mutex_unlock(&drm_global_mutex);
	drm_minor_release(minor);
	return retcode;
}
EXPORT_SYMBOL(drm_open);
454
/*
 * drm_lastclose - runs when the last open file on the device is released
 * @dev: DRM device
 *
 * Calls the driver's &drm_driver.lastclose hook, reinitializes legacy
 * state for legacy drivers, and restores in-kernel clients (e.g. fbdev
 * emulation) to the device.
 */
void drm_lastclose(struct drm_device * dev)
{
	DRM_DEBUG("\n");

	if (dev->driver->lastclose)
		dev->driver->lastclose(dev);
	DRM_DEBUG("driver lastclose completed\n");

	if (drm_core_check_feature(dev, DRIVER_LEGACY))
		drm_legacy_dev_reinit(dev);

	drm_client_dev_restore(dev);
}
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
/*
 * drm_release - release method for DRM files
 * @inode: device inode (unused here; the minor comes from the drm_file)
 * @filp: file pointer
 *
 * Frees the per-open drm_file and, if this was the last open, runs
 * drm_lastclose().  Takes drm_global_mutex for devices that still need it.
 *
 * Returns: always 0.
 */
int drm_release(struct inode *inode, struct file *filp)
{
	struct drm_file *file_priv = filp->private_data;
	struct drm_minor *minor = file_priv->minor;
	struct drm_device *dev = minor->dev;

	if (drm_dev_needs_global_mutex(dev))
		mutex_lock(&drm_global_mutex);

	DRM_DEBUG("open_count = %d\n", atomic_read(&dev->open_count));

	drm_close_helper(filp);

	if (atomic_dec_and_test(&dev->open_count))
		drm_lastclose(dev);

	if (drm_dev_needs_global_mutex(dev))
		mutex_unlock(&drm_global_mutex);

	/* drops the reference taken in drm_open() */
	drm_minor_release(minor);

	return 0;
}
EXPORT_SYMBOL(drm_release);
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
/*
 * drm_release_noglobal - release method that avoids drm_global_mutex
 * @inode: device inode
 * @filp: file pointer
 *
 * Variant of drm_release() for drivers that never need the global mutex:
 * the lock is only taken — via atomic_dec_and_mutex_lock() — when this is
 * the final release and drm_lastclose() must run.
 *
 * Returns: always 0.
 */
int drm_release_noglobal(struct inode *inode, struct file *filp)
{
	struct drm_file *file_priv = filp->private_data;
	struct drm_minor *minor = file_priv->minor;
	struct drm_device *dev = minor->dev;

	drm_close_helper(filp);

	/* lock is acquired only if open_count drops to zero */
	if (atomic_dec_and_mutex_lock(&dev->open_count, &drm_global_mutex)) {
		drm_lastclose(dev);
		mutex_unlock(&drm_global_mutex);
	}

	drm_minor_release(minor);

	return 0;
}
EXPORT_SYMBOL(drm_release_noglobal);
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
/*
 * drm_read - read method for DRM files
 * @filp: file pointer
 * @buffer: userspace destination
 * @count: destination buffer size in bytes
 * @offset: file offset (unused)
 *
 * Copies as many completed events as fit into @buffer.  Blocks until at
 * least one event is available unless O_NONBLOCK is set.  Events are
 * dequeued under dev->event_lock; event_read_lock serializes concurrent
 * readers.  An event that does not fit (or fails copy_to_user()) is put
 * back at the head of the queue.
 *
 * Returns: number of bytes read, or negative errno on failure.
 */
ssize_t drm_read(struct file *filp, char __user *buffer,
		 size_t count, loff_t *offset)
{
	struct drm_file *file_priv = filp->private_data;
	struct drm_device *dev = file_priv->minor->dev;
	ssize_t ret;

	ret = mutex_lock_interruptible(&file_priv->event_read_lock);
	if (ret)
		return ret;

	for (;;) {
		struct drm_pending_event *e = NULL;

		/* dequeue the oldest event, returning its space budget */
		spin_lock_irq(&dev->event_lock);
		if (!list_empty(&file_priv->event_list)) {
			e = list_first_entry(&file_priv->event_list,
					struct drm_pending_event, link);
			file_priv->event_space += e->event->length;
			list_del(&e->link);
		}
		spin_unlock_irq(&dev->event_lock);

		if (e == NULL) {
			/* ret != 0 means we already copied some events out */
			if (ret)
				break;

			if (filp->f_flags & O_NONBLOCK) {
				ret = -EAGAIN;
				break;
			}

			/* drop the read lock while sleeping so writers can queue */
			mutex_unlock(&file_priv->event_read_lock);
			ret = wait_event_interruptible(file_priv->event_wait,
						       !list_empty(&file_priv->event_list));
			if (ret >= 0)
				ret = mutex_lock_interruptible(&file_priv->event_read_lock);
			if (ret)
				return ret;
		} else {
			unsigned length = e->event->length;

			if (length > count - ret) {
				/* won't fit: re-queue at head and stop */
put_back_event:
				spin_lock_irq(&dev->event_lock);
				file_priv->event_space -= length;
				list_add(&e->link, &file_priv->event_list);
				spin_unlock_irq(&dev->event_lock);
				wake_up_interruptible_poll(&file_priv->event_wait,
					EPOLLIN | EPOLLRDNORM);
				break;
			}

			if (copy_to_user(buffer + ret, e->event, length)) {
				if (ret == 0)
					ret = -EFAULT;
				goto put_back_event;
			}

			ret += length;
			kfree(e);
		}
	}
	mutex_unlock(&file_priv->event_read_lock);

	return ret;
}
EXPORT_SYMBOL(drm_read);
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653__poll_t drm_poll(struct file *filp, struct poll_table_struct *wait)
654{
655 struct drm_file *file_priv = filp->private_data;
656 __poll_t mask = 0;
657
658 poll_wait(filp, &file_priv->event_wait, wait);
659
660 if (!list_empty(&file_priv->event_list))
661 mask |= EPOLLIN | EPOLLRDNORM;
662
663 return mask;
664}
665EXPORT_SYMBOL(drm_poll);
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691int drm_event_reserve_init_locked(struct drm_device *dev,
692 struct drm_file *file_priv,
693 struct drm_pending_event *p,
694 struct drm_event *e)
695{
696 if (file_priv->event_space < e->length)
697 return -ENOMEM;
698
699 file_priv->event_space -= e->length;
700
701 p->event = e;
702 list_add(&p->pending_link, &file_priv->pending_event_list);
703 p->file_priv = file_priv;
704
705 return 0;
706}
707EXPORT_SYMBOL(drm_event_reserve_init_locked);
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733int drm_event_reserve_init(struct drm_device *dev,
734 struct drm_file *file_priv,
735 struct drm_pending_event *p,
736 struct drm_event *e)
737{
738 unsigned long flags;
739 int ret;
740
741 spin_lock_irqsave(&dev->event_lock, flags);
742 ret = drm_event_reserve_init_locked(dev, file_priv, p, e);
743 spin_unlock_irqrestore(&dev->event_lock, flags);
744
745 return ret;
746}
747EXPORT_SYMBOL(drm_event_reserve_init);
748
749
750
751
752
753
754
755
756
757
/*
 * drm_event_cancel_free - free a DRM event and release its reserved space
 * @dev: DRM device
 * @p: pending event to cancel and free
 *
 * Undoes a drm_event_reserve_init(): returns the reserved event space,
 * unlinks the event from the pending list (if still owned by a file),
 * drops any attached fence reference, and frees the event.
 */
void drm_event_cancel_free(struct drm_device *dev,
			   struct drm_pending_event *p)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->event_lock, flags);
	/* file_priv is NULL if the file was closed in the meantime */
	if (p->file_priv) {
		p->file_priv->event_space += p->event->length;
		list_del(&p->pending_link);
	}
	spin_unlock_irqrestore(&dev->event_lock, flags);

	if (p->fence)
		dma_fence_put(p->fence);

	kfree(p);
}
EXPORT_SYMBOL(drm_event_cancel_free);
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
/*
 * drm_send_event_locked - deliver a completed event to its file
 * @dev: DRM device
 * @e: event to deliver
 *
 * Signals the optional completion and fence, then either frees the event
 * (if its file was closed and drm_events_release() orphaned it) or moves
 * it from the pending list to the file's readable event list and wakes
 * poll/read waiters.  Callers must hold dev->event_lock.
 */
void drm_send_event_locked(struct drm_device *dev, struct drm_pending_event *e)
{
	assert_spin_locked(&dev->event_lock);

	if (e->completion) {
		complete_all(e->completion);
		/* completion_release drops whatever ref keeps it alive */
		e->completion_release(e->completion);
		e->completion = NULL;
	}

	if (e->fence) {
		dma_fence_signal(e->fence);
		dma_fence_put(e->fence);
	}

	/* file closed before delivery: nobody can read this event anymore */
	if (!e->file_priv) {
		kfree(e);
		return;
	}

	list_del(&e->pending_link);
	list_add_tail(&e->link,
		      &e->file_priv->event_list);
	wake_up_interruptible_poll(&e->file_priv->event_wait,
		EPOLLIN | EPOLLRDNORM);
}
EXPORT_SYMBOL(drm_send_event_locked);
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834void drm_send_event(struct drm_device *dev, struct drm_pending_event *e)
835{
836 unsigned long irqflags;
837
838 spin_lock_irqsave(&dev->event_lock, irqflags);
839 drm_send_event_locked(dev, e);
840 spin_unlock_irqrestore(&dev->event_lock, irqflags);
841}
842EXPORT_SYMBOL(drm_send_event);
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
/*
 * mock_drm_getfile - create a fake DRM file for in-kernel tests
 * @minor: minor to open a file on
 * @flags: open flags (e.g. O_RDWR) for the anonymous inode
 *
 * Builds a drm_file backed by an anonymous inode rather than a real
 * character device, taking an extra device reference so the mock file
 * keeps the device alive.  Test-only export.
 *
 * Returns: the new file, or an ERR_PTR() on failure.
 */
struct file *mock_drm_getfile(struct drm_minor *minor, unsigned int flags)
{
	struct drm_device *dev = minor->dev;
	struct drm_file *priv;
	struct file *file;

	priv = drm_file_alloc(minor);
	if (IS_ERR(priv))
		return ERR_CAST(priv);

	file = anon_inode_getfile("drm", dev->driver->fops, priv, flags);
	if (IS_ERR(file)) {
		drm_file_free(priv);
		return file;
	}

	/* same mapping sharing as drm_open() does for real opens */
	file->f_mapping = dev->anon_inode->i_mapping;

	drm_dev_get(dev);
	priv->filp = file;

	return file;
}
EXPORT_SYMBOL_FOR_TESTS_ONLY(mock_drm_getfile);
883
884#ifdef CONFIG_MMU
885#ifdef CONFIG_TRANSPARENT_HUGEPAGE
886
887
888
889
890
/*
 * drm_addr_inflate - attempt to obtain a huge-page-aligned mapping address
 * @addr: candidate address from the generic get_unmapped_area()
 * @len: mapping length
 * @pgoff: page offset into the object
 * @flags: mmap flags
 * @huge_size: the huge page size to align for (PMD or PUD size)
 *
 * Requests an over-sized area and slides the result so that the mapping's
 * offset within a huge page matches the object's offset, allowing huge
 * page table entries.  Falls back to @addr whenever alignment cannot be
 * achieved or would exceed TASK_SIZE.  Mirrors the shmem THP placement
 * logic.
 */
static unsigned long drm_addr_inflate(unsigned long addr,
				      unsigned long len,
				      unsigned long pgoff,
				      unsigned long flags,
				      unsigned long huge_size)
{
	unsigned long offset, inflated_len;
	unsigned long inflated_addr;
	unsigned long inflated_offset;

	/* object's offset within a huge page */
	offset = (pgoff << PAGE_SHIFT) & (huge_size - 1);
	/* too small to cover a full huge page at this offset */
	if (offset && offset + len < 2 * huge_size)
		return addr;
	/* already suitably aligned */
	if ((addr & (huge_size - 1)) == offset)
		return addr;

	/* over-allocate so we can slide to the desired alignment */
	inflated_len = len + huge_size - PAGE_SIZE;
	if (inflated_len > TASK_SIZE)
		return addr;
	if (inflated_len < len)	/* overflow */
		return addr;

	inflated_addr = current->mm->get_unmapped_area(NULL, 0, inflated_len,
						       0, flags);
	if (IS_ERR_VALUE(inflated_addr))
		return addr;
	if (inflated_addr & ~PAGE_MASK)
		return addr;

	/* shift forward to match the object's huge-page offset */
	inflated_offset = inflated_addr & (huge_size - 1);
	inflated_addr += offset - inflated_offset;
	if (inflated_offset > offset)
		inflated_addr += huge_size;

	if (inflated_addr > TASK_SIZE - len)
		return addr;

	return inflated_addr;
}
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
/*
 * drm_get_unmapped_area - get an unmapped area suited for huge page entries
 * @file: file being mmapped
 * @uaddr: user-suggested address
 * @len: mapping length
 * @pgoff: fake page offset encoding the buffer-object via @mgr
 * @flags: mmap flags
 * @mgr: vma offset manager used to translate @pgoff to an in-object offset
 *
 * Picks a start address and, when transparent huge pages are available and
 * worthwhile, inflates it via drm_addr_inflate() so the mapping can use
 * PMD (and possibly PUD) sized page table entries.
 *
 * Returns: the chosen address, or an error value from get_unmapped_area().
 */
unsigned long drm_get_unmapped_area(struct file *file,
				    unsigned long uaddr, unsigned long len,
				    unsigned long pgoff, unsigned long flags,
				    struct drm_vma_offset_manager *mgr)
{
	unsigned long addr;
	unsigned long inflated_addr;
	struct drm_vma_offset_node *node;

	if (len > TASK_SIZE)
		return -ENOMEM;

	/*
	 * @pgoff encodes the object's placement in the vma manager; convert
	 * it to an offset relative to the start of the object so alignment
	 * decisions below are made against the object, not the fake offset.
	 */
	drm_vma_offset_lock_lookup(mgr);
	node = drm_vma_offset_lookup_locked(mgr, pgoff, 1);
	if (node)
		pgoff -= node->vm_node.start;
	drm_vma_offset_unlock_lookup(mgr);

	addr = current->mm->get_unmapped_area(file, uaddr, len, pgoff, flags);
	if (IS_ERR_VALUE(addr))
		return addr;
	if (addr & ~PAGE_MASK)
		return addr;
	if (addr > TASK_SIZE - len)
		return addr;

	/* too small for even one PMD huge page — nothing to gain */
	if (len < HPAGE_PMD_SIZE)
		return addr;
	if (flags & MAP_FIXED)
		return addr;

	/*
	 * The caller suggested an address: honour it rather than second-
	 * guessing with an inflated allocation.
	 */
	if (uaddr)
		return addr;

	inflated_addr = drm_addr_inflate(addr, len, pgoff, flags,
					 HPAGE_PMD_SIZE);

	/* try for PUD alignment too when the arch and size allow it */
	if (IS_ENABLED(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD) &&
	    len >= HPAGE_PUD_SIZE)
		inflated_addr = drm_addr_inflate(inflated_addr, len, pgoff,
						 flags, HPAGE_PUD_SIZE);
	return inflated_addr;
}
1008#else
/* Without THP there is nothing to align for: defer to the mm's default. */
unsigned long drm_get_unmapped_area(struct file *file,
				    unsigned long uaddr, unsigned long len,
				    unsigned long pgoff, unsigned long flags,
				    struct drm_vma_offset_manager *mgr)
{
	return current->mm->get_unmapped_area(file, uaddr, len, pgoff, flags);
}
1016#endif
1017EXPORT_SYMBOL_GPL(drm_get_unmapped_area);
1018#endif
1019