1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34#include <linux/poll.h>
35#include <linux/slab.h>
36#include <linux/module.h>
37
38#include <drm/drm_client.h>
39#include <drm/drm_file.h>
40#include <drm/drmP.h>
41
42#include "drm_legacy.h"
43#include "drm_internal.h"
44#include "drm_crtc_internal.h"
45
46
47DEFINE_MUTEX(drm_global_mutex);
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103static int drm_open_helper(struct file *filp, struct drm_minor *minor);
104
105
106
107
108
109
110
111
112
113
114
115
/*
 * drm_file_alloc - allocate and initialize a new drm_file for @minor
 * @minor: minor the new file is opened on
 *
 * Sets up per-open file state: pid, event bookkeeping, and the GEM,
 * syncobj and PRIME sub-state for drivers advertising the corresponding
 * feature flags, then calls the driver's optional ->open() hook.
 *
 * Returns the new drm_file on success, or an ERR_PTR-encoded negative
 * error code on failure (in which case everything set up here is torn
 * down again before returning).
 */
struct drm_file *drm_file_alloc(struct drm_minor *minor)
{
	struct drm_device *dev = minor->dev;
	struct drm_file *file;
	int ret;

	file = kzalloc(sizeof(*file), GFP_KERNEL);
	if (!file)
		return ERR_PTR(-ENOMEM);

	file->pid = get_pid(task_pid(current));
	file->minor = minor;

	/* For compatibility root is always authenticated. */
	file->authenticated = capable(CAP_SYS_ADMIN);

	INIT_LIST_HEAD(&file->lhead);
	INIT_LIST_HEAD(&file->fbs);
	mutex_init(&file->fbs_lock);
	INIT_LIST_HEAD(&file->blobs);
	INIT_LIST_HEAD(&file->pending_event_list);
	INIT_LIST_HEAD(&file->event_list);
	init_waitqueue_head(&file->event_wait);
	file->event_space = 4096; /* byte budget for queued events */

	mutex_init(&file->event_read_lock);

	if (drm_core_check_feature(dev, DRIVER_GEM))
		drm_gem_open(dev, file);

	if (drm_core_check_feature(dev, DRIVER_SYNCOBJ))
		drm_syncobj_open(file);

	if (drm_core_check_feature(dev, DRIVER_PRIME))
		drm_prime_init_file_private(&file->prime);

	if (dev->driver->open) {
		ret = dev->driver->open(dev, file);
		if (ret < 0)
			goto out_prime_destroy;
	}

	return file;

	/* Error path: unwind the setup above in reverse order. */
out_prime_destroy:
	if (drm_core_check_feature(dev, DRIVER_PRIME))
		drm_prime_destroy_file_private(&file->prime);
	if (drm_core_check_feature(dev, DRIVER_SYNCOBJ))
		drm_syncobj_release(file);
	if (drm_core_check_feature(dev, DRIVER_GEM))
		drm_gem_release(dev, file);
	put_pid(file->pid);
	kfree(file);

	return ERR_PTR(ret);
}
172
/*
 * drm_events_release - drop all events associated with @file_priv
 * @file_priv: drm_file being torn down
 *
 * Pending (not yet delivered) events are unlinked and their file_priv
 * pointer cleared; drm_send_event_locked() will then free them when they
 * eventually fire instead of queueing them.  Events already delivered but
 * never read by userspace are freed here directly.
 */
static void drm_events_release(struct drm_file *file_priv)
{
	struct drm_device *dev = file_priv->minor->dev;
	struct drm_pending_event *e, *et;
	unsigned long flags;

	spin_lock_irqsave(&dev->event_lock, flags);

	/* Orphan pending events: clearing file_priv tells the sender that
	 * nobody is waiting for them anymore. */
	list_for_each_entry_safe(e, et, &file_priv->pending_event_list,
				 pending_link) {
		list_del(&e->pending_link);
		e->file_priv = NULL;
	}

	/* Delivered-but-unread events are simply dropped. */
	list_for_each_entry_safe(e, et, &file_priv->event_list, link) {
		list_del(&e->link);
		kfree(e);
	}

	spin_unlock_irqrestore(&dev->event_lock, flags);
}
196
197
198
199
200
201
202
203
204
205
206
207
208
209
/*
 * drm_file_free - release all resources held by a drm_file
 * @file: drm_file to free, may be NULL
 *
 * Tears down the per-open state created by drm_file_alloc() and the
 * driver's ->open() hook.  The teardown sequence (legacy hooks, events,
 * KMS state, syncobj/GEM, master, ->postclose(), PRIME) is deliberate;
 * do not reorder without care.
 */
void drm_file_free(struct drm_file *file)
{
	struct drm_device *dev;

	if (!file)
		return;

	dev = file->minor->dev;

	DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
		  task_pid_nr(current),
		  (long)old_encode_dev(file->minor->kdev->devt),
		  dev->open_count);

	/* ->preclose() is only honored for legacy drivers here. */
	if (drm_core_check_feature(dev, DRIVER_LEGACY) &&
	    dev->driver->preclose)
		dev->driver->preclose(dev, file);

	if (drm_core_check_feature(dev, DRIVER_LEGACY))
		drm_legacy_lock_release(dev, file->filp);

	if (drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		drm_legacy_reclaim_buffers(dev, file);

	drm_events_release(file);

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		drm_fb_release(file);
		drm_property_destroy_user_blobs(dev, file);
	}

	if (drm_core_check_feature(dev, DRIVER_SYNCOBJ))
		drm_syncobj_release(file);

	if (drm_core_check_feature(dev, DRIVER_GEM))
		drm_gem_release(dev, file);

	drm_legacy_ctxbitmap_flush(dev, file);

	if (drm_is_primary_client(file))
		drm_master_release(file);

	if (dev->driver->postclose)
		dev->driver->postclose(dev, file);

	if (drm_core_check_feature(dev, DRIVER_PRIME))
		drm_prime_destroy_file_private(&file->prime);

	/* drm_events_release() must have emptied the event list. */
	WARN_ON(!list_empty(&file->event_list));

	put_pid(file->pid);
	kfree(file);
}
263
264static void drm_close_helper(struct file *filp)
265{
266 struct drm_file *file_priv = filp->private_data;
267 struct drm_device *dev = file_priv->minor->dev;
268
269 mutex_lock(&dev->filelist_mutex);
270 list_del(&file_priv->lhead);
271 mutex_unlock(&dev->filelist_mutex);
272
273 drm_file_free(file_priv);
274}
275
276static int drm_setup(struct drm_device * dev)
277{
278 int ret;
279
280 if (dev->driver->firstopen &&
281 drm_core_check_feature(dev, DRIVER_LEGACY)) {
282 ret = dev->driver->firstopen(dev);
283 if (ret != 0)
284 return ret;
285 }
286
287 ret = drm_legacy_dma_setup(dev);
288 if (ret < 0)
289 return ret;
290
291
292 DRM_DEBUG("\n");
293 return 0;
294}
295
296
297
298
299
300
301
302
303
304
305
306
307
308
/*
 * drm_open - open method for DRM file
 * @inode: device inode
 * @filp: file pointer
 *
 * Looks up the DRM minor for @inode, bumps the device open count and
 * allocates the per-open state via drm_open_helper().  The first opener
 * additionally runs drm_setup().  On any failure the open count and the
 * minor reference are rolled back.
 *
 * Returns 0 on success or a negative error code.
 */
int drm_open(struct inode *inode, struct file *filp)
{
	struct drm_device *dev;
	struct drm_minor *minor;
	int retcode;
	int need_setup = 0;

	minor = drm_minor_acquire(iminor(inode));
	if (IS_ERR(minor))
		return PTR_ERR(minor);

	dev = minor->dev;
	if (!dev->open_count++)
		need_setup = 1;

	/* Share the device's anonymous address_space across all opens. */
	filp->f_mapping = dev->anon_inode->i_mapping;

	retcode = drm_open_helper(filp, minor);
	if (retcode)
		goto err_undo;
	if (need_setup) {
		retcode = drm_setup(dev);
		if (retcode) {
			drm_close_helper(filp);
			goto err_undo;
		}
	}
	return 0;

err_undo:
	dev->open_count--;
	drm_minor_release(minor);
	return retcode;
}
EXPORT_SYMBOL(drm_open);
345
346
347
348
349
350
/* Returns non-zero when DRM can run on this CPU (everything except
 * pre-v9 SPARC). */
static int drm_cpu_valid(void)
{
#if defined(__sparc__) && !defined(__sparc_v9__)
	return 0;
#else
	return 1;
#endif
}
358
359
360
361
362
363
364
365
366
367
368
/*
 * Called whenever a process opens a DRM device node.  Performs the
 * per-open checks, allocates the drm_file, attaches master state for
 * primary clients and links the file into the device's filelist.
 *
 * Returns 0 on success or a negative error code.
 */
static int drm_open_helper(struct file *filp, struct drm_minor *minor)
{
	struct drm_device *dev = minor->dev;
	struct drm_file *priv;
	int ret;

	/* Exclusive opens are not supported. */
	if (filp->f_flags & O_EXCL)
		return -EBUSY;
	if (!drm_cpu_valid())
		return -EINVAL;
	/* Refuse opens while the device is powered off / switched away. */
	if (dev->switch_power_state != DRM_SWITCH_POWER_ON && dev->switch_power_state != DRM_SWITCH_POWER_DYNAMIC_OFF)
		return -EINVAL;

	DRM_DEBUG("pid = %d, minor = %d\n", task_pid_nr(current), minor->index);

	priv = drm_file_alloc(minor);
	if (IS_ERR(priv))
		return PTR_ERR(priv);

	if (drm_is_primary_client(priv)) {
		ret = drm_master_open(priv);
		if (ret) {
			drm_file_free(priv);
			return ret;
		}
	}

	filp->private_data = priv;
	/* Allow mmap fake offsets beyond the signed-offset range. */
	filp->f_mode |= FMODE_UNSIGNED_OFFSET;
	priv->filp = filp;

	mutex_lock(&dev->filelist_mutex);
	list_add(&priv->lhead, &dev->filelist);
	mutex_unlock(&dev->filelist_mutex);

#ifdef __alpha__
	/*
	 * NOTE(review): Alpha-only quirk — if the device has no PCI hose
	 * yet, borrow one from a VGA device or the first root bus.
	 * Presumably needed for address translation on Alpha; confirm
	 * before touching.
	 */
	if (!dev->hose) {
		struct pci_dev *pci_dev;
		pci_dev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, NULL);
		if (pci_dev) {
			dev->hose = pci_dev->sysdata;
			pci_dev_put(pci_dev);
		}
		if (!dev->hose) {
			struct pci_bus *b = list_entry(pci_root_buses.next,
				struct pci_bus, node);
			if (b)
				dev->hose = b->sysdata;
		}
	}
#endif

	return 0;
}
426
/*
 * drm_lastclose - device cleanup when the last opener goes away
 * @dev: DRM device
 *
 * Calls the driver's optional ->lastclose() hook, reinitializes legacy
 * device state, and finally lets in-kernel DRM clients restore their
 * state.  The order of these steps is intentional.
 */
void drm_lastclose(struct drm_device * dev)
{
	DRM_DEBUG("\n");

	if (dev->driver->lastclose)
		dev->driver->lastclose(dev);
	DRM_DEBUG("driver lastclose completed\n");

	if (drm_core_check_feature(dev, DRIVER_LEGACY))
		drm_legacy_dev_reinit(dev);

	drm_client_dev_restore(dev);
}
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
/*
 * drm_release - release method for DRM file
 * @inode: device inode
 * @filp: file pointer
 *
 * Frees the drm_file created by drm_open(); the last closer also runs
 * drm_lastclose().  drm_global_mutex is held across the open_count update
 * and lastclose, and the minor reference taken in drm_open() is dropped.
 *
 * Always returns 0.
 */
int drm_release(struct inode *inode, struct file *filp)
{
	struct drm_file *file_priv = filp->private_data;
	struct drm_minor *minor = file_priv->minor;
	struct drm_device *dev = minor->dev;

	mutex_lock(&drm_global_mutex);

	DRM_DEBUG("open_count = %d\n", dev->open_count);

	drm_close_helper(filp);

	if (!--dev->open_count)
		drm_lastclose(dev);

	mutex_unlock(&drm_global_mutex);

	drm_minor_release(minor);

	return 0;
}
EXPORT_SYMBOL(drm_release);
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
/*
 * drm_read - read method for DRM file
 * @filp: file pointer
 * @buffer: userspace destination buffer
 * @count: number of bytes to read
 * @offset: file offset (not used)
 *
 * Copies as many whole queued events as fit into @count bytes.  Blocks
 * until at least one event is available unless O_NONBLOCK is set.
 * event_read_lock serializes concurrent readers; it is dropped while
 * sleeping and re-taken afterwards.
 *
 * Returns the number of bytes copied, or a negative error code.
 */
ssize_t drm_read(struct file *filp, char __user *buffer,
		 size_t count, loff_t *offset)
{
	struct drm_file *file_priv = filp->private_data;
	struct drm_device *dev = file_priv->minor->dev;
	ssize_t ret;

	if (!access_ok(buffer, count))
		return -EFAULT;

	ret = mutex_lock_interruptible(&file_priv->event_read_lock);
	if (ret)
		return ret;

	for (;;) {
		struct drm_pending_event *e = NULL;

		/* Dequeue the oldest event, returning its space budget. */
		spin_lock_irq(&dev->event_lock);
		if (!list_empty(&file_priv->event_list)) {
			e = list_first_entry(&file_priv->event_list,
					struct drm_pending_event, link);
			file_priv->event_space += e->event->length;
			list_del(&e->link);
		}
		spin_unlock_irq(&dev->event_lock);

		if (e == NULL) {
			/* ret > 0 here means events were already copied;
			 * return that count instead of blocking again. */
			if (ret)
				break;

			if (filp->f_flags & O_NONBLOCK) {
				ret = -EAGAIN;
				break;
			}

			/* Drop the read lock while sleeping, re-take it
			 * before touching the event list again. */
			mutex_unlock(&file_priv->event_read_lock);
			ret = wait_event_interruptible(file_priv->event_wait,
						       !list_empty(&file_priv->event_list));
			if (ret >= 0)
				ret = mutex_lock_interruptible(&file_priv->event_read_lock);
			if (ret)
				return ret;
		} else {
			unsigned length = e->event->length;

			/* Event doesn't fit in the remaining buffer space:
			 * requeue it at the head for the next read. */
			if (length > count - ret) {
put_back_event:
				spin_lock_irq(&dev->event_lock);
				file_priv->event_space -= length;
				list_add(&e->link, &file_priv->event_list);
				spin_unlock_irq(&dev->event_lock);
				wake_up_interruptible(&file_priv->event_wait);
				break;
			}

			if (copy_to_user(buffer + ret, e->event, length)) {
				if (ret == 0)
					ret = -EFAULT;
				goto put_back_event;
			}

			ret += length;
			kfree(e);
		}
	}
	mutex_unlock(&file_priv->event_read_lock);

	return ret;
}
EXPORT_SYMBOL(drm_read);
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591__poll_t drm_poll(struct file *filp, struct poll_table_struct *wait)
592{
593 struct drm_file *file_priv = filp->private_data;
594 __poll_t mask = 0;
595
596 poll_wait(filp, &file_priv->event_wait, wait);
597
598 if (!list_empty(&file_priv->event_list))
599 mask |= EPOLLIN | EPOLLRDNORM;
600
601 return mask;
602}
603EXPORT_SYMBOL(drm_poll);
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629int drm_event_reserve_init_locked(struct drm_device *dev,
630 struct drm_file *file_priv,
631 struct drm_pending_event *p,
632 struct drm_event *e)
633{
634 if (file_priv->event_space < e->length)
635 return -ENOMEM;
636
637 file_priv->event_space -= e->length;
638
639 p->event = e;
640 list_add(&p->pending_link, &file_priv->pending_event_list);
641 p->file_priv = file_priv;
642
643 return 0;
644}
645EXPORT_SYMBOL(drm_event_reserve_init_locked);
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671int drm_event_reserve_init(struct drm_device *dev,
672 struct drm_file *file_priv,
673 struct drm_pending_event *p,
674 struct drm_event *e)
675{
676 unsigned long flags;
677 int ret;
678
679 spin_lock_irqsave(&dev->event_lock, flags);
680 ret = drm_event_reserve_init_locked(dev, file_priv, p, e);
681 spin_unlock_irqrestore(&dev->event_lock, flags);
682
683 return ret;
684}
685EXPORT_SYMBOL(drm_event_reserve_init);
686
687
688
689
690
691
692
693
694
695
696void drm_event_cancel_free(struct drm_device *dev,
697 struct drm_pending_event *p)
698{
699 unsigned long flags;
700 spin_lock_irqsave(&dev->event_lock, flags);
701 if (p->file_priv) {
702 p->file_priv->event_space += p->event->length;
703 list_del(&p->pending_link);
704 }
705 spin_unlock_irqrestore(&dev->event_lock, flags);
706
707 if (p->fence)
708 dma_fence_put(p->fence);
709
710 kfree(p);
711}
712EXPORT_SYMBOL(drm_event_cancel_free);
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
/*
 * drm_send_event_locked - deliver a reserved event to userspace
 * @dev: DRM device
 * @e: event to deliver
 *
 * Signals the optional completion and fence attached to the event, then
 * moves it from the file's pending list to its ready list and wakes any
 * reader.  If the reserving file is already gone (file_priv was cleared
 * by drm_events_release()) the event is freed instead.
 *
 * Caller must hold dev->event_lock.
 */
void drm_send_event_locked(struct drm_device *dev, struct drm_pending_event *e)
{
	assert_spin_locked(&dev->event_lock);

	/* Wake completion waiters, then drop our reference on it. */
	if (e->completion) {
		complete_all(e->completion);
		e->completion_release(e->completion);
		e->completion = NULL;
	}

	if (e->fence) {
		dma_fence_signal(e->fence);
		dma_fence_put(e->fence);
	}

	/* Nobody left to read the event: free it right away. */
	if (!e->file_priv) {
		kfree(e);
		return;
	}

	list_del(&e->pending_link);
	list_add_tail(&e->link,
		      &e->file_priv->event_list);
	wake_up_interruptible(&e->file_priv->event_wait);
}
EXPORT_SYMBOL(drm_send_event_locked);
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770void drm_send_event(struct drm_device *dev, struct drm_pending_event *e)
771{
772 unsigned long irqflags;
773
774 spin_lock_irqsave(&dev->event_lock, irqflags);
775 drm_send_event_locked(dev, e);
776 spin_unlock_irqrestore(&dev->event_lock, irqflags);
777}
778EXPORT_SYMBOL(drm_send_event);
779