1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34#include <linux/dma-fence.h>
35#include <linux/module.h>
36#include <linux/pci.h>
37#include <linux/poll.h>
38#include <linux/slab.h>
39
40#include <drm/drm_client.h>
41#include <drm/drm_drv.h>
42#include <drm/drm_file.h>
43#include <drm/drm_print.h>
44
45#include "drm_crtc_internal.h"
46#include "drm_internal.h"
47#include "drm_legacy.h"
48
49
50DEFINE_MUTEX(drm_global_mutex);
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
/**
 * drm_file_alloc - allocate a per-open DRM file structure
 * @minor: minor the file is opened on
 *
 * Allocates and initializes the &drm_file for one open of @minor's device:
 * event bookkeeping, and (feature-gated) GEM, syncobj and PRIME state, then
 * calls the driver's &drm_driver.open hook.
 *
 * RETURNS:
 * Pointer to the newly allocated file on success, ERR_PTR-encoded negative
 * error code on failure.
 */
struct drm_file *drm_file_alloc(struct drm_minor *minor)
{
	struct drm_device *dev = minor->dev;
	struct drm_file *file;
	int ret;

	file = kzalloc(sizeof(*file), GFP_KERNEL);
	if (!file)
		return ERR_PTR(-ENOMEM);

	file->pid = get_pid(task_pid(current));
	file->minor = minor;

	/* CAP_SYS_ADMIN openers start out authenticated. */
	file->authenticated = capable(CAP_SYS_ADMIN);

	INIT_LIST_HEAD(&file->lhead);
	INIT_LIST_HEAD(&file->fbs);
	mutex_init(&file->fbs_lock);
	INIT_LIST_HEAD(&file->blobs);
	INIT_LIST_HEAD(&file->pending_event_list);
	INIT_LIST_HEAD(&file->event_list);
	init_waitqueue_head(&file->event_wait);
	file->event_space = 4096; /* per-client budget for queued events */

	mutex_init(&file->event_read_lock);

	if (drm_core_check_feature(dev, DRIVER_GEM))
		drm_gem_open(dev, file);

	if (drm_core_check_feature(dev, DRIVER_SYNCOBJ))
		drm_syncobj_open(file);

	if (drm_core_check_feature(dev, DRIVER_PRIME))
		drm_prime_init_file_private(&file->prime);

	if (dev->driver->open) {
		ret = dev->driver->open(dev, file);
		if (ret < 0)
			goto out_prime_destroy;
	}

	return file;

out_prime_destroy:
	/* Unwind the feature-gated setup above in reverse order. */
	if (drm_core_check_feature(dev, DRIVER_PRIME))
		drm_prime_destroy_file_private(&file->prime);
	if (drm_core_check_feature(dev, DRIVER_SYNCOBJ))
		drm_syncobj_release(file);
	if (drm_core_check_feature(dev, DRIVER_GEM))
		drm_gem_release(dev, file);
	put_pid(file->pid);
	kfree(file);

	return ERR_PTR(ret);
}
173
/*
 * Drop all events still owned by @file_priv, under dev->event_lock.
 *
 * Pending (not yet sent) events are only unlinked and orphaned: clearing
 * e->file_priv makes a later drm_send_event_locked() free the event instead
 * of delivering it.  Events already queued for reading are freed outright.
 */
static void drm_events_release(struct drm_file *file_priv)
{
	struct drm_device *dev = file_priv->minor->dev;
	struct drm_pending_event *e, *et;
	unsigned long flags;

	spin_lock_irqsave(&dev->event_lock, flags);

	/* Unlink pending events from this file; they may still fire later. */
	list_for_each_entry_safe(e, et, &file_priv->pending_event_list,
				 pending_link) {
		list_del(&e->pending_link);
		e->file_priv = NULL;
	}

	/* Free events that were delivered but never read by userspace. */
	list_for_each_entry_safe(e, et, &file_priv->event_list, link) {
		list_del(&e->link);
		kfree(e);
	}

	spin_unlock_irqrestore(&dev->event_lock, flags);
}
197
198
199
200
201
202
203
204
205
206
207
208
209
210
/**
 * drm_file_free - free a per-open DRM file structure
 * @file: file to free (may be NULL, in which case this is a no-op)
 *
 * Releases everything set up by drm_file_alloc() plus per-file state
 * accumulated during the file's lifetime (events, framebuffers, blobs,
 * syncobjs, GEM and PRIME state, master status), calling the driver's
 * preclose/postclose hooks along the way.  The teardown order mirrors and
 * reverses the setup order and must not be changed casually.
 */
void drm_file_free(struct drm_file *file)
{
	struct drm_device *dev;

	if (!file)
		return;

	dev = file->minor->dev;

	DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
		  task_pid_nr(current),
		  (long)old_encode_dev(file->minor->kdev->devt),
		  dev->open_count);

	/* Legacy drivers get a preclose hook before any state is torn down. */
	if (drm_core_check_feature(dev, DRIVER_LEGACY) &&
	    dev->driver->preclose)
		dev->driver->preclose(dev, file);

	if (drm_core_check_feature(dev, DRIVER_LEGACY))
		drm_legacy_lock_release(dev, file->filp);

	if (drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		drm_legacy_reclaim_buffers(dev, file);

	drm_events_release(file);

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		drm_fb_release(file);
		drm_property_destroy_user_blobs(dev, file);
	}

	if (drm_core_check_feature(dev, DRIVER_SYNCOBJ))
		drm_syncobj_release(file);

	if (drm_core_check_feature(dev, DRIVER_GEM))
		drm_gem_release(dev, file);

	drm_legacy_ctxbitmap_flush(dev, file);

	if (drm_is_primary_client(file))
		drm_master_release(file);

	if (dev->driver->postclose)
		dev->driver->postclose(dev, file);

	if (drm_core_check_feature(dev, DRIVER_PRIME))
		drm_prime_destroy_file_private(&file->prime);

	/* drm_events_release() must have emptied the queue by now. */
	WARN_ON(!list_empty(&file->event_list));

	put_pid(file->pid);
	kfree(file);
}
264
265static void drm_close_helper(struct file *filp)
266{
267 struct drm_file *file_priv = filp->private_data;
268 struct drm_device *dev = file_priv->minor->dev;
269
270 mutex_lock(&dev->filelist_mutex);
271 list_del(&file_priv->lhead);
272 mutex_unlock(&dev->filelist_mutex);
273
274 drm_file_free(file_priv);
275}
276
277
278
279
280
281
/*
 * Check whether DRM can work on this CPU at all.  Pre-V9 sparc is the one
 * architecture ruled out; everything else is fine.
 */
static int drm_cpu_valid(void)
{
#if defined(__sparc__) && !defined(__sparc_v9__)
	return 0;	/* No cmpxchg before v9 sparc. */
#else
	return 1;
#endif
}
289
290
291
292
293
294
295
296
297
298
299
/*
 * Called whenever a process opens a device node.  Allocates the per-open
 * &drm_file, wires it into @filp and adds it to the device's file list.
 *
 * RETURNS: 0 on success, negative errno on failure.
 */
static int drm_open_helper(struct file *filp, struct drm_minor *minor)
{
	struct drm_device *dev = minor->dev;
	struct drm_file *priv;
	int ret;

	if (filp->f_flags & O_EXCL)
		return -EBUSY;	/* No exclusive opens. */
	if (!drm_cpu_valid())
		return -EINVAL;
	/* Refuse opens while the device is switched away (vga_switcheroo). */
	if (dev->switch_power_state != DRM_SWITCH_POWER_ON && dev->switch_power_state != DRM_SWITCH_POWER_DYNAMIC_OFF)
		return -EINVAL;

	DRM_DEBUG("pid = %d, minor = %d\n", task_pid_nr(current), minor->index);

	priv = drm_file_alloc(minor);
	if (IS_ERR(priv))
		return PTR_ERR(priv);

	if (drm_is_primary_client(priv)) {
		ret = drm_master_open(priv);
		if (ret) {
			drm_file_free(priv);
			return ret;
		}
	}

	filp->private_data = priv;
	filp->f_mode |= FMODE_UNSIGNED_OFFSET;
	priv->filp = filp;

	mutex_lock(&dev->filelist_mutex);
	list_add(&priv->lhead, &dev->filelist);
	mutex_unlock(&dev->filelist_mutex);

#ifdef __alpha__
	/*
	 * Default the hose to the first found PCI VGA device's, falling back
	 * to the first root bus.  NOTE(review): alpha-specific legacy quirk;
	 * presumably needed for PCI address translation there — confirm
	 * before touching.
	 */
	if (!dev->hose) {
		struct pci_dev *pci_dev;
		pci_dev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, NULL);
		if (pci_dev) {
			dev->hose = pci_dev->sysdata;
			pci_dev_put(pci_dev);
		}
		if (!dev->hose) {
			struct pci_bus *b = list_entry(pci_root_buses.next,
				struct pci_bus, node);
			if (b)
				dev->hose = b->sysdata;
		}
	}
#endif

	return 0;
}
357
358
359
360
361
362
363
364
365
366
367
368
369
370
/**
 * drm_open - open method for DRM file
 * @inode: device inode
 * @filp: file pointer
 *
 * Looks up the DRM minor from the inode, sets up the per-open file state via
 * drm_open_helper(), and on the device's first open runs the legacy setup.
 * On failure the open_count bump and the minor reference are undone.
 *
 * RETURNS:
 * 0 on success or a negative error code on failure.
 */
int drm_open(struct inode *inode, struct file *filp)
{
	struct drm_device *dev;
	struct drm_minor *minor;
	int retcode;
	int need_setup = 0;

	minor = drm_minor_acquire(iminor(inode));
	if (IS_ERR(minor))
		return PTR_ERR(minor);

	dev = minor->dev;
	if (!dev->open_count++)
		need_setup = 1;

	/* Share address space with the device's anonymous inode. */
	filp->f_mapping = dev->anon_inode->i_mapping;

	retcode = drm_open_helper(filp, minor);
	if (retcode)
		goto err_undo;
	if (need_setup) {
		retcode = drm_legacy_setup(dev);
		if (retcode) {
			drm_close_helper(filp);
			goto err_undo;
		}
	}
	return 0;

err_undo:
	dev->open_count--;
	drm_minor_release(minor);
	return retcode;
}
EXPORT_SYMBOL(drm_open);
406EXPORT_SYMBOL(drm_open);
407
/*
 * Called when the last open file of a device is released: run the driver's
 * lastclose hook, reinitialize legacy state for legacy drivers, and give
 * in-kernel clients a chance to restore their state.
 */
void drm_lastclose(struct drm_device * dev)
{
	DRM_DEBUG("\n");

	if (dev->driver->lastclose)
		dev->driver->lastclose(dev);
	DRM_DEBUG("driver lastclose completed\n");

	if (drm_core_check_feature(dev, DRIVER_LEGACY))
		drm_legacy_dev_reinit(dev);

	drm_client_dev_restore(dev);
}
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
/**
 * drm_release - release method for DRM file
 * @inode: device inode
 * @filp: file pointer
 *
 * Tears down the per-open file state and, when this was the device's last
 * open file, runs drm_lastclose().  Serialized against concurrent
 * open/release via drm_global_mutex.
 *
 * RETURNS:
 * Always 0.
 */
int drm_release(struct inode *inode, struct file *filp)
{
	struct drm_file *file_priv = filp->private_data;
	struct drm_minor *minor = file_priv->minor;
	struct drm_device *dev = minor->dev;

	mutex_lock(&drm_global_mutex);

	DRM_DEBUG("open_count = %d\n", dev->open_count);

	drm_close_helper(filp);

	if (!--dev->open_count)
		drm_lastclose(dev);

	mutex_unlock(&drm_global_mutex);

	/* Drop the reference taken in drm_open(). */
	drm_minor_release(minor);

	return 0;
}
EXPORT_SYMBOL(drm_release);
457EXPORT_SYMBOL(drm_release);
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
/**
 * drm_read - read method for DRM file
 * @filp: file pointer
 * @buffer: userspace destination buffer
 * @count: size of @buffer in bytes
 * @offset: file offset (unused)
 *
 * Copies queued DRM events to userspace.  Only whole events are delivered;
 * if the next event does not fit into the remaining buffer space it is put
 * back on the queue.  Blocks until at least one event is available unless
 * O_NONBLOCK is set.  event_read_lock serializes concurrent readers.
 *
 * RETURNS:
 * Number of bytes copied, or a negative error code on failure.
 */
ssize_t drm_read(struct file *filp, char __user *buffer,
		 size_t count, loff_t *offset)
{
	struct drm_file *file_priv = filp->private_data;
	struct drm_device *dev = file_priv->minor->dev;
	ssize_t ret;

	if (!access_ok(buffer, count))
		return -EFAULT;

	ret = mutex_lock_interruptible(&file_priv->event_read_lock);
	if (ret)
		return ret;

	for (;;) {
		struct drm_pending_event *e = NULL;

		/* Pop the next event (if any) and reclaim its space. */
		spin_lock_irq(&dev->event_lock);
		if (!list_empty(&file_priv->event_list)) {
			e = list_first_entry(&file_priv->event_list,
					struct drm_pending_event, link);
			file_priv->event_space += e->event->length;
			list_del(&e->link);
		}
		spin_unlock_irq(&dev->event_lock);

		if (e == NULL) {
			/* Already copied something? Return it now. */
			if (ret)
				break;

			if (filp->f_flags & O_NONBLOCK) {
				ret = -EAGAIN;
				break;
			}

			/* Drop the read lock while sleeping so writers and
			 * other readers can make progress. */
			mutex_unlock(&file_priv->event_read_lock);
			ret = wait_event_interruptible(file_priv->event_wait,
						       !list_empty(&file_priv->event_list));
			if (ret >= 0)
				ret = mutex_lock_interruptible(&file_priv->event_read_lock);
			if (ret)
				return ret;
		} else {
			unsigned length = e->event->length;

			if (length > count - ret) {
				/* Event doesn't fit: requeue it at the head
				 * and re-reserve its space. */
put_back_event:
				spin_lock_irq(&dev->event_lock);
				file_priv->event_space -= length;
				list_add(&e->link, &file_priv->event_list);
				spin_unlock_irq(&dev->event_lock);
				wake_up_interruptible(&file_priv->event_wait);
				break;
			}

			if (copy_to_user(buffer + ret, e->event, length)) {
				if (ret == 0)
					ret = -EFAULT;
				goto put_back_event;
			}

			ret += length;
			kfree(e);
		}
	}
	mutex_unlock(&file_priv->event_read_lock);

	return ret;
}
EXPORT_SYMBOL(drm_read);
554EXPORT_SYMBOL(drm_read);
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572__poll_t drm_poll(struct file *filp, struct poll_table_struct *wait)
573{
574 struct drm_file *file_priv = filp->private_data;
575 __poll_t mask = 0;
576
577 poll_wait(filp, &file_priv->event_wait, wait);
578
579 if (!list_empty(&file_priv->event_list))
580 mask |= EPOLLIN | EPOLLRDNORM;
581
582 return mask;
583}
584EXPORT_SYMBOL(drm_poll);
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610int drm_event_reserve_init_locked(struct drm_device *dev,
611 struct drm_file *file_priv,
612 struct drm_pending_event *p,
613 struct drm_event *e)
614{
615 if (file_priv->event_space < e->length)
616 return -ENOMEM;
617
618 file_priv->event_space -= e->length;
619
620 p->event = e;
621 list_add(&p->pending_link, &file_priv->pending_event_list);
622 p->file_priv = file_priv;
623
624 return 0;
625}
626EXPORT_SYMBOL(drm_event_reserve_init_locked);
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652int drm_event_reserve_init(struct drm_device *dev,
653 struct drm_file *file_priv,
654 struct drm_pending_event *p,
655 struct drm_event *e)
656{
657 unsigned long flags;
658 int ret;
659
660 spin_lock_irqsave(&dev->event_lock, flags);
661 ret = drm_event_reserve_init_locked(dev, file_priv, p, e);
662 spin_unlock_irqrestore(&dev->event_lock, flags);
663
664 return ret;
665}
666EXPORT_SYMBOL(drm_event_reserve_init);
667
668
669
670
671
672
673
674
675
676
677void drm_event_cancel_free(struct drm_device *dev,
678 struct drm_pending_event *p)
679{
680 unsigned long flags;
681 spin_lock_irqsave(&dev->event_lock, flags);
682 if (p->file_priv) {
683 p->file_priv->event_space += p->event->length;
684 list_del(&p->pending_link);
685 }
686 spin_unlock_irqrestore(&dev->event_lock, flags);
687
688 if (p->fence)
689 dma_fence_put(p->fence);
690
691 kfree(p);
692}
693EXPORT_SYMBOL(drm_event_cancel_free);
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
/**
 * drm_send_event_locked - send a DRM event to the file it was reserved on
 * @dev: DRM device
 * @e: pending event to deliver
 *
 * Completes @e's completion (if any), signals and drops its fence reference
 * (if any), then either frees the event — when its file has gone away — or
 * moves it from the pending list to the file's readable event list and wakes
 * any reader.  Caller must hold dev->event_lock.
 */
void drm_send_event_locked(struct drm_device *dev, struct drm_pending_event *e)
{
	assert_spin_locked(&dev->event_lock);

	if (e->completion) {
		complete_all(e->completion);
		/* The completion may be embedded in a caller structure whose
		 * lifetime this callback manages. */
		e->completion_release(e->completion);
		e->completion = NULL;
	}

	if (e->fence) {
		dma_fence_signal(e->fence);
		dma_fence_put(e->fence);
	}

	/* file_priv == NULL means drm_events_release() orphaned this event:
	 * nobody can read it anymore, so just free it. */
	if (!e->file_priv) {
		kfree(e);
		return;
	}

	list_del(&e->pending_link);
	list_add_tail(&e->link,
		      &e->file_priv->event_list);
	wake_up_interruptible(&e->file_priv->event_wait);
}
EXPORT_SYMBOL(drm_send_event_locked);
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751void drm_send_event(struct drm_device *dev, struct drm_pending_event *e)
752{
753 unsigned long irqflags;
754
755 spin_lock_irqsave(&dev->event_lock, irqflags);
756 drm_send_event_locked(dev, e);
757 spin_unlock_irqrestore(&dev->event_lock, irqflags);
758}
759EXPORT_SYMBOL(drm_send_event);
760