1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28#include <linux/types.h>
29#include <linux/slab.h>
30#include <linux/mm.h>
31#include <linux/uaccess.h>
32#include <linux/fs.h>
33#include <linux/file.h>
34#include <linux/module.h>
35#include <linux/mman.h>
36#include <linux/pagemap.h>
37#include <linux/shmem_fs.h>
38#include <linux/dma-buf.h>
39#include <drm/drmP.h>
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
/*
 * Range of fake mmap offsets handed out for GEM objects.  The start is
 * placed just above what a 32-bit (64-bit kernels) or 28-bit (32-bit
 * kernels) byte offset can address — presumably so GEM offsets can never
 * collide with legacy DRM map offsets (TODO confirm against drm_mmap()).
 */
#if BITS_PER_LONG == 64
#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFFUL >> PAGE_SHIFT) + 1)
#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFFUL >> PAGE_SHIFT) * 16)
#else
#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFUL >> PAGE_SHIFT) + 1)
#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFUL >> PAGE_SHIFT) * 16)
#endif
85
86
87
88
89
90int
91drm_gem_init(struct drm_device *dev)
92{
93 struct drm_gem_mm *mm;
94
95 spin_lock_init(&dev->object_name_lock);
96 idr_init(&dev->object_name_idr);
97
98 mm = kzalloc(sizeof(struct drm_gem_mm), GFP_KERNEL);
99 if (!mm) {
100 DRM_ERROR("out of memory\n");
101 return -ENOMEM;
102 }
103
104 dev->mm_private = mm;
105
106 if (drm_ht_create(&mm->offset_hash, 12)) {
107 kfree(mm);
108 return -ENOMEM;
109 }
110
111 drm_mm_init(&mm->offset_manager, DRM_FILE_PAGE_OFFSET_START,
112 DRM_FILE_PAGE_OFFSET_SIZE);
113
114 return 0;
115}
116
117void
118drm_gem_destroy(struct drm_device *dev)
119{
120 struct drm_gem_mm *mm = dev->mm_private;
121
122 drm_mm_takedown(&mm->offset_manager);
123 drm_ht_remove(&mm->offset_hash);
124 kfree(mm);
125 dev->mm_private = NULL;
126}
127
128
129
130
131
132int drm_gem_object_init(struct drm_device *dev,
133 struct drm_gem_object *obj, size_t size)
134{
135 BUG_ON((size & (PAGE_SIZE - 1)) != 0);
136
137 obj->dev = dev;
138 obj->filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
139 if (IS_ERR(obj->filp))
140 return PTR_ERR(obj->filp);
141
142 kref_init(&obj->refcount);
143 atomic_set(&obj->handle_count, 0);
144 obj->size = size;
145
146 return 0;
147}
148EXPORT_SYMBOL(drm_gem_object_init);
149
150
151
152
153
154
155int drm_gem_private_object_init(struct drm_device *dev,
156 struct drm_gem_object *obj, size_t size)
157{
158 BUG_ON((size & (PAGE_SIZE - 1)) != 0);
159
160 obj->dev = dev;
161 obj->filp = NULL;
162
163 kref_init(&obj->refcount);
164 atomic_set(&obj->handle_count, 0);
165 obj->size = size;
166
167 return 0;
168}
169EXPORT_SYMBOL(drm_gem_private_object_init);
170
171
172
173
174struct drm_gem_object *
175drm_gem_object_alloc(struct drm_device *dev, size_t size)
176{
177 struct drm_gem_object *obj;
178
179 obj = kzalloc(sizeof(*obj), GFP_KERNEL);
180 if (!obj)
181 goto free;
182
183 if (drm_gem_object_init(dev, obj, size) != 0)
184 goto free;
185
186 if (dev->driver->gem_init_object != NULL &&
187 dev->driver->gem_init_object(obj) != 0) {
188 goto fput;
189 }
190 return obj;
191fput:
192
193 fput(obj->filp);
194free:
195 kfree(obj);
196 return NULL;
197}
198EXPORT_SYMBOL(drm_gem_object_alloc);
199
200static void
201drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp)
202{
203 if (obj->import_attach) {
204 drm_prime_remove_buf_handle(&filp->prime,
205 obj->import_attach->dmabuf);
206 }
207 if (obj->export_dma_buf) {
208 drm_prime_remove_buf_handle(&filp->prime,
209 obj->export_dma_buf);
210 }
211}
212
213
214
215
/**
 * drm_gem_handle_delete - remove a per-file handle to a GEM object
 * @filp: DRM file private whose handle table holds the entry
 * @handle: userspace handle to drop
 *
 * Removes @handle from @filp's object idr, notifies the PRIME layer and
 * the driver's gem_close_object() hook, then drops the handle reference
 * on the object.
 *
 * Returns 0 on success or -EINVAL if the handle does not exist.
 */
int
drm_gem_handle_delete(struct drm_file *filp, u32 handle)
{
	struct drm_device *dev;
	struct drm_gem_object *obj;

	/*
	 * Hold table_lock across both the lookup and the removal so a
	 * concurrent lookup cannot acquire the object through this
	 * handle once we have committed to deleting it.  The heavier
	 * cleanup (PRIME, driver hook, unreference) runs after the
	 * spinlock is dropped.
	 */
	spin_lock(&filp->table_lock);

	/* Check if we currently have a reference on the object. */
	obj = idr_find(&filp->object_idr, handle);
	if (obj == NULL) {
		spin_unlock(&filp->table_lock);
		return -EINVAL;
	}
	dev = obj->dev;

	/* Release the idr entry; the handle is now dead for userspace. */
	idr_remove(&filp->object_idr, handle);
	spin_unlock(&filp->table_lock);

	drm_gem_remove_prime_handles(obj, filp);

	if (dev->driver->gem_close_object)
		dev->driver->gem_close_object(obj, filp);
	drm_gem_object_handle_unreference_unlocked(obj);

	return 0;
}
EXPORT_SYMBOL(drm_gem_handle_delete);
254
255
256
257
258
259
/**
 * drm_gem_handle_create - allocate a userspace handle for a GEM object
 * @file_priv: DRM file private owning the handle table
 * @obj: object to attach the handle to
 * @handlep: out parameter for the new handle
 *
 * Allocates an idr entry mapping a fresh handle (>= 1) to @obj, takes a
 * handle reference, and runs the driver's optional gem_open_object()
 * hook.  If the hook fails, the handle and its reference are rolled
 * back via drm_gem_handle_delete().
 *
 * Returns 0 on success or a negative errno.
 */
int
drm_gem_handle_create(struct drm_file *file_priv,
		       struct drm_gem_object *obj,
		       u32 *handlep)
{
	struct drm_device *dev = obj->dev;
	int ret;

	/*
	 * Preload idr memory with GFP_KERNEL outside the spinlock so the
	 * GFP_NOWAIT allocation inside the locked region can draw on the
	 * per-cpu reserve instead of failing under memory pressure.
	 */
	idr_preload(GFP_KERNEL);
	spin_lock(&file_priv->table_lock);

	ret = idr_alloc(&file_priv->object_idr, obj, 1, 0, GFP_NOWAIT);

	spin_unlock(&file_priv->table_lock);
	idr_preload_end();
	if (ret < 0)
		return ret;
	/* idr_alloc() returns the allocated id on success. */
	*handlep = ret;

	drm_gem_object_handle_reference(obj);

	if (dev->driver->gem_open_object) {
		ret = dev->driver->gem_open_object(obj, file_priv);
		if (ret) {
			/* Undo both the idr entry and the handle reference. */
			drm_gem_handle_delete(file_priv, *handlep);
			return ret;
		}
	}

	return 0;
}
EXPORT_SYMBOL(drm_gem_handle_create);
296
297
298
299
300
301
302
303
304void
305drm_gem_free_mmap_offset(struct drm_gem_object *obj)
306{
307 struct drm_device *dev = obj->dev;
308 struct drm_gem_mm *mm = dev->mm_private;
309 struct drm_map_list *list = &obj->map_list;
310
311 drm_ht_remove_item(&mm->offset_hash, &list->hash);
312 drm_mm_put_block(list->file_offset_node);
313 kfree(list->map);
314 list->map = NULL;
315}
316EXPORT_SYMBOL(drm_gem_free_mmap_offset);
317
318
319
320
321
322
323
324
325
326
327
328
329int
330drm_gem_create_mmap_offset(struct drm_gem_object *obj)
331{
332 struct drm_device *dev = obj->dev;
333 struct drm_gem_mm *mm = dev->mm_private;
334 struct drm_map_list *list;
335 struct drm_local_map *map;
336 int ret;
337
338
339 list = &obj->map_list;
340 list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL);
341 if (!list->map)
342 return -ENOMEM;
343
344 map = list->map;
345 map->type = _DRM_GEM;
346 map->size = obj->size;
347 map->handle = obj;
348
349
350 list->file_offset_node = drm_mm_search_free(&mm->offset_manager,
351 obj->size / PAGE_SIZE, 0, false);
352
353 if (!list->file_offset_node) {
354 DRM_ERROR("failed to allocate offset for bo %d\n", obj->name);
355 ret = -ENOSPC;
356 goto out_free_list;
357 }
358
359 list->file_offset_node = drm_mm_get_block(list->file_offset_node,
360 obj->size / PAGE_SIZE, 0);
361 if (!list->file_offset_node) {
362 ret = -ENOMEM;
363 goto out_free_list;
364 }
365
366 list->hash.key = list->file_offset_node->start;
367 ret = drm_ht_insert_item(&mm->offset_hash, &list->hash);
368 if (ret) {
369 DRM_ERROR("failed to add to map hash\n");
370 goto out_free_mm;
371 }
372
373 return 0;
374
375out_free_mm:
376 drm_mm_put_block(list->file_offset_node);
377out_free_list:
378 kfree(list->map);
379 list->map = NULL;
380
381 return ret;
382}
383EXPORT_SYMBOL(drm_gem_create_mmap_offset);
384
385
386struct drm_gem_object *
387drm_gem_object_lookup(struct drm_device *dev, struct drm_file *filp,
388 u32 handle)
389{
390 struct drm_gem_object *obj;
391
392 spin_lock(&filp->table_lock);
393
394
395 obj = idr_find(&filp->object_idr, handle);
396 if (obj == NULL) {
397 spin_unlock(&filp->table_lock);
398 return NULL;
399 }
400
401 drm_gem_object_reference(obj);
402
403 spin_unlock(&filp->table_lock);
404
405 return obj;
406}
407EXPORT_SYMBOL(drm_gem_object_lookup);
408
409
410
411
412int
413drm_gem_close_ioctl(struct drm_device *dev, void *data,
414 struct drm_file *file_priv)
415{
416 struct drm_gem_close *args = data;
417 int ret;
418
419 if (!(dev->driver->driver_features & DRIVER_GEM))
420 return -ENODEV;
421
422 ret = drm_gem_handle_delete(file_priv, args->handle);
423
424 return ret;
425}
426
427
428
429
430
431
432
/**
 * drm_gem_flink_ioctl - GEM_FLINK ioctl: create a global name for an object
 * @dev: DRM device
 * @data: struct drm_gem_flink from userspace
 * @file_priv: calling DRM file
 *
 * Assigns the object a device-global "flink" name (if it does not have
 * one yet) so other clients can open it via GEM_OPEN, and returns that
 * name in args->name.  The name table holds its own reference on the
 * object, dropped in drm_gem_object_handle_free().
 *
 * Returns 0 on success, -ENODEV without GEM support, -ENOENT for a bad
 * handle, or a negative errno from idr_alloc().
 */
int
drm_gem_flink_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_gem_flink *args = data;
	struct drm_gem_object *obj;
	int ret;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -ENOENT;

	/*
	 * Preload idr memory outside the spinlock so the GFP_NOWAIT
	 * allocation under object_name_lock can use the reserve.
	 */
	idr_preload(GFP_KERNEL);
	spin_lock(&dev->object_name_lock);
	if (!obj->name) {
		ret = idr_alloc(&dev->object_name_idr, obj, 1, 0, GFP_NOWAIT);
		if (ret < 0)
			goto err;

		obj->name = ret;

		/* The name table keeps its own reference on the object. */
		drm_gem_object_reference(obj);
	}

	/* Return the (new or pre-existing) name. */
	args->name = (uint64_t) obj->name;
	ret = 0;

err:
	spin_unlock(&dev->object_name_lock);
	idr_preload_end();
	drm_gem_object_unreference_unlocked(obj);
	return ret;
}
470
471
472
473
474
475
476
477int
478drm_gem_open_ioctl(struct drm_device *dev, void *data,
479 struct drm_file *file_priv)
480{
481 struct drm_gem_open *args = data;
482 struct drm_gem_object *obj;
483 int ret;
484 u32 handle;
485
486 if (!(dev->driver->driver_features & DRIVER_GEM))
487 return -ENODEV;
488
489 spin_lock(&dev->object_name_lock);
490 obj = idr_find(&dev->object_name_idr, (int) args->name);
491 if (obj)
492 drm_gem_object_reference(obj);
493 spin_unlock(&dev->object_name_lock);
494 if (!obj)
495 return -ENOENT;
496
497 ret = drm_gem_handle_create(file_priv, obj, &handle);
498 drm_gem_object_unreference_unlocked(obj);
499 if (ret)
500 return ret;
501
502 args->handle = handle;
503 args->size = obj->size;
504
505 return 0;
506}
507
508
509
510
511
512void
513drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
514{
515 idr_init(&file_private->object_idr);
516 spin_lock_init(&file_private->table_lock);
517}
518
519
520
521
522
523static int
524drm_gem_object_release_handle(int id, void *ptr, void *data)
525{
526 struct drm_file *file_priv = data;
527 struct drm_gem_object *obj = ptr;
528 struct drm_device *dev = obj->dev;
529
530 drm_gem_remove_prime_handles(obj, file_priv);
531
532 if (dev->driver->gem_close_object)
533 dev->driver->gem_close_object(obj, file_priv);
534
535 drm_gem_object_handle_unreference_unlocked(obj);
536
537 return 0;
538}
539
540
541
542
543
544
545void
546drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
547{
548 idr_for_each(&file_private->object_idr,
549 &drm_gem_object_release_handle, file_private);
550 idr_destroy(&file_private->object_idr);
551}
552
553void
554drm_gem_object_release(struct drm_gem_object *obj)
555{
556 if (obj->filp)
557 fput(obj->filp);
558}
559EXPORT_SYMBOL(drm_gem_object_release);
560
561
562
563
564
565
566
567void
568drm_gem_object_free(struct kref *kref)
569{
570 struct drm_gem_object *obj = (struct drm_gem_object *) kref;
571 struct drm_device *dev = obj->dev;
572
573 BUG_ON(!mutex_is_locked(&dev->struct_mutex));
574
575 if (dev->driver->gem_free_object != NULL)
576 dev->driver->gem_free_object(obj);
577}
578EXPORT_SYMBOL(drm_gem_object_free);
579
/*
 * Release callback handed to kref_put() in drm_gem_object_handle_free().
 * It must never fire: the caller always holds another reference when
 * dropping the flink-name reference, so reaching zero here means the
 * reference accounting is broken.
 */
static void drm_gem_object_ref_bug(struct kref *list_kref)
{
	BUG();
}
584
585
586
587
588
589
590
591
/**
 * drm_gem_object_handle_free - drop an object's flink name
 * @obj: GEM object whose last userspace handle just went away
 *
 * If the object was flink'ed (obj->name != 0), remove it from the
 * device-global name idr and drop the reference the name table held.
 */
void drm_gem_object_handle_free(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;

	/* Remove any name for this object under the name lock. */
	spin_lock(&dev->object_name_lock);
	if (obj->name) {
		idr_remove(&dev->object_name_idr, obj->name);
		obj->name = 0;
		spin_unlock(&dev->object_name_lock);
		/*
		 * The object name held a reference to this object; drop
		 * that now.
		 *
		 * This cannot be the last reference, because the caller
		 * still holds one — drm_gem_object_ref_bug() BUGs if it
		 * ever runs as the release callback.
		 */
		kref_put(&obj->refcount, drm_gem_object_ref_bug);
	} else
		spin_unlock(&dev->object_name_lock);

}
EXPORT_SYMBOL(drm_gem_object_handle_free);
614
615void drm_gem_vm_open(struct vm_area_struct *vma)
616{
617 struct drm_gem_object *obj = vma->vm_private_data;
618
619 drm_gem_object_reference(obj);
620
621 mutex_lock(&obj->dev->struct_mutex);
622 drm_vm_open_locked(obj->dev, vma);
623 mutex_unlock(&obj->dev->struct_mutex);
624}
625EXPORT_SYMBOL(drm_gem_vm_open);
626
627void drm_gem_vm_close(struct vm_area_struct *vma)
628{
629 struct drm_gem_object *obj = vma->vm_private_data;
630 struct drm_device *dev = obj->dev;
631
632 mutex_lock(&dev->struct_mutex);
633 drm_vm_close_locked(obj->dev, vma);
634 drm_gem_object_unreference(obj);
635 mutex_unlock(&dev->struct_mutex);
636}
637EXPORT_SYMBOL(drm_gem_vm_close);
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
/**
 * drm_gem_mmap_obj - set up a VMA to map a GEM object
 * @obj: GEM object to map
 * @obj_size: size of the object being mapped, in bytes
 * @vma: VMA describing the requested mapping
 *
 * Installs the driver's gem_vm_ops on @vma, stashes @obj in
 * vm_private_data, applies the standard GEM vm flags and
 * write-combining page protection, and takes an object reference that
 * is dropped by drm_gem_vm_close().
 *
 * Caller must hold dev->struct_mutex (checked via lockdep).
 *
 * Returns 0 on success, -EINVAL if the VMA is larger than the object
 * or the driver provides no gem_vm_ops.
 */
int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
		     struct vm_area_struct *vma)
{
	struct drm_device *dev = obj->dev;

	lockdep_assert_held(&dev->struct_mutex);

	/* Check for valid size: the VMA must fit within the object. */
	if (obj_size < vma->vm_end - vma->vm_start)
		return -EINVAL;

	if (!dev->driver->gem_vm_ops)
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_ops = dev->driver->gem_vm_ops;
	vma->vm_private_data = obj;
	/* Page protection is derived after the flags update above. */
	vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));

	/*
	 * Take a ref for this mapping of the object, so that the fault
	 * handler can dereference the mmap offset's pointer to the
	 * object.  This reference is cleaned up by the corresponding
	 * vm_close (which should happen whether the vma was created by
	 * this call, or by a vm_open due to mremap or partial unmap or
	 * whatever).
	 */
	drm_gem_object_reference(obj);

	drm_vm_open_locked(dev, vma);
	return 0;
}
EXPORT_SYMBOL(drm_gem_mmap_obj);
692
693
694
695
696
697
698
699
700
701
702
703
704
705int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
706{
707 struct drm_file *priv = filp->private_data;
708 struct drm_device *dev = priv->minor->dev;
709 struct drm_gem_mm *mm = dev->mm_private;
710 struct drm_local_map *map = NULL;
711 struct drm_hash_item *hash;
712 int ret = 0;
713
714 if (drm_device_is_unplugged(dev))
715 return -ENODEV;
716
717 mutex_lock(&dev->struct_mutex);
718
719 if (drm_ht_find_item(&mm->offset_hash, vma->vm_pgoff, &hash)) {
720 mutex_unlock(&dev->struct_mutex);
721 return drm_mmap(filp, vma);
722 }
723
724 map = drm_hash_entry(hash, struct drm_map_list, hash)->map;
725 if (!map ||
726 ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN))) {
727 ret = -EPERM;
728 goto out_unlock;
729 }
730
731 ret = drm_gem_mmap_obj(map->handle, map->size, vma);
732
733out_unlock:
734 mutex_unlock(&dev->struct_mutex);
735
736 return ret;
737}
738EXPORT_SYMBOL(drm_gem_mmap);
739