1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
#include <linux/dma-buf.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <drm/drmP.h>
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
/*
 * Carve out a region of the mmap offset space (counted in pages) for GEM
 * objects.  The region starts just above the range addressable with 32
 * bits (28 bits on 32-bit kernels) so GEM fake offsets cannot collide
 * with legacy DRM map offsets.
 */
#if BITS_PER_LONG == 64
#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFFUL >> PAGE_SHIFT) + 1)
#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFFUL >> PAGE_SHIFT) * 16)
#else
#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFUL >> PAGE_SHIFT) + 1)
#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFUL >> PAGE_SHIFT) * 16)
#endif
85
86
87
88
89
90int
91drm_gem_init(struct drm_device *dev)
92{
93 struct drm_gem_mm *mm;
94
95 spin_lock_init(&dev->object_name_lock);
96 idr_init(&dev->object_name_idr);
97
98 mm = kzalloc(sizeof(struct drm_gem_mm), GFP_KERNEL);
99 if (!mm) {
100 DRM_ERROR("out of memory\n");
101 return -ENOMEM;
102 }
103
104 dev->mm_private = mm;
105
106 if (drm_ht_create(&mm->offset_hash, 12)) {
107 kfree(mm);
108 return -ENOMEM;
109 }
110
111 if (drm_mm_init(&mm->offset_manager, DRM_FILE_PAGE_OFFSET_START,
112 DRM_FILE_PAGE_OFFSET_SIZE)) {
113 drm_ht_remove(&mm->offset_hash);
114 kfree(mm);
115 return -ENOMEM;
116 }
117
118 return 0;
119}
120
121void
122drm_gem_destroy(struct drm_device *dev)
123{
124 struct drm_gem_mm *mm = dev->mm_private;
125
126 drm_mm_takedown(&mm->offset_manager);
127 drm_ht_remove(&mm->offset_hash);
128 kfree(mm);
129 dev->mm_private = NULL;
130}
131
132
133
134
135
136int drm_gem_object_init(struct drm_device *dev,
137 struct drm_gem_object *obj, size_t size)
138{
139 BUG_ON((size & (PAGE_SIZE - 1)) != 0);
140
141 obj->dev = dev;
142 obj->filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
143 if (IS_ERR(obj->filp))
144 return PTR_ERR(obj->filp);
145
146 kref_init(&obj->refcount);
147 atomic_set(&obj->handle_count, 0);
148 obj->size = size;
149
150 return 0;
151}
152EXPORT_SYMBOL(drm_gem_object_init);
153
154
155
156
157
158
159int drm_gem_private_object_init(struct drm_device *dev,
160 struct drm_gem_object *obj, size_t size)
161{
162 BUG_ON((size & (PAGE_SIZE - 1)) != 0);
163
164 obj->dev = dev;
165 obj->filp = NULL;
166
167 kref_init(&obj->refcount);
168 atomic_set(&obj->handle_count, 0);
169 obj->size = size;
170
171 return 0;
172}
173EXPORT_SYMBOL(drm_gem_private_object_init);
174
175
176
177
178struct drm_gem_object *
179drm_gem_object_alloc(struct drm_device *dev, size_t size)
180{
181 struct drm_gem_object *obj;
182
183 obj = kzalloc(sizeof(*obj), GFP_KERNEL);
184 if (!obj)
185 goto free;
186
187 if (drm_gem_object_init(dev, obj, size) != 0)
188 goto free;
189
190 if (dev->driver->gem_init_object != NULL &&
191 dev->driver->gem_init_object(obj) != 0) {
192 goto fput;
193 }
194 return obj;
195fput:
196
197 fput(obj->filp);
198free:
199 kfree(obj);
200 return NULL;
201}
202EXPORT_SYMBOL(drm_gem_object_alloc);
203
204static void
205drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp)
206{
207 if (obj->import_attach) {
208 drm_prime_remove_buf_handle(&filp->prime,
209 obj->import_attach->dmabuf);
210 }
211 if (obj->export_dma_buf) {
212 drm_prime_remove_buf_handle(&filp->prime,
213 obj->export_dma_buf);
214 }
215}
216
217
218
219
/**
 * drm_gem_handle_delete - remove a GEM handle from a file's handle table
 * @filp: DRM file private whose handle table is searched
 * @handle: userspace handle to delete
 *
 * Looks up and removes @handle under filp->table_lock, then (outside the
 * lock) tears down PRIME links, calls the driver's gem_close_object hook,
 * and drops the handle reference on the object.
 *
 * Returns 0 on success or -EINVAL if the handle does not exist.
 */
int
drm_gem_handle_delete(struct drm_file *filp, u32 handle)
{
	struct drm_device *dev;
	struct drm_gem_object *obj;

	/*
	 * The obj->dev pointer is stable while we hold a handle on the
	 * object, so it is safe to cache it before dropping table_lock
	 * and use it afterwards.
	 */
	spin_lock(&filp->table_lock);

	/* Check if we currently have a reference on the object */
	obj = idr_find(&filp->object_idr, handle);
	if (obj == NULL) {
		spin_unlock(&filp->table_lock);
		return -EINVAL;
	}
	dev = obj->dev;

	/* Release reference and decrement refcount. */
	idr_remove(&filp->object_idr, handle);
	spin_unlock(&filp->table_lock);

	drm_gem_remove_prime_handles(obj, filp);

	if (dev->driver->gem_close_object)
		dev->driver->gem_close_object(obj, filp);
	/* May drop the last reference and free the object. */
	drm_gem_object_handle_unreference_unlocked(obj);

	return 0;
}
EXPORT_SYMBOL(drm_gem_handle_delete);
258
259
260
261
262
263
/**
 * drm_gem_handle_create - allocate a userspace handle for a GEM object
 * @file_priv: DRM file private the handle is created for
 * @obj: object to get a handle on
 * @handlep: on success, filled with the new (non-zero) handle
 *
 * Allocates an idr slot under file_priv->table_lock using the
 * idr_preload()/GFP_NOWAIT pattern (so no sleeping allocation happens
 * while the spinlock is held), takes a handle reference on the object,
 * then invokes the driver's optional gem_open_object hook.  If the hook
 * fails, the handle is torn down again via drm_gem_handle_delete().
 *
 * Returns 0 on success or a negative errno from idr_alloc() or the
 * driver hook.
 */
int
drm_gem_handle_create(struct drm_file *file_priv,
		      struct drm_gem_object *obj,
		      u32 *handlep)
{
	struct drm_device *dev = obj->dev;
	int ret;

	/*
	 * Get the user-visible handle using idr; handles start at 1
	 * because userspace treats 0 as "no handle".
	 */
	idr_preload(GFP_KERNEL);
	spin_lock(&file_priv->table_lock);

	ret = idr_alloc(&file_priv->object_idr, obj, 1, 0, GFP_NOWAIT);

	spin_unlock(&file_priv->table_lock);
	idr_preload_end();
	if (ret < 0)
		return ret;
	*handlep = ret;

	drm_gem_object_handle_reference(obj);

	if (dev->driver->gem_open_object) {
		ret = dev->driver->gem_open_object(obj, file_priv);
		if (ret) {
			/* Undo the idr entry and the handle reference. */
			drm_gem_handle_delete(file_priv, *handlep);
			return ret;
		}
	}

	return 0;
}
EXPORT_SYMBOL(drm_gem_handle_create);
299EXPORT_SYMBOL(drm_gem_handle_create);
300
301
302
303
304
305
306
307
308void
309drm_gem_free_mmap_offset(struct drm_gem_object *obj)
310{
311 struct drm_device *dev = obj->dev;
312 struct drm_gem_mm *mm = dev->mm_private;
313 struct drm_map_list *list = &obj->map_list;
314
315 drm_ht_remove_item(&mm->offset_hash, &list->hash);
316 drm_mm_put_block(list->file_offset_node);
317 kfree(list->map);
318 list->map = NULL;
319}
320EXPORT_SYMBOL(drm_gem_free_mmap_offset);
321
322
323
324
325
326
327
328
329
330
331
332
333int
334drm_gem_create_mmap_offset(struct drm_gem_object *obj)
335{
336 struct drm_device *dev = obj->dev;
337 struct drm_gem_mm *mm = dev->mm_private;
338 struct drm_map_list *list;
339 struct drm_local_map *map;
340 int ret;
341
342
343 list = &obj->map_list;
344 list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL);
345 if (!list->map)
346 return -ENOMEM;
347
348 map = list->map;
349 map->type = _DRM_GEM;
350 map->size = obj->size;
351 map->handle = obj;
352
353
354 list->file_offset_node = drm_mm_search_free(&mm->offset_manager,
355 obj->size / PAGE_SIZE, 0, false);
356
357 if (!list->file_offset_node) {
358 DRM_ERROR("failed to allocate offset for bo %d\n", obj->name);
359 ret = -ENOSPC;
360 goto out_free_list;
361 }
362
363 list->file_offset_node = drm_mm_get_block(list->file_offset_node,
364 obj->size / PAGE_SIZE, 0);
365 if (!list->file_offset_node) {
366 ret = -ENOMEM;
367 goto out_free_list;
368 }
369
370 list->hash.key = list->file_offset_node->start;
371 ret = drm_ht_insert_item(&mm->offset_hash, &list->hash);
372 if (ret) {
373 DRM_ERROR("failed to add to map hash\n");
374 goto out_free_mm;
375 }
376
377 return 0;
378
379out_free_mm:
380 drm_mm_put_block(list->file_offset_node);
381out_free_list:
382 kfree(list->map);
383 list->map = NULL;
384
385 return ret;
386}
387EXPORT_SYMBOL(drm_gem_create_mmap_offset);
388
389
390struct drm_gem_object *
391drm_gem_object_lookup(struct drm_device *dev, struct drm_file *filp,
392 u32 handle)
393{
394 struct drm_gem_object *obj;
395
396 spin_lock(&filp->table_lock);
397
398
399 obj = idr_find(&filp->object_idr, handle);
400 if (obj == NULL) {
401 spin_unlock(&filp->table_lock);
402 return NULL;
403 }
404
405 drm_gem_object_reference(obj);
406
407 spin_unlock(&filp->table_lock);
408
409 return obj;
410}
411EXPORT_SYMBOL(drm_gem_object_lookup);
412
413
414
415
416int
417drm_gem_close_ioctl(struct drm_device *dev, void *data,
418 struct drm_file *file_priv)
419{
420 struct drm_gem_close *args = data;
421 int ret;
422
423 if (!(dev->driver->driver_features & DRIVER_GEM))
424 return -ENODEV;
425
426 ret = drm_gem_handle_delete(file_priv, args->handle);
427
428 return ret;
429}
430
431
432
433
434
435
436
437int
438drm_gem_flink_ioctl(struct drm_device *dev, void *data,
439 struct drm_file *file_priv)
440{
441 struct drm_gem_flink *args = data;
442 struct drm_gem_object *obj;
443 int ret;
444
445 if (!(dev->driver->driver_features & DRIVER_GEM))
446 return -ENODEV;
447
448 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
449 if (obj == NULL)
450 return -ENOENT;
451
452 idr_preload(GFP_KERNEL);
453 spin_lock(&dev->object_name_lock);
454 if (!obj->name) {
455 ret = idr_alloc(&dev->object_name_idr, obj, 1, 0, GFP_NOWAIT);
456 obj->name = ret;
457 args->name = (uint64_t) obj->name;
458 spin_unlock(&dev->object_name_lock);
459 idr_preload_end();
460
461 if (ret < 0)
462 goto err;
463 ret = 0;
464
465
466 drm_gem_object_reference(obj);
467 } else {
468 args->name = (uint64_t) obj->name;
469 spin_unlock(&dev->object_name_lock);
470 idr_preload_end();
471 ret = 0;
472 }
473
474err:
475 drm_gem_object_unreference_unlocked(obj);
476 return ret;
477}
478
479
480
481
482
483
484
485int
486drm_gem_open_ioctl(struct drm_device *dev, void *data,
487 struct drm_file *file_priv)
488{
489 struct drm_gem_open *args = data;
490 struct drm_gem_object *obj;
491 int ret;
492 u32 handle;
493
494 if (!(dev->driver->driver_features & DRIVER_GEM))
495 return -ENODEV;
496
497 spin_lock(&dev->object_name_lock);
498 obj = idr_find(&dev->object_name_idr, (int) args->name);
499 if (obj)
500 drm_gem_object_reference(obj);
501 spin_unlock(&dev->object_name_lock);
502 if (!obj)
503 return -ENOENT;
504
505 ret = drm_gem_handle_create(file_priv, obj, &handle);
506 drm_gem_object_unreference_unlocked(obj);
507 if (ret)
508 return ret;
509
510 args->handle = handle;
511 args->size = obj->size;
512
513 return 0;
514}
515
516
517
518
519
520void
521drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
522{
523 idr_init(&file_private->object_idr);
524 spin_lock_init(&file_private->table_lock);
525}
526
527
528
529
530
531static int
532drm_gem_object_release_handle(int id, void *ptr, void *data)
533{
534 struct drm_file *file_priv = data;
535 struct drm_gem_object *obj = ptr;
536 struct drm_device *dev = obj->dev;
537
538 drm_gem_remove_prime_handles(obj, file_priv);
539
540 if (dev->driver->gem_close_object)
541 dev->driver->gem_close_object(obj, file_priv);
542
543 drm_gem_object_handle_unreference_unlocked(obj);
544
545 return 0;
546}
547
548
549
550
551
552
553void
554drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
555{
556 idr_for_each(&file_private->object_idr,
557 &drm_gem_object_release_handle, file_private);
558 idr_destroy(&file_private->object_idr);
559}
560
561void
562drm_gem_object_release(struct drm_gem_object *obj)
563{
564 if (obj->filp)
565 fput(obj->filp);
566}
567EXPORT_SYMBOL(drm_gem_object_release);
568
569
570
571
572
573
574
575void
576drm_gem_object_free(struct kref *kref)
577{
578 struct drm_gem_object *obj = (struct drm_gem_object *) kref;
579 struct drm_device *dev = obj->dev;
580
581 BUG_ON(!mutex_is_locked(&dev->struct_mutex));
582
583 if (dev->driver->gem_free_object != NULL)
584 dev->driver->gem_free_object(obj);
585}
586EXPORT_SYMBOL(drm_gem_object_free);
587
/*
 * kref release callback that must never run: it is passed to kref_put()
 * only where another reference is known to still be held, so reaching it
 * means the refcounting invariant was broken.
 */
static void drm_gem_object_ref_bug(struct kref *list_kref)
{
	BUG();
}
592
593
594
595
596
597
598
599
/**
 * drm_gem_object_handle_free - called when an object's last handle goes away
 * @obj: object whose handle count dropped to zero
 *
 * Removes the object's global flink name (if any) and drops the extra
 * reference that the name table held on the object.
 */
void drm_gem_object_handle_free(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;

	/* Remove any name for this object */
	spin_lock(&dev->object_name_lock);
	if (obj->name) {
		idr_remove(&dev->object_name_idr, obj->name);
		obj->name = 0;
		spin_unlock(&dev->object_name_lock);

		/*
		 * The object name held a reference on this object; drop
		 * it now.  This cannot be the last reference, because the
		 * caller's handle still holds one, so it is safe to use
		 * the BUG()-ing release callback to document that
		 * invariant rather than actually free anything here.
		 */
		kref_put(&obj->refcount, drm_gem_object_ref_bug);
	} else
		spin_unlock(&dev->object_name_lock);

}
EXPORT_SYMBOL(drm_gem_object_handle_free);
622
623void drm_gem_vm_open(struct vm_area_struct *vma)
624{
625 struct drm_gem_object *obj = vma->vm_private_data;
626
627 drm_gem_object_reference(obj);
628
629 mutex_lock(&obj->dev->struct_mutex);
630 drm_vm_open_locked(obj->dev, vma);
631 mutex_unlock(&obj->dev->struct_mutex);
632}
633EXPORT_SYMBOL(drm_gem_vm_open);
634
635void drm_gem_vm_close(struct vm_area_struct *vma)
636{
637 struct drm_gem_object *obj = vma->vm_private_data;
638 struct drm_device *dev = obj->dev;
639
640 mutex_lock(&dev->struct_mutex);
641 drm_vm_close_locked(obj->dev, vma);
642 drm_gem_object_unreference(obj);
643 mutex_unlock(&dev->struct_mutex);
644}
645EXPORT_SYMBOL(drm_gem_vm_close);
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
/**
 * drm_gem_mmap - memory map routine for GEM objects
 * @filp: DRM file pointer
 * @vma: VMA for the area to be mapped
 *
 * If the offset in vma->vm_pgoff hashes to a GEM object's fake mmap
 * offset, set the VMA up for the driver's fault handler; otherwise fall
 * back to the legacy drm_mmap() path.  Takes a reference on the object
 * for the lifetime of the mapping (dropped in drm_gem_vm_close()).
 *
 * Returns 0 on success, -ENODEV if the device was unplugged, -EPERM for
 * restricted maps without CAP_SYS_ADMIN, -EINVAL for a too-large request
 * or a driver without gem_vm_ops, or the result of drm_mmap().
 */
int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_gem_mm *mm = dev->mm_private;
	struct drm_local_map *map = NULL;
	struct drm_gem_object *obj;
	struct drm_hash_item *hash;
	int ret = 0;

	if (drm_device_is_unplugged(dev))
		return -ENODEV;

	mutex_lock(&dev->struct_mutex);

	/* Not a GEM offset: hand the mmap to the legacy map path. */
	if (drm_ht_find_item(&mm->offset_hash, vma->vm_pgoff, &hash)) {
		mutex_unlock(&dev->struct_mutex);
		return drm_mmap(filp, vma);
	}

	map = drm_hash_entry(hash, struct drm_map_list, hash)->map;
	if (!map ||
	    ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN))) {
		ret = -EPERM;
		goto out_unlock;
	}

	/* Check for valid size: the VMA must fit inside the object. */
	if (map->size < vma->vm_end - vma->vm_start) {
		ret = -EINVAL;
		goto out_unlock;
	}

	obj = map->handle;
	if (!obj->dev->driver->gem_vm_ops) {
		ret = -EINVAL;
		goto out_unlock;
	}

	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_ops = obj->dev->driver->gem_vm_ops;
	vma->vm_private_data = map->handle;
	vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));

	/* Take a ref for this mapping of the object, so that the fault
	 * handler can dereference the mmap offset's pointer to the object.
	 * This reference is cleaned up by the corresponding vm_close
	 * (which should happen whether the vma was created by this call,
	 * or by a vm_open due to mremap or partial unmap or whatever).
	 */
	drm_gem_object_reference(obj);

	drm_vm_open_locked(dev, vma);

out_unlock:
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
EXPORT_SYMBOL(drm_gem_mmap);
722