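/*
 * GEM (Graphics Execution Manager) buffer object support for the DRM core:
 * object allocation and refcounting, per-file handles, global "flink"
 * names, and fake mmap offsets.  (Descriptive header added by the editor;
 * the original copyright/license block is not reproduced here.)
 */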
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include "drmP.h"
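/*
 * GEM gives drivers a handle-based interface to graphics buffer objects.
 * Objects are reference counted in the kernel, exposed to userspace through
 * per-file handles, optionally shared between processes via global "flink"
 * names, and mapped into userspace through fake mmap offsets allocated from
 * the range reserved below.
 *
 * We make up offsets for buffer objects so we can recognize them at mmap
 * time.  pgoff in mmap is an unsigned long, so the faked-up offset has to
 * fit in 32 bits on 32-bit kernels, hence the two variants.
 */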
#if BITS_PER_LONG == 64
#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFFUL >> PAGE_SHIFT) + 1)
#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFFUL >> PAGE_SHIFT) * 16)
#else
#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFUL >> PAGE_SHIFT) + 1)
#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFUL >> PAGE_SHIFT) * 16)
#endif
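/**
 * Initialize the GEM device fields
 */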
int
drm_gem_init(struct drm_device *dev)
{
	struct drm_gem_mm *mm;

	spin_lock_init(&dev->object_name_lock);
	idr_init(&dev->object_name_idr);

	mm = kzalloc(sizeof(struct drm_gem_mm), GFP_KERNEL);
	if (!mm) {
		DRM_ERROR("out of memory\n");
		return -ENOMEM;
	}

	dev->mm_private = mm;

	if (drm_ht_create(&mm->offset_hash, 12)) {
		kfree(mm);
		return -ENOMEM;
	}

	if (drm_mm_init(&mm->offset_manager, DRM_FILE_PAGE_OFFSET_START,
			DRM_FILE_PAGE_OFFSET_SIZE)) {
		drm_ht_remove(&mm->offset_hash);
		kfree(mm);
		return -ENOMEM;
	}

	return 0;
}

void
drm_gem_destroy(struct drm_device *dev)
{
	struct drm_gem_mm *mm = dev->mm_private;

	drm_mm_takedown(&mm->offset_manager);
	drm_ht_remove(&mm->offset_hash);
	kfree(mm);
	dev->mm_private = NULL;
}
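/**
 * Initialize an already allocated GEM object of the specified size with
 * shmfs backing store.
 */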
int drm_gem_object_init(struct drm_device *dev,
			struct drm_gem_object *obj, size_t size)
{
	BUG_ON((size & (PAGE_SIZE - 1)) != 0);

	obj->dev = dev;
	obj->filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
	if (IS_ERR(obj->filp))
		return PTR_ERR(obj->filp);

	kref_init(&obj->refcount);
	atomic_set(&obj->handle_count, 0);
	obj->size = size;

	return 0;
}
EXPORT_SYMBOL(drm_gem_object_init);
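/**
 * Initialize an already allocated GEM object of the specified size with
 * no GEM provided backing store.  Instead the caller is responsible for
 * backing the object and handling it.
 */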
int drm_gem_private_object_init(struct drm_device *dev,
				struct drm_gem_object *obj, size_t size)
{
	BUG_ON((size & (PAGE_SIZE - 1)) != 0);

	obj->dev = dev;
	obj->filp = NULL;

	kref_init(&obj->refcount);
	atomic_set(&obj->handle_count, 0);
	obj->size = size;

	return 0;
}
EXPORT_SYMBOL(drm_gem_private_object_init);
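/**
 * Allocate a GEM object of the specified size with shmfs backing store
 */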
struct drm_gem_object *
drm_gem_object_alloc(struct drm_device *dev, size_t size)
{
	struct drm_gem_object *obj;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		goto free;

	if (drm_gem_object_init(dev, obj, size) != 0)
		goto free;

	if (dev->driver->gem_init_object != NULL &&
	    dev->driver->gem_init_object(obj) != 0) {
		goto fput;
	}
	return obj;
fput:
	fput(obj->filp);
free:
	kfree(obj);
	return NULL;
}
EXPORT_SYMBOL(drm_gem_object_alloc);
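/**
 * Removes the mapping from handle to filp for this object.
 */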
int
drm_gem_handle_delete(struct drm_file *filp, u32 handle)
{
	struct drm_device *dev;
	struct drm_gem_object *obj;
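	/*
	 * The idr gives us no way to try a delete and get an error back,
	 * so take the table lock around the lookup, removal and unref to
	 * keep a concurrent close of the same handle from double-dropping
	 * the reference and causing a use-after-free.
	 */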
	spin_lock(&filp->table_lock);

	obj = idr_find(&filp->object_idr, handle);
	if (obj == NULL) {
		spin_unlock(&filp->table_lock);
		return -EINVAL;
	}
	dev = obj->dev;

	idr_remove(&filp->object_idr, handle);
	spin_unlock(&filp->table_lock);

	if (obj->import_attach)
		drm_prime_remove_imported_buf_handle(&filp->prime,
				obj->import_attach->dmabuf);

	if (dev->driver->gem_close_object)
		dev->driver->gem_close_object(obj, filp);
	drm_gem_object_handle_unreference_unlocked(obj);

	return 0;
}
EXPORT_SYMBOL(drm_gem_handle_delete);
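/**
 * Create a handle for this object.  This adds a handle reference
 * to the object, which includes a regular reference count.  Callers
 * will likely want to dereference the object afterwards.
 */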
int
drm_gem_handle_create(struct drm_file *file_priv,
		      struct drm_gem_object *obj,
		      u32 *handlep)
{
	struct drm_device *dev = obj->dev;
	int ret;
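	/*
	 * Get the user-visible handle using the idr: reserve memory outside
	 * the spinlock, then retry if the allocation under the lock raced
	 * with another thread (-EAGAIN).
	 */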
again:
	if (idr_pre_get(&file_priv->object_idr, GFP_KERNEL) == 0)
		return -ENOMEM;

	spin_lock(&file_priv->table_lock);
	ret = idr_get_new_above(&file_priv->object_idr, obj, 1, (int *)handlep);
	spin_unlock(&file_priv->table_lock);
	if (ret == -EAGAIN)
		goto again;

	if (ret != 0)
		return ret;

	drm_gem_object_handle_reference(obj);

	if (dev->driver->gem_open_object) {
		ret = dev->driver->gem_open_object(obj, file_priv);
		if (ret) {
			drm_gem_handle_delete(file_priv, *handlep);
			return ret;
		}
	}

	return 0;
}
EXPORT_SYMBOL(drm_gem_handle_create);
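/**
 * drm_gem_free_mmap_offset - release a fake mmap offset for an object
 * @obj: obj in question
 *
 * This routine frees fake offsets allocated by drm_gem_create_mmap_offset().
 */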
void
drm_gem_free_mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_gem_mm *mm = dev->mm_private;
	struct drm_map_list *list = &obj->map_list;

	drm_ht_remove_item(&mm->offset_hash, &list->hash);
	drm_mm_put_block(list->file_offset_node);
	kfree(list->map);
	list->map = NULL;
}
EXPORT_SYMBOL(drm_gem_free_mmap_offset);
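/**
 * drm_gem_create_mmap_offset - create a fake mmap offset for an object
 * @obj: obj in question
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call.  The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj.
 */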
int
drm_gem_create_mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_gem_mm *mm = dev->mm_private;
	struct drm_map_list *list;
	struct drm_local_map *map;
	int ret = 0;

	list = &obj->map_list;
	list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL);
	if (!list->map)
		return -ENOMEM;

	map = list->map;
	map->type = _DRM_GEM;
	map->size = obj->size;
	map->handle = obj;

	list->file_offset_node = drm_mm_search_free(&mm->offset_manager,
			obj->size / PAGE_SIZE, 0, 0);

	if (!list->file_offset_node) {
		DRM_ERROR("failed to allocate offset for bo %d\n", obj->name);
		ret = -ENOSPC;
		goto out_free_list;
	}

	list->file_offset_node = drm_mm_get_block(list->file_offset_node,
			obj->size / PAGE_SIZE, 0);
	if (!list->file_offset_node) {
		ret = -ENOMEM;
		goto out_free_list;
	}

	list->hash.key = list->file_offset_node->start;
	ret = drm_ht_insert_item(&mm->offset_hash, &list->hash);
	if (ret) {
		DRM_ERROR("failed to add to map hash\n");
		goto out_free_mm;
	}

	return 0;

out_free_mm:
	drm_mm_put_block(list->file_offset_node);
out_free_list:
	kfree(list->map);
	list->map = NULL;

	return ret;
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset);
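/** Returns a reference to the object named by the handle. */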
struct drm_gem_object *
drm_gem_object_lookup(struct drm_device *dev, struct drm_file *filp,
		      u32 handle)
{
	struct drm_gem_object *obj;

	spin_lock(&filp->table_lock);

	obj = idr_find(&filp->object_idr, handle);
	if (obj == NULL) {
		spin_unlock(&filp->table_lock);
		return NULL;
	}

	drm_gem_object_reference(obj);

	spin_unlock(&filp->table_lock);

	return obj;
}
EXPORT_SYMBOL(drm_gem_object_lookup);
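/**
 * Releases the handle to an mm object.
 */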
int
drm_gem_close_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_gem_close *args = data;
	int ret;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	ret = drm_gem_handle_delete(file_priv, args->handle);

	return ret;
}
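/**
 * Create a global name for an object, returning the name.
 *
 * Note that the name does not hold a reference; when the object
 * is freed, the name goes away.
 */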
int
drm_gem_flink_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_gem_flink *args = data;
	struct drm_gem_object *obj;
	int ret;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -ENOENT;

again:
	if (idr_pre_get(&dev->object_name_idr, GFP_KERNEL) == 0) {
		ret = -ENOMEM;
		goto err;
	}

	spin_lock(&dev->object_name_lock);
	if (!obj->name) {
		ret = idr_get_new_above(&dev->object_name_idr, obj, 1,
					&obj->name);
		args->name = (uint64_t) obj->name;
		spin_unlock(&dev->object_name_lock);

		if (ret == -EAGAIN)
			goto again;

		if (ret != 0)
			goto err;

		drm_gem_object_reference(obj);
	} else {
		args->name = (uint64_t) obj->name;
		spin_unlock(&dev->object_name_lock);
		ret = 0;
	}

err:
	drm_gem_object_unreference_unlocked(obj);
	return ret;
}
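/**
 * Open an object using the global name, returning a handle and the size.
 *
 * This handle (of course) holds a reference to the object, so the object
 * will not go away until the handle is deleted.
 */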
int
drm_gem_open_ioctl(struct drm_device *dev, void *data,
		   struct drm_file *file_priv)
{
	struct drm_gem_open *args = data;
	struct drm_gem_object *obj;
	int ret;
	u32 handle;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	spin_lock(&dev->object_name_lock);
	obj = idr_find(&dev->object_name_idr, (int) args->name);
	if (obj)
		drm_gem_object_reference(obj);
	spin_unlock(&dev->object_name_lock);
	if (!obj)
		return -ENOENT;

	ret = drm_gem_handle_create(file_priv, obj, &handle);
	drm_gem_object_unreference_unlocked(obj);
	if (ret)
		return ret;

	args->handle = handle;
	args->size = obj->size;

	return 0;
}
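/**
 * Called at device open time, sets up the structure for handling refcounting
 * of mm objects.
 */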
void
drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
{
	idr_init(&file_private->object_idr);
	spin_lock_init(&file_private->table_lock);
}
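/**
 * Called at device close to release the file's
 * handle references on objects.
 */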
static int
drm_gem_object_release_handle(int id, void *ptr, void *data)
{
	struct drm_file *file_priv = data;
	struct drm_gem_object *obj = ptr;
	struct drm_device *dev = obj->dev;

	if (obj->import_attach)
		drm_prime_remove_imported_buf_handle(&file_priv->prime,
				obj->import_attach->dmabuf);

	if (dev->driver->gem_close_object)
		dev->driver->gem_close_object(obj, file_priv);

	drm_gem_object_handle_unreference_unlocked(obj);

	return 0;
}
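/**
 * Called at close time when the filp is going away.
 *
 * Releases any remaining references on objects by this filp.
 */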
void
drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
{
	idr_for_each(&file_private->object_idr,
		     &drm_gem_object_release_handle, file_private);

	idr_remove_all(&file_private->object_idr);
	idr_destroy(&file_private->object_idr);
}

void
drm_gem_object_release(struct drm_gem_object *obj)
{
	if (obj->filp)
		fput(obj->filp);
}
EXPORT_SYMBOL(drm_gem_object_release);
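/**
 * Called after the last reference to the object has been lost.
 * Must be called holding struct_mutex.
 *
 * Frees the object.
 */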
void
drm_gem_object_free(struct kref *kref)
{
	struct drm_gem_object *obj = (struct drm_gem_object *) kref;
	struct drm_device *dev = obj->dev;

	BUG_ON(!mutex_is_locked(&dev->struct_mutex));

	if (dev->driver->gem_free_object != NULL)
		dev->driver->gem_free_object(obj);
}
EXPORT_SYMBOL(drm_gem_object_free);

static void drm_gem_object_ref_bug(struct kref *list_kref)
{
	BUG();
}
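/**
 * Called after the last handle to the object has been closed.
 *
 * Removes any name for the object.  Note that this must be
 * called before drm_gem_object_free or we'll be touching
 * freed memory.
 */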
void drm_gem_object_handle_free(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;

	spin_lock(&dev->object_name_lock);
	if (obj->name) {
		idr_remove(&dev->object_name_idr, obj->name);
		obj->name = 0;
		spin_unlock(&dev->object_name_lock);
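		/*
		 * The object name held a reference to this object, drop
		 * that now.
		 *
		 * This cannot be the last reference, since the handle
		 * holds one too.
		 */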
		kref_put(&obj->refcount, drm_gem_object_ref_bug);
	} else
		spin_unlock(&dev->object_name_lock);
}
EXPORT_SYMBOL(drm_gem_object_handle_free);
void drm_gem_vm_open(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;

	drm_gem_object_reference(obj);

	mutex_lock(&obj->dev->struct_mutex);
	drm_vm_open_locked(vma);
	mutex_unlock(&obj->dev->struct_mutex);
}
EXPORT_SYMBOL(drm_gem_vm_open);

void drm_gem_vm_close(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_device *dev = obj->dev;

	mutex_lock(&dev->struct_mutex);
	drm_vm_close_locked(vma);
	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);
}
EXPORT_SYMBOL(drm_gem_vm_close);
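/**
 * drm_gem_mmap - memory map routine for GEM objects
 * @filp: DRM file pointer
 * @vma: VMA for the area to be mapped
 *
 * If a driver supports GEM object mapping, mmap calls on the DRM file
 * descriptor will end up here.
 *
 * If we find the object based on the offset passed in (vma->vm_pgoff will
 * contain the fake offset created for the object), we set up the driver
 * fault handler so that any accesses to the object can be trapped, to
 * perform migration, GTT binding, surface register allocation, or
 * performance monitoring.
 */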
int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_gem_mm *mm = dev->mm_private;
	struct drm_local_map *map = NULL;
	struct drm_gem_object *obj;
	struct drm_hash_item *hash;
	int ret = 0;

	if (drm_device_is_unplugged(dev))
		return -ENODEV;

	mutex_lock(&dev->struct_mutex);

	if (drm_ht_find_item(&mm->offset_hash, vma->vm_pgoff, &hash)) {
		mutex_unlock(&dev->struct_mutex);
		return drm_mmap(filp, vma);
	}

	map = drm_hash_entry(hash, struct drm_map_list, hash)->map;
	if (!map ||
	    ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN))) {
		ret = -EPERM;
		goto out_unlock;
	}

	if (map->size < vma->vm_end - vma->vm_start) {
		ret = -EINVAL;
		goto out_unlock;
	}

	obj = map->handle;
	if (!obj->dev->driver->gem_vm_ops) {
		ret = -EINVAL;
		goto out_unlock;
	}

	vma->vm_flags |= VM_RESERVED | VM_IO | VM_PFNMAP | VM_DONTEXPAND;
	vma->vm_ops = obj->dev->driver->gem_vm_ops;
	vma->vm_private_data = map->handle;
	vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
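	/*
	 * Take a ref for this mapping of the object, so that the object
	 * won't disappear until this mapping is destroyed.
	 *
	 * The reference is dropped by the corresponding vm_close, whether
	 * the vma was set up by this call or by a later vm_open from
	 * mremap or a partial unmap.
	 */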
	drm_gem_object_reference(obj);

	drm_vm_open_locked(vma);

out_unlock:
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
EXPORT_SYMBOL(drm_gem_mmap);