/*
 * Copyright 2011 Red Hat, Inc.
 * Copyright © 2014 The Chromium OS Authors
 * All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software")
 * to deal in the software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * them Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
 * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *	Adam Jackson <ajax@redhat.com>
 *	Ben Widawsky <ben@bwidawsk.net>
 */

/*
 * This is vgem, a (non-hardware-backed) GEM service.  This is used by Mesa's
 * software renderer and the X server for efficient buffer sharing.
 */
33#include <linux/module.h>
34#include <linux/ramfs.h>
35#include <linux/shmem_fs.h>
36#include <linux/dma-buf.h>
37#include "vgem_drv.h"
38
39#define DRIVER_NAME "vgem"
40#define DRIVER_DESC "Virtual GEM provider"
41#define DRIVER_DATE "20120112"
42#define DRIVER_MAJOR 1
43#define DRIVER_MINOR 0
44
/* Final release of a vgem BO: tear down the GEM core state, then free the
 * wrapping allocation made in vgem_gem_create().
 */
static void vgem_gem_free_object(struct drm_gem_object *obj)
{
	struct drm_vgem_gem_object *bo = to_vgem_bo(obj);

	drm_gem_object_release(obj);
	kfree(bo);
}
52
53static int vgem_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
54{
55 struct drm_vgem_gem_object *obj = vma->vm_private_data;
56
57 unsigned long vaddr = (unsigned long)vmf->virtual_address;
58 struct page *page;
59
60 page = shmem_read_mapping_page(file_inode(obj->base.filp)->i_mapping,
61 (vaddr - vma->vm_start) >> PAGE_SHIFT);
62 if (!IS_ERR(page)) {
63 vmf->page = page;
64 return 0;
65 } else switch (PTR_ERR(page)) {
66 case -ENOSPC:
67 case -ENOMEM:
68 return VM_FAULT_OOM;
69 case -EBUSY:
70 return VM_FAULT_RETRY;
71 case -EFAULT:
72 case -EINVAL:
73 return VM_FAULT_SIGBUS;
74 default:
75 WARN_ON_ONCE(PTR_ERR(page));
76 return VM_FAULT_SIGBUS;
77 }
78}
79
/* VMA callbacks for vgem BO mappings: demand-fault shmem pages, and keep the
 * GEM object refcount in sync with VMA duplication/teardown via the generic
 * drm_gem_vm_open/close helpers.
 */
static const struct vm_operations_struct vgem_gem_vm_ops = {
	.fault = vgem_gem_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};
85
86static int vgem_open(struct drm_device *dev, struct drm_file *file)
87{
88 struct vgem_file *vfile;
89 int ret;
90
91 vfile = kzalloc(sizeof(*vfile), GFP_KERNEL);
92 if (!vfile)
93 return -ENOMEM;
94
95 file->driver_priv = vfile;
96
97 ret = vgem_fence_open(vfile);
98 if (ret) {
99 kfree(vfile);
100 return ret;
101 }
102
103 return 0;
104}
105
106static void vgem_preclose(struct drm_device *dev, struct drm_file *file)
107{
108 struct vgem_file *vfile = file->driver_priv;
109
110 vgem_fence_close(vfile);
111 kfree(vfile);
112}
113
114
115
116static struct drm_gem_object *vgem_gem_create(struct drm_device *dev,
117 struct drm_file *file,
118 unsigned int *handle,
119 unsigned long size)
120{
121 struct drm_vgem_gem_object *obj;
122 int ret;
123
124 obj = kzalloc(sizeof(*obj), GFP_KERNEL);
125 if (!obj)
126 return ERR_PTR(-ENOMEM);
127
128 ret = drm_gem_object_init(dev, &obj->base, roundup(size, PAGE_SIZE));
129 if (ret)
130 goto err_free;
131
132 ret = drm_gem_handle_create(file, &obj->base, handle);
133 drm_gem_object_unreference_unlocked(&obj->base);
134 if (ret)
135 goto err;
136
137 return &obj->base;
138
139err_free:
140 kfree(obj);
141err:
142 return ERR_PTR(ret);
143}
144
145static int vgem_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
146 struct drm_mode_create_dumb *args)
147{
148 struct drm_gem_object *gem_object;
149 u64 pitch, size;
150
151 pitch = args->width * DIV_ROUND_UP(args->bpp, 8);
152 size = args->height * pitch;
153 if (size == 0)
154 return -EINVAL;
155
156 gem_object = vgem_gem_create(dev, file, &args->handle, size);
157 if (IS_ERR(gem_object))
158 return PTR_ERR(gem_object);
159
160 args->size = gem_object->size;
161 args->pitch = pitch;
162
163 DRM_DEBUG_DRIVER("Created object of size %lld\n", size);
164
165 return 0;
166}
167
168static int vgem_gem_dumb_map(struct drm_file *file, struct drm_device *dev,
169 uint32_t handle, uint64_t *offset)
170{
171 struct drm_gem_object *obj;
172 int ret;
173
174 obj = drm_gem_object_lookup(file, handle);
175 if (!obj)
176 return -ENOENT;
177
178 if (!obj->filp) {
179 ret = -EINVAL;
180 goto unref;
181 }
182
183 ret = drm_gem_create_mmap_offset(obj);
184 if (ret)
185 goto unref;
186
187 *offset = drm_vma_node_offset_addr(&obj->vma_node);
188unref:
189 drm_gem_object_unreference_unlocked(obj);
190
191 return ret;
192}
193
/* Driver-private ioctls: fence attach/signal (implemented in the vgem fence
 * code declared by vgem_drv.h), available to authenticated and render-node
 * clients alike.
 */
static struct drm_ioctl_desc vgem_ioctls[] = {
	DRM_IOCTL_DEF_DRV(VGEM_FENCE_ATTACH, vgem_fence_attach_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VGEM_FENCE_SIGNAL, vgem_fence_signal_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
};
198
/* mmap entry point: let the generic GEM mmap code set up the VMA, then
 * restore the caller's original vm_flags (plus DONTEXPAND/DONTDUMP),
 * deliberately discarding the special-mapping flags drm_gem_mmap() set.
 */
static int vgem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	unsigned long flags = vma->vm_flags;
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret)
		return ret;

	/* Keep the WC mmaping set by drm_gem_mmap() but our pages
	 * are ordinary and not special.
	 */
	vma->vm_flags = flags | VM_DONTEXPAND | VM_DONTDUMP;
	return 0;
}
214
/* Character-device file operations: everything is the generic DRM helper
 * except mmap, which vgem overrides to adjust vm_flags (see vgem_mmap).
 */
static const struct file_operations vgem_driver_fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.mmap = vgem_mmap,
	.poll = drm_poll,
	.read = drm_read,
	.unlocked_ioctl = drm_ioctl,
	.release = drm_release,
};
224
/* PRIME pin hook: materialise the object's backing pages and flush them
 * from the CPU caches before they can be handed to an importer.
 */
static int vgem_prime_pin(struct drm_gem_object *obj)
{
	long n_pages = obj->size >> PAGE_SHIFT;
	struct page **pages;

	/* Flush the object from the CPU cache so that importers can rely
	 * on coherent indirect access via the exported dma-buf.
	 */
	pages = drm_gem_get_pages(obj);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	drm_clflush_pages(pages, n_pages);
	drm_gem_put_pages(obj, pages, true, false);

	return 0;
}
242
243static struct sg_table *vgem_prime_get_sg_table(struct drm_gem_object *obj)
244{
245 struct sg_table *st;
246 struct page **pages;
247
248 pages = drm_gem_get_pages(obj);
249 if (IS_ERR(pages))
250 return ERR_CAST(pages);
251
252 st = drm_prime_pages_to_sg(pages, obj->size >> PAGE_SHIFT);
253 drm_gem_put_pages(obj, pages, false, false);
254
255 return st;
256}
257
258static void *vgem_prime_vmap(struct drm_gem_object *obj)
259{
260 long n_pages = obj->size >> PAGE_SHIFT;
261 struct page **pages;
262 void *addr;
263
264 pages = drm_gem_get_pages(obj);
265 if (IS_ERR(pages))
266 return NULL;
267
268 addr = vmap(pages, n_pages, 0, pgprot_writecombine(PAGE_KERNEL));
269 drm_gem_put_pages(obj, pages, false, false);
270
271 return addr;
272}
273
/* Undo vgem_prime_vmap(): tear down the kernel virtual mapping. */
static void vgem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
{
	vunmap(vaddr);
}
278
/* PRIME mmap hook: map an exported vgem object into an importer's address
 * space by delegating to the backing file's own mmap implementation.
 */
static int vgem_prime_mmap(struct drm_gem_object *obj,
			   struct vm_area_struct *vma)
{
	int ret;

	/* The requested mapping must fit entirely within the object. */
	if (obj->size < vma->vm_end - vma->vm_start)
		return -EINVAL;

	/* No shmem backing file means nothing to map. */
	if (!obj->filp)
		return -ENODEV;

	ret = obj->filp->f_op->mmap(obj->filp, vma);
	if (ret)
		return ret;

	/* Swap the VMA's file reference (presumably the dma-buf file the
	 * caller mmap'ed — confirm against the PRIME import path) for the
	 * backing shmem file, so the VMA pins the object's storage, then
	 * apply the usual GEM flags and a write-combining protection. */
	fput(vma->vm_file);
	vma->vm_file = get_file(obj->filp);
	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));

	return 0;
}
301
/* The vgem DRM driver description: a GEM-only, PRIME-exporting driver with
 * no modesetting.  Wires the per-file open/close hooks, the BO lifecycle,
 * the dumb-buffer interface, and the PRIME export callbacks defined above.
 */
static struct drm_driver vgem_driver = {
	.driver_features = DRIVER_GEM | DRIVER_PRIME,
	.open = vgem_open,
	.preclose = vgem_preclose,
	.gem_free_object_unlocked = vgem_gem_free_object,
	.gem_vm_ops = &vgem_gem_vm_ops,
	.ioctls = vgem_ioctls,
	.num_ioctls = ARRAY_SIZE(vgem_ioctls),
	.fops = &vgem_driver_fops,

	.dumb_create = vgem_gem_dumb_create,
	.dumb_map_offset = vgem_gem_dumb_map,

	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.gem_prime_pin = vgem_prime_pin,
	.gem_prime_export = drm_gem_prime_export,
	.gem_prime_get_sg_table = vgem_prime_get_sg_table,
	.gem_prime_vmap = vgem_prime_vmap,
	.gem_prime_vunmap = vgem_prime_vunmap,
	.gem_prime_mmap = vgem_prime_mmap,

	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
	.date = DRIVER_DATE,
	.major = DRIVER_MAJOR,
	.minor = DRIVER_MINOR,
};
329
/* The single virtual DRM device instance, created at module init. */
static struct drm_device *vgem_device;
331
332static int __init vgem_init(void)
333{
334 int ret;
335
336 vgem_device = drm_dev_alloc(&vgem_driver, NULL);
337 if (IS_ERR(vgem_device)) {
338 ret = PTR_ERR(vgem_device);
339 goto out;
340 }
341
342 ret = drm_dev_register(vgem_device, 0);
343 if (ret)
344 goto out_unref;
345
346 return 0;
347
348out_unref:
349 drm_dev_unref(vgem_device);
350out:
351 return ret;
352}
353
/* Module exit: unregister the device and drop the last reference taken at
 * init, undoing vgem_init().
 */
static void __exit vgem_exit(void)
{
	drm_dev_unregister(vgem_device);
	drm_dev_unref(vgem_device);
}
359
module_init(vgem_init);
module_exit(vgem_exit);

/* Module metadata. */
MODULE_AUTHOR("Red Hat, Inc.");
MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL and additional rights");
367