// SPDX-License-Identifier: GPL-2.0

#include <linux/shmem_fs.h>
#include <linux/vmalloc.h>

#include "vkms_drv.h"

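/*
 * Allocate a vkms_gem_object and initialize the embedded GEM object
 * with a shmem backing store of @size rounded up to a whole number of
 * pages. Returns the new object or an ERR_PTR() on failure.
 */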
static struct vkms_gem_object *__vkms_gem_create(struct drm_device *dev,
						 u64 size)
{
	struct vkms_gem_object *obj;
	int ret;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return ERR_PTR(-ENOMEM);

	size = roundup(size, PAGE_SIZE);
	ret = drm_gem_object_init(dev, &obj->gem, size);
	if (ret) {
		kfree(obj);
		return ERR_PTR(ret);
	}

	mutex_init(&obj->pages_lock);

	return obj;
}

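/*
 * Called when the last reference to the GEM object is dropped. Every
 * vmap must already have been balanced by a vunmap at this point, so
 * both the page array and the kernel mapping should be gone; the
 * WARN_ONs catch leaks.
 */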
void vkms_gem_free_object(struct drm_gem_object *obj)
{
	struct vkms_gem_object *gem = container_of(obj, struct vkms_gem_object,
						   gem);

	WARN_ON(gem->pages);
	WARN_ON(gem->vaddr);

	mutex_destroy(&gem->pages_lock);
	drm_gem_object_release(obj);
	kfree(gem);
}

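/*
 * Page fault handler for user-space mmaps of the GEM object: hand back
 * the already-pinned page when the object is currently vmapped,
 * otherwise pull the page in from the shmem backing store.
 */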
vm_fault_t vkms_gem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct vkms_gem_object *obj = vma->vm_private_data;
	unsigned long vaddr = vmf->address;
	pgoff_t page_offset;
	loff_t num_pages;
	vm_fault_t ret = VM_FAULT_SIGBUS;

	page_offset = (vaddr - vma->vm_start) >> PAGE_SHIFT;
	num_pages = DIV_ROUND_UP(obj->gem.size, PAGE_SIZE);

	/* Valid page offsets are 0 .. num_pages - 1. */
	if (page_offset >= num_pages)
		return VM_FAULT_SIGBUS;

	mutex_lock(&obj->pages_lock);
	if (obj->pages) {
		get_page(obj->pages[page_offset]);
		vmf->page = obj->pages[page_offset];
		ret = 0;
	}
	mutex_unlock(&obj->pages_lock);
	if (ret) {
		struct page *page;
		struct address_space *mapping;

		mapping = file_inode(obj->gem.filp)->i_mapping;
		page = shmem_read_mapping_page(mapping, page_offset);

		if (!IS_ERR(page)) {
			vmf->page = page;
			ret = 0;
		} else {
			switch (PTR_ERR(page)) {
			case -ENOSPC:
			case -ENOMEM:
				ret = VM_FAULT_OOM;
				break;
			case -EBUSY:
				ret = VM_FAULT_RETRY;
				break;
			case -EFAULT:
			case -EINVAL:
				ret = VM_FAULT_SIGBUS;
				break;
			default:
				WARN_ON(PTR_ERR(page));
				ret = VM_FAULT_SIGBUS;
				break;
			}
		}
	}
	return ret;
}

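/*
 * Create a GEM object of @size bytes and install a handle for it in
 * @file. The reference taken at allocation is handed over to the
 * handle, so on success the object lives for as long as the handle
 * does.
 */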
struct drm_gem_object *vkms_gem_create(struct drm_device *dev,
				       struct drm_file *file,
				       u32 *handle,
				       u64 size)
{
	struct vkms_gem_object *obj;
	int ret;

	if (!file || !dev || !handle)
		return ERR_PTR(-EINVAL);

	obj = __vkms_gem_create(dev, size);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	ret = drm_gem_handle_create(file, &obj->gem, handle);
	/*
	 * Drop the reference from allocation; on failure this is the last
	 * reference and frees the object, so no further cleanup is needed
	 * (releasing and freeing it again here would be a double free).
	 */
	drm_gem_object_put_unlocked(&obj->gem);
	if (ret)
		return ERR_PTR(ret);

	return &obj->gem;
}

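/*
 * DUMB_CREATE ioctl entry point: derive the pitch and total size from
 * the requested width/height/bpp and back the buffer with a new GEM
 * object.
 */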
int vkms_dumb_create(struct drm_file *file, struct drm_device *dev,
		     struct drm_mode_create_dumb *args)
{
	struct drm_gem_object *gem_obj;
	u64 pitch, size;

	if (!args || !dev || !file)
		return -EINVAL;

	/* Widen before multiplying so a large width cannot overflow u32. */
	pitch = (u64)args->width * DIV_ROUND_UP(args->bpp, 8);
	size = pitch * args->height;

	if (!size)
		return -EINVAL;

	gem_obj = vkms_gem_create(dev, file, &args->handle, size);
	if (IS_ERR(gem_obj))
		return PTR_ERR(gem_obj);

	args->size = gem_obj->size;
	args->pitch = pitch;

	DRM_DEBUG_DRIVER("Created object of size %llu\n", size);

	return 0;
}

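/*
 * Pin the object's backing pages, caching the page array in
 * vkms_obj->pages. The cmpxchg() lets exactly one concurrent caller
 * install its array; the loser releases its duplicate.
 */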
static struct page **_get_pages(struct vkms_gem_object *vkms_obj)
{
	struct drm_gem_object *gem_obj = &vkms_obj->gem;

	if (!vkms_obj->pages) {
		struct page **pages = drm_gem_get_pages(gem_obj);

		if (IS_ERR(pages))
			return pages;

		if (cmpxchg(&vkms_obj->pages, NULL, pages))
			drm_gem_put_pages(gem_obj, pages, false, true);
	}

	return vkms_obj->pages;
}

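/*
 * Drop one vmap reference; when the count reaches zero, tear down the
 * kernel mapping and unpin the backing pages.
 */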
void vkms_gem_vunmap(struct drm_gem_object *obj)
{
	struct vkms_gem_object *vkms_obj = drm_gem_to_vkms_gem(obj);

	mutex_lock(&vkms_obj->pages_lock);
	if (vkms_obj->vmap_count < 1) {
		WARN_ON(vkms_obj->vaddr);
		WARN_ON(vkms_obj->pages);
		mutex_unlock(&vkms_obj->pages_lock);
		return;
	}

	vkms_obj->vmap_count--;

	if (vkms_obj->vmap_count == 0) {
		vunmap(vkms_obj->vaddr);
		vkms_obj->vaddr = NULL;
		drm_gem_put_pages(obj, vkms_obj->pages, false, true);
		vkms_obj->pages = NULL;
	}

	mutex_unlock(&vkms_obj->pages_lock);
}

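/*
 * Map the object's backing pages into the kernel's vmalloc area,
 * pinning them on first use. Calls nest via vmap_count; each
 * successful vmap must be balanced by a vkms_gem_vunmap().
 */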
int vkms_gem_vmap(struct drm_gem_object *obj)
{
	struct vkms_gem_object *vkms_obj = drm_gem_to_vkms_gem(obj);
	int ret = 0;

	mutex_lock(&vkms_obj->pages_lock);

	if (!vkms_obj->vaddr) {
		unsigned int n_pages = obj->size >> PAGE_SHIFT;
		struct page **pages = _get_pages(vkms_obj);

		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto out;
		}

		vkms_obj->vaddr = vmap(pages, n_pages, VM_MAP, PAGE_KERNEL);
		if (!vkms_obj->vaddr)
			goto err_vmap;
	}

	vkms_obj->vmap_count++;
	goto out;

err_vmap:
	ret = -ENOMEM;
	drm_gem_put_pages(obj, vkms_obj->pages, false, true);
	vkms_obj->pages = NULL;
out:
	mutex_unlock(&vkms_obj->pages_lock);
	return ret;
}