1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20#include <linux/mm.h>
21#include <linux/slab.h>
22#include <linux/mutex.h>
23#include <linux/export.h>
24#include <linux/dma-buf.h>
25#include <linux/dma-mapping.h>
26
27#include <drm/drmP.h>
28#include <drm/drm.h>
29#include <drm/drm_gem_cma_helper.h>
30#include <drm/drm_vma_manager.h>
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55static struct drm_gem_cma_object *
56__drm_gem_cma_create(struct drm_device *drm, size_t size)
57{
58 struct drm_gem_cma_object *cma_obj;
59 struct drm_gem_object *gem_obj;
60 int ret;
61
62 cma_obj = kzalloc(sizeof(*cma_obj), GFP_KERNEL);
63 if (!cma_obj)
64 return ERR_PTR(-ENOMEM);
65
66 gem_obj = &cma_obj->base;
67
68 ret = drm_gem_object_init(drm, gem_obj, size);
69 if (ret)
70 goto error;
71
72 ret = drm_gem_create_mmap_offset(gem_obj);
73 if (ret) {
74 drm_gem_object_release(gem_obj);
75 goto error;
76 }
77
78 return cma_obj;
79
80error:
81 kfree(cma_obj);
82 return ERR_PTR(ret);
83}
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98struct drm_gem_cma_object *drm_gem_cma_create(struct drm_device *drm,
99 size_t size)
100{
101 struct drm_gem_cma_object *cma_obj;
102 int ret;
103
104 size = round_up(size, PAGE_SIZE);
105
106 cma_obj = __drm_gem_cma_create(drm, size);
107 if (IS_ERR(cma_obj))
108 return cma_obj;
109
110 cma_obj->vaddr = dma_alloc_writecombine(drm->dev, size,
111 &cma_obj->paddr, GFP_KERNEL | __GFP_NOWARN);
112 if (!cma_obj->vaddr) {
113 dev_err(drm->dev, "failed to allocate buffer with size %d\n",
114 size);
115 ret = -ENOMEM;
116 goto error;
117 }
118
119 return cma_obj;
120
121error:
122 drm_gem_cma_free_object(&cma_obj->base);
123 return ERR_PTR(ret);
124}
125EXPORT_SYMBOL_GPL(drm_gem_cma_create);
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
/*
 * drm_gem_cma_create_with_handle - create a CMA GEM object and a userspace
 *	handle referencing it
 * @file_priv: DRM file owning the new handle
 * @drm: DRM device
 * @size: requested buffer size in bytes
 * @handle: output; receives the newly created handle
 *
 * After drm_gem_handle_create() succeeds, the handle owns a reference to the
 * object, so the creation reference is dropped before returning — the
 * returned pointer stays valid only as long as the handle does.
 *
 * Returns the new object or an ERR_PTR() on failure.
 */
static struct drm_gem_cma_object *
drm_gem_cma_create_with_handle(struct drm_file *file_priv,
		struct drm_device *drm, size_t size,
		uint32_t *handle)
{
	struct drm_gem_cma_object *cma_obj;
	struct drm_gem_object *gem_obj;
	int ret;

	cma_obj = drm_gem_cma_create(drm, size);
	if (IS_ERR(cma_obj))
		return cma_obj;

	gem_obj = &cma_obj->base;

	/* The handle takes its own reference on the object. */
	ret = drm_gem_handle_create(file_priv, gem_obj, handle);
	if (ret)
		goto err_handle_create;

	/* Drop the creation reference; the handle keeps the object alive. */
	drm_gem_object_unreference_unlocked(gem_obj);

	return cma_obj;

err_handle_create:
	/* No handle was created, so tear the object down directly. */
	drm_gem_cma_free_object(gem_obj);

	return ERR_PTR(ret);
}
176
177
178
179
180
181
182
183
184
185
186void drm_gem_cma_free_object(struct drm_gem_object *gem_obj)
187{
188 struct drm_gem_cma_object *cma_obj;
189
190 cma_obj = to_drm_gem_cma_obj(gem_obj);
191
192 if (cma_obj->vaddr) {
193 dma_free_writecombine(gem_obj->dev->dev, cma_obj->base.size,
194 cma_obj->vaddr, cma_obj->paddr);
195 } else if (gem_obj->import_attach) {
196 drm_prime_gem_destroy(gem_obj, cma_obj->sgt);
197 }
198
199 drm_gem_object_release(gem_obj);
200
201 kfree(cma_obj);
202}
203EXPORT_SYMBOL_GPL(drm_gem_cma_free_object);
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219int drm_gem_cma_dumb_create_internal(struct drm_file *file_priv,
220 struct drm_device *drm,
221 struct drm_mode_create_dumb *args)
222{
223 unsigned int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
224 struct drm_gem_cma_object *cma_obj;
225
226 if (args->pitch < min_pitch)
227 args->pitch = min_pitch;
228
229 if (args->size < args->pitch * args->height)
230 args->size = args->pitch * args->height;
231
232 cma_obj = drm_gem_cma_create_with_handle(file_priv, drm, args->size,
233 &args->handle);
234 return PTR_ERR_OR_ZERO(cma_obj);
235}
236EXPORT_SYMBOL_GPL(drm_gem_cma_dumb_create_internal);
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256int drm_gem_cma_dumb_create(struct drm_file *file_priv,
257 struct drm_device *drm,
258 struct drm_mode_create_dumb *args)
259{
260 struct drm_gem_cma_object *cma_obj;
261
262 args->pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
263 args->size = args->pitch * args->height;
264
265 cma_obj = drm_gem_cma_create_with_handle(file_priv, drm, args->size,
266 &args->handle);
267 return PTR_ERR_OR_ZERO(cma_obj);
268}
269EXPORT_SYMBOL_GPL(drm_gem_cma_dumb_create);
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286int drm_gem_cma_dumb_map_offset(struct drm_file *file_priv,
287 struct drm_device *drm, u32 handle,
288 u64 *offset)
289{
290 struct drm_gem_object *gem_obj;
291
292 mutex_lock(&drm->struct_mutex);
293
294 gem_obj = drm_gem_object_lookup(drm, file_priv, handle);
295 if (!gem_obj) {
296 dev_err(drm->dev, "failed to lookup GEM object\n");
297 mutex_unlock(&drm->struct_mutex);
298 return -EINVAL;
299 }
300
301 *offset = drm_vma_node_offset_addr(&gem_obj->vma_node);
302
303 drm_gem_object_unreference(gem_obj);
304
305 mutex_unlock(&drm->struct_mutex);
306
307 return 0;
308}
309EXPORT_SYMBOL_GPL(drm_gem_cma_dumb_map_offset);
310
/*
 * VM operations for CMA GEM mappings: only reference-count bookkeeping via
 * the GEM core; no fault handler is needed since the whole buffer is mapped
 * up front in drm_gem_cma_mmap_obj().
 */
const struct vm_operations_struct drm_gem_cma_vm_ops = {
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};
EXPORT_SYMBOL_GPL(drm_gem_cma_vm_ops);
316
/*
 * drm_gem_cma_mmap_obj - map a CMA object's entire buffer into @vma
 * @cma_obj: object providing the backing memory
 * @vma: VMA prepared by the GEM core mmap path
 *
 * Returns 0 on success or a negative error code from
 * dma_mmap_writecombine().
 */
static int drm_gem_cma_mmap_obj(struct drm_gem_cma_object *cma_obj,
		struct vm_area_struct *vma)
{
	int ret;

	/*
	 * Clear the VM_PFNMAP flag that was set by drm_gem_mmap(), and set
	 * vm_pgoff (used as a fake buffer offset by DRM) to 0, as we want
	 * dma_mmap_writecombine() to map the whole buffer from its start.
	 */
	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_pgoff = 0;

	ret = dma_mmap_writecombine(cma_obj->base.dev->dev, vma,
			cma_obj->vaddr, cma_obj->paddr,
			vma->vm_end - vma->vm_start);
	if (ret)
		/* Undo the vm_open reference taken by the GEM mmap path. */
		drm_gem_vm_close(vma);

	return ret;
}
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354int drm_gem_cma_mmap(struct file *filp, struct vm_area_struct *vma)
355{
356 struct drm_gem_cma_object *cma_obj;
357 struct drm_gem_object *gem_obj;
358 int ret;
359
360 ret = drm_gem_mmap(filp, vma);
361 if (ret)
362 return ret;
363
364 gem_obj = vma->vm_private_data;
365 cma_obj = to_drm_gem_cma_obj(gem_obj);
366
367 return drm_gem_cma_mmap_obj(cma_obj, vma);
368}
369EXPORT_SYMBOL_GPL(drm_gem_cma_mmap);
370
371#ifdef CONFIG_DEBUG_FS
372
373
374
375
376
377
378
379
380void drm_gem_cma_describe(struct drm_gem_cma_object *cma_obj,
381 struct seq_file *m)
382{
383 struct drm_gem_object *obj = &cma_obj->base;
384 struct drm_device *dev = obj->dev;
385 uint64_t off;
386
387 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
388
389 off = drm_vma_node_start(&obj->vma_node);
390
391 seq_printf(m, "%2d (%2d) %08llx %pad %p %d",
392 obj->name, obj->refcount.refcount.counter,
393 off, &cma_obj->paddr, cma_obj->vaddr, obj->size);
394
395 seq_printf(m, "\n");
396}
397EXPORT_SYMBOL_GPL(drm_gem_cma_describe);
398#endif
399
400
401
402
403
404
405
406
407
408
409
410
411
412struct sg_table *drm_gem_cma_prime_get_sg_table(struct drm_gem_object *obj)
413{
414 struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(obj);
415 struct sg_table *sgt;
416 int ret;
417
418 sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
419 if (!sgt)
420 return NULL;
421
422 ret = dma_get_sgtable(obj->dev->dev, sgt, cma_obj->vaddr,
423 cma_obj->paddr, obj->size);
424 if (ret < 0)
425 goto out;
426
427 return sgt;
428
429out:
430 kfree(sgt);
431 return NULL;
432}
433EXPORT_SYMBOL_GPL(drm_gem_cma_prime_get_sg_table);
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452struct drm_gem_object *
453drm_gem_cma_prime_import_sg_table(struct drm_device *dev,
454 struct dma_buf_attachment *attach,
455 struct sg_table *sgt)
456{
457 struct drm_gem_cma_object *cma_obj;
458
459 if (sgt->nents != 1)
460 return ERR_PTR(-EINVAL);
461
462
463 cma_obj = __drm_gem_cma_create(dev, attach->dmabuf->size);
464 if (IS_ERR(cma_obj))
465 return ERR_CAST(cma_obj);
466
467 cma_obj->paddr = sg_dma_address(sgt->sgl);
468 cma_obj->sgt = sgt;
469
470 DRM_DEBUG_PRIME("dma_addr = %pad, size = %zu\n", &cma_obj->paddr, attach->dmabuf->size);
471
472 return &cma_obj->base;
473}
474EXPORT_SYMBOL_GPL(drm_gem_cma_prime_import_sg_table);
475
476
477
478
479
480
481
482
483
484
485
486
487
488int drm_gem_cma_prime_mmap(struct drm_gem_object *obj,
489 struct vm_area_struct *vma)
490{
491 struct drm_gem_cma_object *cma_obj;
492 struct drm_device *dev = obj->dev;
493 int ret;
494
495 mutex_lock(&dev->struct_mutex);
496 ret = drm_gem_mmap_obj(obj, obj->size, vma);
497 mutex_unlock(&dev->struct_mutex);
498 if (ret < 0)
499 return ret;
500
501 cma_obj = to_drm_gem_cma_obj(obj);
502 return drm_gem_cma_mmap_obj(cma_obj, vma);
503}
504EXPORT_SYMBOL_GPL(drm_gem_cma_prime_mmap);
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520void *drm_gem_cma_prime_vmap(struct drm_gem_object *obj)
521{
522 struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(obj);
523
524 return cma_obj->vaddr;
525}
526EXPORT_SYMBOL_GPL(drm_gem_cma_prime_vmap);
527
528
529
530
531
532
533
534
535
536
537
538
/*
 * drm_gem_cma_prime_vunmap - counterpart to drm_gem_cma_prime_vmap()
 * @obj: GEM object
 * @vaddr: mapping previously returned by drm_gem_cma_prime_vmap()
 *
 * Intentionally a no-op: the kernel mapping lives for the object's whole
 * lifetime and is torn down in drm_gem_cma_free_object().
 */
void drm_gem_cma_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
{
	/* Nothing to do. */
}
EXPORT_SYMBOL_GPL(drm_gem_cma_prime_vunmap);
544