/*
 * drm gem CMA (contiguous memory allocator) helper functions
 */
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/export.h>
#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>

#include <drm/drmP.h>
#include <drm/drm.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_vma_manager.h>

/**
 * DOC: cma helpers
 *
 * The Contiguous Memory Allocator reserves a pool of memory at early boot
 * that is used to service requests for large blocks of contiguous memory.
 *
 * The DRM GEM/CMA helpers use this allocator as a means to provide buffer
 * objects that are physically contiguous in memory. This is useful for
 * display drivers that are unable to map scattered buffers via an IOMMU.
 */

/**
 * __drm_gem_cma_create - Create a GEM CMA object without allocating memory
 * @drm: DRM device
 * @size: size of the object to allocate
 *
 * This function creates and initializes a GEM CMA object of the given size,
 * but doesn't allocate any memory to back the object.
 *
 * Returns:
 * A struct drm_gem_cma_object * on success or an ERR_PTR()-encoded negative
 * error code on failure.
 */
static struct drm_gem_cma_object *
__drm_gem_cma_create(struct drm_device *drm, size_t size)
{
	struct drm_gem_cma_object *cma_obj;
	struct drm_gem_object *gem_obj;
	int ret;

	if (drm->driver->gem_create_object)
		gem_obj = drm->driver->gem_create_object(drm, size);
	else
		gem_obj = kzalloc(sizeof(*cma_obj), GFP_KERNEL);
	if (!gem_obj)
		return ERR_PTR(-ENOMEM);
	cma_obj = container_of(gem_obj, struct drm_gem_cma_object, base);

	ret = drm_gem_object_init(drm, gem_obj, size);
	if (ret)
		goto error;

	ret = drm_gem_create_mmap_offset(gem_obj);
	if (ret) {
		drm_gem_object_release(gem_obj);
		goto error;
	}

	return cma_obj;

error:
	kfree(cma_obj);
	return ERR_PTR(ret);
}

/**
 * drm_gem_cma_create - allocate an object with the given size
 * @drm: DRM device
 * @size: size of the object to allocate
 *
 * This function creates a CMA GEM object and allocates a contiguous chunk of
 * memory as backing store. The backing memory has the writecombine attribute
 * set.
 *
 * Returns:
 * A struct drm_gem_cma_object * on success or an ERR_PTR()-encoded negative
 * error code on failure.
 */
struct drm_gem_cma_object *drm_gem_cma_create(struct drm_device *drm,
					      size_t size)
{
	struct drm_gem_cma_object *cma_obj;
	int ret;

	size = round_up(size, PAGE_SIZE);

	cma_obj = __drm_gem_cma_create(drm, size);
	if (IS_ERR(cma_obj))
		return cma_obj;

	cma_obj->vaddr = dma_alloc_wc(drm->dev, size, &cma_obj->paddr,
				      GFP_KERNEL | __GFP_NOWARN);
	if (!cma_obj->vaddr) {
		dev_err(drm->dev, "failed to allocate buffer with size %zu\n",
			size);
		ret = -ENOMEM;
		goto error;
	}

	return cma_obj;

error:
	drm_gem_object_unreference_unlocked(&cma_obj->base);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(drm_gem_cma_create);
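
/*
 * Illustrative sketch (not part of the original file): how a driver might
 * allocate and later release a physically contiguous buffer with the helper
 * above. The variable names and sizes are hypothetical.
 *
 *	struct drm_gem_cma_object *cma_obj;
 *
 *	cma_obj = drm_gem_cma_create(drm, pitch * height);
 *	if (IS_ERR(cma_obj))
 *		return PTR_ERR(cma_obj);
 *
 *	// Scanout from cma_obj->paddr, CPU access through cma_obj->vaddr.
 *
 *	// Dropping the last reference ends up in drm_gem_cma_free_object(),
 *	// which returns the memory to the CMA pool.
 *	drm_gem_object_unreference_unlocked(&cma_obj->base);
 */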

/**
 * drm_gem_cma_create_with_handle - allocate an object with the given size
 *     and return a GEM handle to it
 * @file_priv: DRM file-private structure to register the handle for
 * @drm: DRM device
 * @size: size of the object to allocate
 * @handle: return location for the GEM handle
 *
 * This function creates a CMA GEM object, allocating a physically contiguous
 * chunk of memory as backing store. The GEM object is then added to the list
 * of objects associated with the given file and a handle to it is returned.
 *
 * Returns:
 * A struct drm_gem_cma_object * on success or an ERR_PTR()-encoded negative
 * error code on failure.
 */
static struct drm_gem_cma_object *
drm_gem_cma_create_with_handle(struct drm_file *file_priv,
			       struct drm_device *drm, size_t size,
			       uint32_t *handle)
{
	struct drm_gem_cma_object *cma_obj;
	struct drm_gem_object *gem_obj;
	int ret;

	cma_obj = drm_gem_cma_create(drm, size);
	if (IS_ERR(cma_obj))
		return cma_obj;

	gem_obj = &cma_obj->base;

	/*
	 * Register the object in the file's handle table; the returned handle
	 * is what userspace uses to refer to the buffer.
	 */
	ret = drm_gem_handle_create(file_priv, gem_obj, handle);
	/* Drop the reference from allocation - the handle holds it now. */
	drm_gem_object_unreference_unlocked(gem_obj);
	if (ret)
		return ERR_PTR(ret);

	return cma_obj;
}

/**
 * drm_gem_cma_free_object - free resources associated with a CMA GEM object
 * @gem_obj: GEM object to free
 *
 * This function frees the backing memory of the CMA GEM object, cleans up the
 * GEM object state and frees the memory used to store the object itself.
 * Drivers using the CMA helpers should set this as their
 * ->gem_free_object() callback.
 */
void drm_gem_cma_free_object(struct drm_gem_object *gem_obj)
{
	struct drm_gem_cma_object *cma_obj;

	cma_obj = to_drm_gem_cma_obj(gem_obj);

	if (cma_obj->vaddr) {
		dma_free_wc(gem_obj->dev->dev, cma_obj->base.size,
			    cma_obj->vaddr, cma_obj->paddr);
	} else if (gem_obj->import_attach) {
		drm_prime_gem_destroy(gem_obj, cma_obj->sgt);
	}

	drm_gem_object_release(gem_obj);

	kfree(cma_obj);
}
EXPORT_SYMBOL_GPL(drm_gem_cma_free_object);

/**
 * drm_gem_cma_dumb_create_internal - create a dumb buffer object
 * @file_priv: DRM file-private structure to create the dumb buffer for
 * @drm: DRM device
 * @args: IOCTL data
 *
 * This aligns the pitch and size arguments to the minimum required. This is
 * an internal helper that can be wrapped by a driver to account for hardware
 * with more specific alignment requirements. It should not be used directly
 * as the ->dumb_create() callback in a DRM driver.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_cma_dumb_create_internal(struct drm_file *file_priv,
				     struct drm_device *drm,
				     struct drm_mode_create_dumb *args)
{
	unsigned int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
	struct drm_gem_cma_object *cma_obj;

	if (args->pitch < min_pitch)
		args->pitch = min_pitch;

	if (args->size < args->pitch * args->height)
		args->size = args->pitch * args->height;

	cma_obj = drm_gem_cma_create_with_handle(file_priv, drm, args->size,
						 &args->handle);
	return PTR_ERR_OR_ZERO(cma_obj);
}
EXPORT_SYMBOL_GPL(drm_gem_cma_dumb_create_internal);

/**
 * drm_gem_cma_dumb_create - create a dumb buffer object
 * @file_priv: DRM file-private structure to create the dumb buffer for
 * @drm: DRM device
 * @args: IOCTL data
 *
 * This function computes the pitch of the dumb buffer and rounds it up to an
 * integer number of bytes per pixel. Drivers for hardware that doesn't have
 * any additional restrictions on the pitch can directly use this function as
 * their ->dumb_create() callback.
 *
 * For hardware with additional restrictions, drivers can adjust the fields
 * set up by userspace and pass the IOCTL data along to the
 * drm_gem_cma_dumb_create_internal() function.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_cma_dumb_create(struct drm_file *file_priv,
			    struct drm_device *drm,
			    struct drm_mode_create_dumb *args)
{
	struct drm_gem_cma_object *cma_obj;

	args->pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
	args->size = args->pitch * args->height;

	cma_obj = drm_gem_cma_create_with_handle(file_priv, drm, args->size,
						 &args->handle);
	return PTR_ERR_OR_ZERO(cma_obj);
}
EXPORT_SYMBOL_GPL(drm_gem_cma_dumb_create);
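
/*
 * Illustrative sketch (not part of the original file): a driver using the CMA
 * helpers typically wires them into its struct drm_driver as shown below. The
 * "foo_driver" structure is hypothetical; the callback names match the DRM
 * core interfaces of this kernel generation.
 *
 *	static struct drm_driver foo_driver = {
 *		...
 *		.gem_free_object	= drm_gem_cma_free_object,
 *		.gem_vm_ops		= &drm_gem_cma_vm_ops,
 *		.dumb_create		= drm_gem_cma_dumb_create,
 *		.dumb_map_offset	= drm_gem_cma_dumb_map_offset,
 *		.dumb_destroy		= drm_gem_dumb_destroy,
 *		...
 *	};
 */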

/**
 * drm_gem_cma_dumb_map_offset - return the fake mmap offset for a CMA GEM
 *     object
 * @file_priv: DRM file-private structure containing the GEM object
 * @drm: DRM device
 * @handle: GEM object handle
 * @offset: return location for the fake mmap offset
 *
 * This function looks up the GEM object by handle and returns the fake mmap
 * offset associated with it. Drivers using the CMA helpers should set this
 * as their ->dumb_map_offset() callback.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_cma_dumb_map_offset(struct drm_file *file_priv,
				struct drm_device *drm, u32 handle,
				u64 *offset)
{
	struct drm_gem_object *gem_obj;

	gem_obj = drm_gem_object_lookup(file_priv, handle);
	if (!gem_obj) {
		dev_err(drm->dev, "failed to lookup GEM object\n");
		return -EINVAL;
	}

	*offset = drm_vma_node_offset_addr(&gem_obj->vma_node);

	drm_gem_object_unreference_unlocked(gem_obj);

	return 0;
}
EXPORT_SYMBOL_GPL(drm_gem_cma_dumb_map_offset);

const struct vm_operations_struct drm_gem_cma_vm_ops = {
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};
EXPORT_SYMBOL_GPL(drm_gem_cma_vm_ops);

static int drm_gem_cma_mmap_obj(struct drm_gem_cma_object *cma_obj,
				struct vm_area_struct *vma)
{
	int ret;

	/*
	 * Clear the VM_PFNMAP flag that was set by drm_gem_mmap(), and set the
	 * vm_pgoff (used as a fake buffer offset by DRM) to 0 as we want to
	 * map the whole buffer.
	 */
	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_pgoff = 0;

	ret = dma_mmap_wc(cma_obj->base.dev->dev, vma, cma_obj->vaddr,
			  cma_obj->paddr, vma->vm_end - vma->vm_start);
	if (ret)
		drm_gem_vm_close(vma);

	return ret;
}

/**
 * drm_gem_cma_mmap - memory-map a CMA GEM object
 * @filp: file object
 * @vma: VMA for the area to be mapped
 *
 * This function implements an augmented version of the GEM DRM file mmap
 * operation for CMA objects: in addition to the usual GEM VMA setup it
 * immediately maps the entire backing store instead of using on-demand
 * faulting. Drivers that use the CMA helpers should use this function as
 * their ->mmap() handler in the DRM device file's file_operations structure.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_cma_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_gem_cma_object *cma_obj;
	struct drm_gem_object *gem_obj;
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret)
		return ret;

	gem_obj = vma->vm_private_data;
	cma_obj = to_drm_gem_cma_obj(gem_obj);

	return drm_gem_cma_mmap_obj(cma_obj, vma);
}
EXPORT_SYMBOL_GPL(drm_gem_cma_mmap);
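
/*
 * Illustrative sketch (not part of the original file): drm_gem_cma_mmap() is
 * meant to be plugged into the driver's file_operations. "foo_fops" is a
 * hypothetical example; the other handlers are the standard DRM file
 * operations provided by the core.
 *
 *	static const struct file_operations foo_fops = {
 *		.owner		= THIS_MODULE,
 *		.open		= drm_open,
 *		.release	= drm_release,
 *		.unlocked_ioctl	= drm_ioctl,
 *		.poll		= drm_poll,
 *		.read		= drm_read,
 *		.mmap		= drm_gem_cma_mmap,
 *	};
 */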

#ifdef CONFIG_DEBUG_FS
/**
 * drm_gem_cma_describe - describe a CMA GEM object for debugfs
 * @cma_obj: CMA GEM object
 * @m: debugfs file handle
 *
 * This function can be used to dump a human-readable representation of the
 * CMA GEM object into a synthetic file.
 */
void drm_gem_cma_describe(struct drm_gem_cma_object *cma_obj,
			  struct seq_file *m)
{
	struct drm_gem_object *obj = &cma_obj->base;
	uint64_t off;

	off = drm_vma_node_start(&obj->vma_node);

	seq_printf(m, "%2d (%2d) %08llx %pad %p %zu\n",
		   obj->name, obj->refcount.refcount.counter,
		   off, &cma_obj->paddr, cma_obj->vaddr, obj->size);
}
EXPORT_SYMBOL_GPL(drm_gem_cma_describe);
#endif

/**
 * drm_gem_cma_prime_get_sg_table - provide a scatter/gather table of pinned
 *     pages for a CMA GEM object
 * @obj: GEM object
 *
 * This function exports a scatter/gather table suitable for PRIME usage by
 * calling the standard DMA mapping API. Drivers using the CMA helpers should
 * set this as their ->gem_prime_get_sg_table() callback.
 *
 * Returns:
 * A pointer to the scatter/gather table of pinned pages or NULL on failure.
 */
struct sg_table *drm_gem_cma_prime_get_sg_table(struct drm_gem_object *obj)
{
	struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(obj);
	struct sg_table *sgt;
	int ret;

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return NULL;

	ret = dma_get_sgtable(obj->dev->dev, sgt, cma_obj->vaddr,
			      cma_obj->paddr, obj->size);
	if (ret < 0)
		goto out;

	return sgt;

out:
	kfree(sgt);
	return NULL;
}
EXPORT_SYMBOL_GPL(drm_gem_cma_prime_get_sg_table);

/**
 * drm_gem_cma_prime_import_sg_table - produce a CMA GEM object from another
 *     driver's scatter/gather table of pinned pages
 * @dev: device to import into
 * @attach: DMA-BUF attachment
 * @sgt: scatter/gather table of pinned pages
 *
 * This function imports a scatter/gather table exported via DMA-BUF by
 * another driver. Imported buffers must be physically contiguous in memory
 * (i.e. the scatter/gather table must contain a single entry). Drivers that
 * use the CMA helpers should set this as their
 * ->gem_prime_import_sg_table() callback.
 *
 * Returns:
 * A pointer to a newly created GEM object or an ERR_PTR-encoded negative
 * error code on failure.
 */
struct drm_gem_object *
drm_gem_cma_prime_import_sg_table(struct drm_device *dev,
				  struct dma_buf_attachment *attach,
				  struct sg_table *sgt)
{
	struct drm_gem_cma_object *cma_obj;

	/* The CMA helpers can only handle a single contiguous chunk. */
	if (sgt->nents != 1)
		return ERR_PTR(-EINVAL);

	/* Create a CMA GEM buffer. */
	cma_obj = __drm_gem_cma_create(dev, attach->dmabuf->size);
	if (IS_ERR(cma_obj))
		return ERR_CAST(cma_obj);

	cma_obj->paddr = sg_dma_address(sgt->sgl);
	cma_obj->sgt = sgt;

	DRM_DEBUG_PRIME("dma_addr = %pad, size = %zu\n", &cma_obj->paddr,
			attach->dmabuf->size);

	return &cma_obj->base;
}
EXPORT_SYMBOL_GPL(drm_gem_cma_prime_import_sg_table);
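
/*
 * Illustrative sketch (not part of the original file): the PRIME-related
 * fields a CMA-based driver would typically fill in its struct drm_driver.
 * The surrounding driver structure is hypothetical; the generic import/export
 * helpers referenced here are provided by the DRM core.
 *
 *	.prime_handle_to_fd	= drm_gem_prime_handle_to_fd,
 *	.prime_fd_to_handle	= drm_gem_prime_fd_to_handle,
 *	.gem_prime_import	= drm_gem_prime_import,
 *	.gem_prime_export	= drm_gem_prime_export,
 *	.gem_prime_get_sg_table	= drm_gem_cma_prime_get_sg_table,
 *	.gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
 *	.gem_prime_vmap		= drm_gem_cma_prime_vmap,
 *	.gem_prime_vunmap	= drm_gem_cma_prime_vunmap,
 *	.gem_prime_mmap		= drm_gem_cma_prime_mmap,
 */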

/**
 * drm_gem_cma_prime_mmap - memory-map an exported CMA GEM object
 * @obj: GEM object
 * @vma: VMA for the area to be mapped
 *
 * This function maps a buffer exported via DRM PRIME into a userspace
 * process's address space. Drivers that use the CMA helpers should set this
 * as their ->gem_prime_mmap() callback.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_cma_prime_mmap(struct drm_gem_object *obj,
			   struct vm_area_struct *vma)
{
	struct drm_gem_cma_object *cma_obj;
	int ret;

	ret = drm_gem_mmap_obj(obj, obj->size, vma);
	if (ret < 0)
		return ret;

	cma_obj = to_drm_gem_cma_obj(obj);
	return drm_gem_cma_mmap_obj(cma_obj, vma);
}
EXPORT_SYMBOL_GPL(drm_gem_cma_prime_mmap);

/**
 * drm_gem_cma_prime_vmap - map a CMA GEM object into the kernel's virtual
 *     address space
 * @obj: GEM object
 *
 * This function maps a buffer exported via DRM PRIME into the kernel's
 * virtual address space. Since the CMA buffers are already mapped into the
 * kernel virtual address space this simply returns the cached virtual
 * address. Drivers using the CMA helpers should set this as their
 * ->gem_prime_vmap() callback.
 *
 * Returns:
 * The kernel virtual address of the CMA GEM object's backing store.
 */
void *drm_gem_cma_prime_vmap(struct drm_gem_object *obj)
{
	struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(obj);

	return cma_obj->vaddr;
}
EXPORT_SYMBOL_GPL(drm_gem_cma_prime_vmap);

/**
 * drm_gem_cma_prime_vunmap - unmap a CMA GEM object from the kernel's virtual
 *     address space
 * @obj: GEM object
 * @vaddr: kernel virtual address where the CMA GEM object was mapped
 *
 * This function removes a buffer exported via DRM PRIME from the kernel's
 * virtual address space. This is a no-op because CMA buffers cannot be
 * unmapped from kernel space. Drivers using the CMA helpers should set this
 * as their ->gem_prime_vunmap() callback.
 */
void drm_gem_cma_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
{
	/* Nothing to do */
}
EXPORT_SYMBOL_GPL(drm_gem_cma_prime_vunmap);