#include <drm/drm_gem_vram_helper.h>
#include <drm/drm_device.h>
#include <drm/drm_mode.h>
#include <drm/drm_prime.h>
#include <drm/drm_vram_mm_helper.h>
#include <drm/ttm/ttm_page_alloc.h>

static const struct drm_gem_object_funcs drm_gem_vram_object_funcs;
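
/*
 * This library provides GEM buffer objects that are backed by video
 * RAM (VRAM) and managed through TTM. Drivers with dedicated video
 * memory can use it to implement dumb buffers and simple framebuffer
 * memory management.
 */

/*
 * Buffer-object helpers
 */
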
static void drm_gem_vram_cleanup(struct drm_gem_vram_object *gbo)
{
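	/*
	 * Reached from ttm_buffer_object_destroy() after TTM has finished
	 * with the buffer object; only the embedded GEM object is left to
	 * release here.
	 */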
	drm_gem_object_release(&gbo->bo.base);
}

static void drm_gem_vram_destroy(struct drm_gem_vram_object *gbo)
{
	drm_gem_vram_cleanup(gbo);
	kfree(gbo);
}

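/* TTM destroy callback; passed to ttm_bo_init() in drm_gem_vram_init() */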
static void ttm_buffer_object_destroy(struct ttm_buffer_object *bo)
{
	struct drm_gem_vram_object *gbo = drm_gem_vram_of_bo(bo);

	drm_gem_vram_destroy(gbo);
}

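/*
 * Fills in the object's TTM placement list from the flags in @pl_flag.
 * Without any recognized flag, the buffer is placed in system memory.
 */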
static void drm_gem_vram_placement(struct drm_gem_vram_object *gbo,
				   unsigned long pl_flag)
{
	unsigned int i;
	unsigned int c = 0;

	gbo->placement.placement = gbo->placements;
	gbo->placement.busy_placement = gbo->placements;

	if (pl_flag & TTM_PL_FLAG_VRAM)
		gbo->placements[c++].flags = TTM_PL_FLAG_WC |
					     TTM_PL_FLAG_UNCACHED |
					     TTM_PL_FLAG_VRAM;

	if (pl_flag & TTM_PL_FLAG_SYSTEM)
		gbo->placements[c++].flags = TTM_PL_MASK_CACHING |
					     TTM_PL_FLAG_SYSTEM;

	if (!c)
		gbo->placements[c++].flags = TTM_PL_MASK_CACHING |
					     TTM_PL_FLAG_SYSTEM;

	gbo->placement.num_placement = c;
	gbo->placement.num_busy_placement = c;

	for (i = 0; i < c; ++i) {
		gbo->placements[i].fpfn = 0;
		gbo->placements[i].lpfn = 0;
	}
}

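/*
 * Initializes the GEM object embedded in @gbo and sets up the TTM
 * buffer object with a VRAM | SYSTEM placement.
 */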
static int drm_gem_vram_init(struct drm_device *dev,
			     struct ttm_bo_device *bdev,
			     struct drm_gem_vram_object *gbo,
			     size_t size, unsigned long pg_align,
			     bool interruptible)
{
	int ret;
	size_t acc_size;

	if (!gbo->bo.base.funcs)
		gbo->bo.base.funcs = &drm_gem_vram_object_funcs;

	ret = drm_gem_object_init(dev, &gbo->bo.base, size);
	if (ret)
		return ret;

	acc_size = ttm_bo_dma_acc_size(bdev, size, sizeof(*gbo));

	gbo->bo.bdev = bdev;
	drm_gem_vram_placement(gbo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM);

	ret = ttm_bo_init(bdev, &gbo->bo, size, ttm_bo_type_device,
			  &gbo->placement, pg_align, interruptible, acc_size,
			  NULL, NULL, ttm_buffer_object_destroy);
	if (ret)
		goto err_drm_gem_object_release;

	return 0;

err_drm_gem_object_release:
	drm_gem_object_release(&gbo->bo.base);
	return ret;
}

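/**
 * drm_gem_vram_create() - Creates a VRAM-backed GEM object
 * @dev:		the DRM device
 * @bdev:		the TTM BO device backing the object
 * @size:		the buffer size in bytes
 * @pg_align:		the buffer's alignment in multiples of the page size
 * @interruptible:	sleep interruptible if waiting for memory
 *
 * Returns:
 * A new instance of &struct drm_gem_vram_object on success, or
 * an ERR_PTR()-encoded error code otherwise.
 */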
struct drm_gem_vram_object *drm_gem_vram_create(struct drm_device *dev,
						struct ttm_bo_device *bdev,
						size_t size,
						unsigned long pg_align,
						bool interruptible)
{
	struct drm_gem_vram_object *gbo;
	int ret;

	gbo = kzalloc(sizeof(*gbo), GFP_KERNEL);
	if (!gbo)
		return ERR_PTR(-ENOMEM);

	ret = drm_gem_vram_init(dev, bdev, gbo, size, pg_align, interruptible);
	if (ret < 0)
		goto err_kfree;

	return gbo;

err_kfree:
	kfree(gbo);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL(drm_gem_vram_create);

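/**
 * drm_gem_vram_put() - Releases a reference to a VRAM-backed GEM object
 * @gbo:	the GEM VRAM object
 *
 * See ttm_bo_put() for more information.
 */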
void drm_gem_vram_put(struct drm_gem_vram_object *gbo)
{
	ttm_bo_put(&gbo->bo);
}
EXPORT_SYMBOL(drm_gem_vram_put);

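/**
 * drm_gem_vram_mmap_offset() - Returns a GEM VRAM object's mmap offset
 * @gbo:	the GEM VRAM object
 *
 * Returns:
 * The buffer object's offset for userspace mappings on success, or
 * 0 if no offset has been allocated.
 */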
u64 drm_gem_vram_mmap_offset(struct drm_gem_vram_object *gbo)
{
	return drm_vma_node_offset_addr(&gbo->bo.base.vma_node);
}
EXPORT_SYMBOL(drm_gem_vram_mmap_offset);

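/**
 * drm_gem_vram_offset() - Returns a GEM VRAM object's offset in video memory
 * @gbo:	the GEM VRAM object
 *
 * This function returns the buffer object's offset in the device's video
 * memory. The buffer object must have been pinned beforehand.
 *
 * Returns:
 * The buffer object's offset in video memory on success, or
 * a negative errno code otherwise.
 */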
s64 drm_gem_vram_offset(struct drm_gem_vram_object *gbo)
{
	if (WARN_ON_ONCE(!gbo->pin_count))
		return (s64)-ENODEV;
	return gbo->bo.offset;
}
EXPORT_SYMBOL(drm_gem_vram_offset);

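/**
 * drm_gem_vram_pin() - Pins a GEM VRAM object in a memory region
 * @gbo:	the GEM VRAM object
 * @pl_flag:	a bitmask of possible memory regions
 *
 * Pinning a buffer object ensures that it is not evicted from its
 * memory region. A pinned buffer object has to be unpinned before it
 * can be pinned to another region. If @pl_flag is 0, the buffer is
 * pinned at its current location.
 *
 * Returns:
 * 0 on success, or a negative error code otherwise.
 */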
int drm_gem_vram_pin(struct drm_gem_vram_object *gbo, unsigned long pl_flag)
{
	int i, ret;
	struct ttm_operation_ctx ctx = { false, false };

	ret = ttm_bo_reserve(&gbo->bo, true, false, NULL);
	if (ret < 0)
		return ret;

	if (gbo->pin_count)
		goto out;

	if (pl_flag)
		drm_gem_vram_placement(gbo, pl_flag);

	for (i = 0; i < gbo->placement.num_placement; ++i)
		gbo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;

	ret = ttm_bo_validate(&gbo->bo, &gbo->placement, &ctx);
	if (ret < 0)
		goto err_ttm_bo_unreserve;

out:
	++gbo->pin_count;
	ttm_bo_unreserve(&gbo->bo);

	return 0;

err_ttm_bo_unreserve:
	ttm_bo_unreserve(&gbo->bo);
	return ret;
}
EXPORT_SYMBOL(drm_gem_vram_pin);

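/**
 * drm_gem_vram_unpin() - Unpins a GEM VRAM object
 * @gbo:	the GEM VRAM object
 *
 * Returns:
 * 0 on success, or a negative error code otherwise.
 */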
int drm_gem_vram_unpin(struct drm_gem_vram_object *gbo)
{
	int i, ret;
	struct ttm_operation_ctx ctx = { false, false };

	ret = ttm_bo_reserve(&gbo->bo, true, false, NULL);
	if (ret < 0)
		return ret;

	if (WARN_ON_ONCE(!gbo->pin_count))
		goto out;

	--gbo->pin_count;
	if (gbo->pin_count)
		goto out;

	for (i = 0; i < gbo->placement.num_placement; ++i)
		gbo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;

	ret = ttm_bo_validate(&gbo->bo, &gbo->placement, &ctx);
	if (ret < 0)
		goto err_ttm_bo_unreserve;

out:
	ttm_bo_unreserve(&gbo->bo);

	return 0;

err_ttm_bo_unreserve:
	ttm_bo_unreserve(&gbo->bo);
	return ret;
}
EXPORT_SYMBOL(drm_gem_vram_unpin);

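/**
 * drm_gem_vram_kmap() - Maps a GEM VRAM object into kernel address space
 * @gbo:	the GEM VRAM object
 * @map:	establish a mapping if necessary
 * @is_iomem:	returns true if the mapped memory is I/O memory; can be NULL
 *
 * This function maps the buffer object into the kernel's address space
 * or returns the existing mapping. If @map is false and no mapping has
 * been established yet, NULL is returned.
 *
 * Returns:
 * The buffer's virtual address on success, NULL if no mapping was
 * requested or established, or an ERR_PTR()-encoded error code otherwise.
 */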
void *drm_gem_vram_kmap(struct drm_gem_vram_object *gbo, bool map,
			bool *is_iomem)
{
	int ret;
	struct ttm_bo_kmap_obj *kmap = &gbo->kmap;

	if (kmap->virtual || !map)
		goto out;

	ret = ttm_bo_kmap(&gbo->bo, 0, gbo->bo.num_pages, kmap);
	if (ret)
		return ERR_PTR(ret);

out:
	if (!is_iomem)
		return kmap->virtual;
	if (!kmap->virtual) {
		*is_iomem = false;
		return NULL;
	}
	return ttm_kmap_obj_virtual(kmap, is_iomem);
}
EXPORT_SYMBOL(drm_gem_vram_kmap);

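/**
 * drm_gem_vram_kunmap() - Unmaps a GEM VRAM object
 * @gbo:	the GEM VRAM object
 */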
void drm_gem_vram_kunmap(struct drm_gem_vram_object *gbo)
{
	struct ttm_bo_kmap_obj *kmap = &gbo->kmap;

	if (!kmap->virtual)
		return;

	ttm_bo_kunmap(kmap);
	kmap->virtual = NULL;
}
EXPORT_SYMBOL(drm_gem_vram_kunmap);

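/**
 * drm_gem_vram_fill_create_dumb() - Helper for implementing
 *				     &struct drm_driver.dumb_create
 * @file:		the DRM file
 * @dev:		the DRM device
 * @bdev:		the TTM BO device managing the buffer object
 * @pg_align:		the buffer's alignment in multiples of the page size
 * @interruptible:	sleep interruptible if waiting for memory
 * @args:		the arguments as provided to
 *			&struct drm_driver.dumb_create
 *
 * This helper fills &struct drm_mode_create_dumb on behalf of
 * &struct drm_driver.dumb_create. Implementations of that interface
 * should forward their arguments to this helper, plus the
 * driver-specific parameters.
 *
 * Returns:
 * 0 on success, or a negative error code otherwise.
 */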
int drm_gem_vram_fill_create_dumb(struct drm_file *file,
				  struct drm_device *dev,
				  struct ttm_bo_device *bdev,
				  unsigned long pg_align,
				  bool interruptible,
				  struct drm_mode_create_dumb *args)
{
	size_t pitch, size;
	struct drm_gem_vram_object *gbo;
	int ret;
	u32 handle;

	pitch = args->width * ((args->bpp + 7) / 8);
	size = pitch * args->height;

	size = roundup(size, PAGE_SIZE);
	if (!size)
		return -EINVAL;

	gbo = drm_gem_vram_create(dev, bdev, size, pg_align, interruptible);
	if (IS_ERR(gbo))
		return PTR_ERR(gbo);

	ret = drm_gem_handle_create(file, &gbo->bo.base, &handle);
	if (ret)
		goto err_drm_gem_object_put_unlocked;

	drm_gem_object_put_unlocked(&gbo->bo.base);

	args->pitch = pitch;
	args->size = size;
	args->handle = handle;

	return 0;

err_drm_gem_object_put_unlocked:
	drm_gem_object_put_unlocked(&gbo->bo.base);
	return ret;
}
EXPORT_SYMBOL(drm_gem_vram_fill_create_dumb);

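/*
 * Helpers for struct ttm_bo_driver
 */

/* Returns true if @bo was created by this helper library */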
static bool drm_is_gem_vram(struct ttm_buffer_object *bo)
{
	return (bo->destroy == ttm_buffer_object_destroy);
}

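/**
 * drm_gem_vram_bo_driver_evict_flags() - Implements
 *					  &struct ttm_bo_driver.evict_flags
 * @bo:	the TTM buffer object
 * @pl:	the TTM placement to fill in
 *
 * Buffer objects created by this library are evicted to system memory.
 * Buffer objects of other types are left untouched.
 */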
void drm_gem_vram_bo_driver_evict_flags(struct ttm_buffer_object *bo,
					struct ttm_placement *pl)
{
	struct drm_gem_vram_object *gbo;

	if (!drm_is_gem_vram(bo))
		return;

	gbo = drm_gem_vram_of_bo(bo);
	drm_gem_vram_placement(gbo, TTM_PL_FLAG_SYSTEM);
	*pl = gbo->placement;
}
EXPORT_SYMBOL(drm_gem_vram_bo_driver_evict_flags);

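/**
 * drm_gem_vram_bo_driver_verify_access() - Implements
 *					    &struct ttm_bo_driver.verify_access
 * @bo:		the TTM buffer object
 * @filp:	the file pointer of the caller
 *
 * Returns:
 * 0 if the caller is allowed to map the buffer object, or a negative
 * error code otherwise.
 */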
int drm_gem_vram_bo_driver_verify_access(struct ttm_buffer_object *bo,
					 struct file *filp)
{
	struct drm_gem_vram_object *gbo = drm_gem_vram_of_bo(bo);

	return drm_vma_node_verify_access(&gbo->bo.base.vma_node,
					  filp->private_data);
}
EXPORT_SYMBOL(drm_gem_vram_bo_driver_verify_access);

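/*
 * Instance of &struct drm_vram_mm_funcs that wires the helpers above
 * into the DRM VRAM memory manager.
 */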
const struct drm_vram_mm_funcs drm_gem_vram_mm_funcs = {
	.evict_flags = drm_gem_vram_bo_driver_evict_flags,
	.verify_access = drm_gem_vram_bo_driver_verify_access
};
EXPORT_SYMBOL(drm_gem_vram_mm_funcs);

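/*
 * Helpers for struct drm_gem_object_funcs
 */

/*
 * Implements &struct drm_gem_object_funcs.free by dropping the
 * underlying TTM buffer object's reference.
 */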
static void drm_gem_vram_object_free(struct drm_gem_object *gem)
{
	struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(gem);

	drm_gem_vram_put(gbo);
}

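/*
 * Helpers for dumb buffers
 */

/**
 * drm_gem_vram_driver_dumb_create() - Implements
 *				       &struct drm_driver.dumb_create
 * @file:	the DRM file
 * @dev:	the DRM device
 * @args:	the arguments as provided to &struct drm_driver.dumb_create
 *
 * This function requires the driver to use &struct drm_device.vram_mm
 * for its instance of VRAM MM.
 *
 * Returns:
 * 0 on success, or a negative error code otherwise.
 */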
int drm_gem_vram_driver_dumb_create(struct drm_file *file,
				    struct drm_device *dev,
				    struct drm_mode_create_dumb *args)
{
	if (WARN_ONCE(!dev->vram_mm, "VRAM MM not initialized"))
		return -EINVAL;

	return drm_gem_vram_fill_create_dumb(file, dev, &dev->vram_mm->bdev, 0,
					     false, args);
}
EXPORT_SYMBOL(drm_gem_vram_driver_dumb_create);

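/**
 * drm_gem_vram_driver_dumb_mmap_offset() - Returns the mmap offset for
 *					    a dumb buffer's GEM handle
 * @file:	the DRM file
 * @dev:	the DRM device
 * @handle:	the GEM handle
 * @offset:	returns the mapping's memory offset on success
 *
 * Returns:
 * 0 on success, or a negative error code otherwise.
 */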
int drm_gem_vram_driver_dumb_mmap_offset(struct drm_file *file,
					 struct drm_device *dev,
					 uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *gem;
	struct drm_gem_vram_object *gbo;

	gem = drm_gem_object_lookup(file, handle);
	if (!gem)
		return -ENOENT;

	gbo = drm_gem_vram_of_gem(gem);
	*offset = drm_gem_vram_mmap_offset(gbo);

	drm_gem_object_put_unlocked(gem);

	return 0;
}
EXPORT_SYMBOL(drm_gem_vram_driver_dumb_mmap_offset);

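/*
 * Implements &struct drm_gem_object_funcs.pin
 */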
static int drm_gem_vram_object_pin(struct drm_gem_object *gem)
{
	struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(gem);

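	/*
	 * Pinning with a placement flag of 0 keeps the buffer in its
	 * current memory region, so a buffer that gets pinned for export
	 * (e.g. via PRIME) is not moved around.
	 */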
	return drm_gem_vram_pin(gbo, 0);
}
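
/*
 * Implements &struct drm_gem_object_funcs.unpin
 */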
static void drm_gem_vram_object_unpin(struct drm_gem_object *gem)
{
	struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(gem);

	drm_gem_vram_unpin(gbo);
}
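
/*
 * Implements &struct drm_gem_object_funcs.vmap: pins the buffer and maps
 * it into kernel address space. Returns NULL on failure.
 */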
static void *drm_gem_vram_object_vmap(struct drm_gem_object *gem)
{
	struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(gem);
	int ret;
	void *base;

	ret = drm_gem_vram_pin(gbo, 0);
	if (ret)
		return NULL;
	base = drm_gem_vram_kmap(gbo, true, NULL);
	if (IS_ERR(base)) {
		drm_gem_vram_unpin(gbo);
		return NULL;
	}
	return base;
}
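
/*
 * Implements &struct drm_gem_object_funcs.vunmap: unmaps and unpins the
 * buffer that was mapped by drm_gem_vram_object_vmap().
 */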
static void drm_gem_vram_object_vunmap(struct drm_gem_object *gem,
				       void *vaddr)
{
	struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(gem);

	drm_gem_vram_kunmap(gbo);
	drm_gem_vram_unpin(gbo);
}
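
/*
 * Default GEM object callbacks; installed by drm_gem_vram_init() unless
 * the driver already set its own &struct drm_gem_object_funcs.
 */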
static const struct drm_gem_object_funcs drm_gem_vram_object_funcs = {
	.free = drm_gem_vram_object_free,
	.pin = drm_gem_vram_object_pin,
	.unpin = drm_gem_vram_object_unpin,
	.vmap = drm_gem_vram_object_vmap,
	.vunmap = drm_gem_vram_object_vunmap
};