/*
 * Copyright © 2015 Broadcom
 */

/**
 * DOC: VC4 GEM BO management support
 *
 * The VC4 GPU architecture (both scanout and rendering) has direct
 * access to system memory with no MMU in between.  To support it, we
 * use the GEM CMA helper functions to allocate contiguous ranges of
 * physical memory for our BOs.
 *
 * Since the CMA allocator is very slow, we keep a cache of recently
 * freed BOs around so that the kernel's allocation of objects for 3D
 * rendering can return quickly.
 */

#include <linux/dma-buf.h>

#include "vc4_drv.h"
#include "uapi/drm/vc4_drm.h"

static const char * const bo_type_names[] = {
	"kernel",
	"V3D",
	"V3D shader",
	"dumb",
	"binner",
	"RCL",
	"BCL",
	"kernel BO cache",
};

static bool is_user_label(int label)
{
	return label >= VC4_BO_TYPE_COUNT;
}

static void vc4_bo_stats_print(struct drm_printer *p, struct vc4_dev *vc4)
{
	int i;

	for (i = 0; i < vc4->num_labels; i++) {
		if (!vc4->bo_labels[i].num_allocated)
			continue;

		drm_printf(p, "%30s: %6dkb BOs (%d)\n",
			   vc4->bo_labels[i].name,
			   vc4->bo_labels[i].size_allocated / 1024,
			   vc4->bo_labels[i].num_allocated);
	}

	mutex_lock(&vc4->purgeable.lock);
	if (vc4->purgeable.num)
		drm_printf(p, "%30s: %6zdkb BOs (%d)\n", "userspace BO cache",
			   vc4->purgeable.size / 1024, vc4->purgeable.num);

	if (vc4->purgeable.purged_num)
		drm_printf(p, "%30s: %6zdkb BOs (%d)\n", "total purged BO",
			   vc4->purgeable.purged_size / 1024,
			   vc4->purgeable.purged_num);
	mutex_unlock(&vc4->purgeable.lock);
}

static int vc4_bo_stats_debugfs(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct drm_printer p = drm_seq_file_printer(m);

	vc4_bo_stats_print(&p, vc4);

	return 0;
}
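/* For illustration only (values invented, layout follows the format
 * strings above), a read of the "bo_stats" debugfs file renders one
 * line per label with a non-zero allocation count, e.g.:
 *
 *                            V3D:   4096kb BOs (3)
 *                kernel BO cache:   2048kb BOs (5)
 *             userspace BO cache:   1024kb BOs (1)
 */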

/* Takes ownership of *name and returns the appropriate slot for it in
 * the bo_labels[] array, extending it as necessary.
 *
 * This is inefficient and could use a hash table instead of walking
 * an array and strcmp()ing.  However, the assumption is that user
 * labeling will be infrequent (scanout buffers and other long-lived
 * objects, plus shader name labeling by Mesa in debug builds), so we
 * don't have to worry about the overhead of this.
 */
static int vc4_get_user_label(struct vc4_dev *vc4, const char *name)
{
	int i;
	int free_slot = -1;

	for (i = 0; i < vc4->num_labels; i++) {
		if (!vc4->bo_labels[i].name) {
			free_slot = i;
		} else if (strcmp(vc4->bo_labels[i].name, name) == 0) {
			kfree(name);
			return i;
		}
	}

	if (free_slot != -1) {
		WARN_ON(vc4->bo_labels[free_slot].num_allocated != 0);
		vc4->bo_labels[free_slot].name = name;
		return free_slot;
	} else {
		u32 new_label_count = vc4->num_labels + 1;
		struct vc4_label *new_labels =
			krealloc(vc4->bo_labels,
				 new_label_count * sizeof(*new_labels),
				 GFP_KERNEL);

		if (!new_labels) {
			kfree(name);
			return -1;
		}

		free_slot = vc4->num_labels;
		vc4->bo_labels = new_labels;
		vc4->num_labels = new_label_count;

		vc4->bo_labels[free_slot].name = name;
		vc4->bo_labels[free_slot].num_allocated = 0;
		vc4->bo_labels[free_slot].size_allocated = 0;

		return free_slot;
	}
}

static void vc4_bo_set_label(struct drm_gem_object *gem_obj, int label)
{
	struct vc4_bo *bo = to_vc4_bo(gem_obj);
	struct vc4_dev *vc4 = to_vc4_dev(gem_obj->dev);

	lockdep_assert_held(&vc4->bo_lock);

	if (label != -1) {
		vc4->bo_labels[label].num_allocated++;
		vc4->bo_labels[label].size_allocated += gem_obj->size;
	}

	vc4->bo_labels[bo->label].num_allocated--;
	vc4->bo_labels[bo->label].size_allocated -= gem_obj->size;

	if (vc4->bo_labels[bo->label].num_allocated == 0 &&
	    is_user_label(bo->label)) {
		/* Free user BO label slots on last unreference.
		 * Slots are just where we track the stats for a given
		 * name, and once a name is unused we can reuse that
		 * slot.
		 */
		kfree(vc4->bo_labels[bo->label].name);
		vc4->bo_labels[bo->label].name = NULL;
	}

	bo->label = label;
}

static uint32_t bo_page_index(size_t size)
{
	return (size / PAGE_SIZE) - 1;
}
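/* Example (assuming PAGE_SIZE is 4096): a 4096-byte BO maps to size-list
 * index 0, a 16384-byte BO to index 3. Callers are expected to pass
 * page-aligned, non-zero sizes, so the subtraction never underflows.
 */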

static void vc4_bo_destroy(struct vc4_bo *bo)
{
	struct drm_gem_object *obj = &bo->base.base;
	struct vc4_dev *vc4 = to_vc4_dev(obj->dev);

	lockdep_assert_held(&vc4->bo_lock);

	vc4_bo_set_label(obj, -1);

	if (bo->validated_shader) {
		kfree(bo->validated_shader->uniform_addr_offsets);
		kfree(bo->validated_shader->texture_samples);
		kfree(bo->validated_shader);
		bo->validated_shader = NULL;
	}

	drm_gem_cma_free_object(obj);
}

static void vc4_bo_remove_from_cache(struct vc4_bo *bo)
{
	struct vc4_dev *vc4 = to_vc4_dev(bo->base.base.dev);

	lockdep_assert_held(&vc4->bo_lock);
	list_del(&bo->unref_head);
	list_del(&bo->size_head);
}

static struct list_head *vc4_get_cache_list_for_size(struct drm_device *dev,
						     size_t size)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	uint32_t page_index = bo_page_index(size);

	if (vc4->bo_cache.size_list_size <= page_index) {
		uint32_t new_size = max(vc4->bo_cache.size_list_size * 2,
					page_index + 1);
		struct list_head *new_list;
		uint32_t i;

		new_list = kmalloc_array(new_size, sizeof(struct list_head),
					 GFP_KERNEL);
		if (!new_list)
			return NULL;

		/* Rebase the old cached BO lists to their new list
		 * head locations.
		 */
		for (i = 0; i < vc4->bo_cache.size_list_size; i++) {
			struct list_head *old_list =
				&vc4->bo_cache.size_list[i];

			if (list_empty(old_list))
				INIT_LIST_HEAD(&new_list[i]);
			else
				list_replace(old_list, &new_list[i]);
		}

		for (i = vc4->bo_cache.size_list_size; i < new_size; i++)
			INIT_LIST_HEAD(&new_list[i]);

		kfree(vc4->bo_cache.size_list);
		vc4->bo_cache.size_list = new_list;
		vc4->bo_cache.size_list_size = new_size;
	}

	return &vc4->bo_cache.size_list[page_index];
}

static void vc4_bo_cache_purge(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	mutex_lock(&vc4->bo_lock);
	while (!list_empty(&vc4->bo_cache.time_list)) {
		struct vc4_bo *bo = list_last_entry(&vc4->bo_cache.time_list,
						    struct vc4_bo, unref_head);
		vc4_bo_remove_from_cache(bo);
		vc4_bo_destroy(bo);
	}
	mutex_unlock(&vc4->bo_lock);
}

void vc4_bo_add_to_purgeable_pool(struct vc4_bo *bo)
{
	struct vc4_dev *vc4 = to_vc4_dev(bo->base.base.dev);

	mutex_lock(&vc4->purgeable.lock);
	list_add_tail(&bo->size_head, &vc4->purgeable.list);
	vc4->purgeable.num++;
	vc4->purgeable.size += bo->base.base.size;
	mutex_unlock(&vc4->purgeable.lock);
}

static void vc4_bo_remove_from_purgeable_pool_locked(struct vc4_bo *bo)
{
	struct vc4_dev *vc4 = to_vc4_dev(bo->base.base.dev);

	/* list_del_init() is used here because the caller might release
	 * the purgeable lock in order to acquire the madv one, and the BO
	 * may be re-inserted in the purgeable list during that window.
	 * Re-initializing the entry keeps the list_empty(&bo->size_head)
	 * check in vc4_bo_userspace_cache_purge() meaningful after the
	 * BO has been removed from the pool.
	 */
	list_del_init(&bo->size_head);
	vc4->purgeable.num--;
	vc4->purgeable.size -= bo->base.base.size;
}

void vc4_bo_remove_from_purgeable_pool(struct vc4_bo *bo)
{
	struct vc4_dev *vc4 = to_vc4_dev(bo->base.base.dev);

	mutex_lock(&vc4->purgeable.lock);
	vc4_bo_remove_from_purgeable_pool_locked(bo);
	mutex_unlock(&vc4->purgeable.lock);
}

static void vc4_bo_purge(struct drm_gem_object *obj)
{
	struct vc4_bo *bo = to_vc4_bo(obj);
	struct drm_device *dev = obj->dev;

	WARN_ON(!mutex_is_locked(&bo->madv_lock));
	WARN_ON(bo->madv != VC4_MADV_DONTNEED);

	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);

	dma_free_wc(dev->dev, obj->size, bo->base.vaddr, bo->base.paddr);
	bo->base.vaddr = NULL;
	bo->madv = __VC4_MADV_PURGED;
}

static void vc4_bo_userspace_cache_purge(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	mutex_lock(&vc4->purgeable.lock);
	while (!list_empty(&vc4->purgeable.list)) {
		struct vc4_bo *bo = list_first_entry(&vc4->purgeable.list,
						     struct vc4_bo, size_head);
		struct drm_gem_object *obj = &bo->base.base;
		size_t purged_size = 0;

		vc4_bo_remove_from_purgeable_pool_locked(bo);

		/* Release the purgeable lock while we're purging the BO so
		 * that other people can continue inserting things in the
		 * purgeable pool without having to wait for all BOs to be
		 * purged.
		 */
		mutex_unlock(&vc4->purgeable.lock);
		mutex_lock(&bo->madv_lock);

		/* Since we released the purgeable pool lock before acquiring
		 * the BO madv one, the user may have marked the BO as
		 * WILLNEED and re-used it in the meantime.
		 * Before purging the BO we need to make sure
		 * - it is still marked as DONTNEED
		 * - it has not been re-inserted in the purgeable list
		 * - it is not used by HW blocks
		 * If one of these conditions is not met, just skip the entry.
		 */
		if (bo->madv == VC4_MADV_DONTNEED &&
		    list_empty(&bo->size_head) &&
		    !refcount_read(&bo->usecnt)) {
			purged_size = bo->base.base.size;
			vc4_bo_purge(obj);
		}
		mutex_unlock(&bo->madv_lock);
		mutex_lock(&vc4->purgeable.lock);

		if (purged_size) {
			vc4->purgeable.purged_size += purged_size;
			vc4->purgeable.purged_num++;
		}
	}
	mutex_unlock(&vc4->purgeable.lock);
}

static struct vc4_bo *vc4_bo_get_from_cache(struct drm_device *dev,
					    uint32_t size,
					    enum vc4_kernel_bo_type type)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	uint32_t page_index = bo_page_index(size);
	struct vc4_bo *bo = NULL;

	size = roundup(size, PAGE_SIZE);

	mutex_lock(&vc4->bo_lock);
	if (page_index >= vc4->bo_cache.size_list_size)
		goto out;

	if (list_empty(&vc4->bo_cache.size_list[page_index]))
		goto out;

	bo = list_first_entry(&vc4->bo_cache.size_list[page_index],
			      struct vc4_bo, size_head);
	vc4_bo_remove_from_cache(bo);
	kref_init(&bo->base.base.refcount);

out:
	if (bo)
		vc4_bo_set_label(&bo->base.base, type);
	mutex_unlock(&vc4->bo_lock);
	return bo;
}

/**
 * vc4_create_object() - Implementation of driver->gem_create_object.
 * @dev: DRM device
 * @size: Size in bytes of the memory the object will reference
 *
 * This lets the CMA helpers allocate object structs for us, and keep
 * our BO statistics correct.
 */
struct drm_gem_object *vc4_create_object(struct drm_device *dev, size_t size)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_bo *bo;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (!bo)
		return ERR_PTR(-ENOMEM);

	bo->madv = VC4_MADV_WILLNEED;
	refcount_set(&bo->usecnt, 0);
	mutex_init(&bo->madv_lock);
	mutex_lock(&vc4->bo_lock);
	bo->label = VC4_BO_TYPE_KERNEL;
	vc4->bo_labels[VC4_BO_TYPE_KERNEL].num_allocated++;
	vc4->bo_labels[VC4_BO_TYPE_KERNEL].size_allocated += size;
	mutex_unlock(&vc4->bo_lock);

	return &bo->base.base;
}

struct vc4_bo *vc4_bo_create(struct drm_device *dev, size_t unaligned_size,
			     bool allow_unzeroed, enum vc4_kernel_bo_type type)
{
	size_t size = roundup(unaligned_size, PAGE_SIZE);
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct drm_gem_cma_object *cma_obj;
	struct vc4_bo *bo;

	if (size == 0)
		return ERR_PTR(-EINVAL);

	/* First, try to get a vc4_bo from the kernel BO cache. */
	bo = vc4_bo_get_from_cache(dev, size, type);
	if (bo) {
		if (!allow_unzeroed)
			memset(bo->base.vaddr, 0, bo->base.base.size);
		return bo;
	}

	cma_obj = drm_gem_cma_create(dev, size);
	if (IS_ERR(cma_obj)) {
		/*
		 * If we've run out of CMA memory, kill the cache of
		 * CMA allocations we've got laying around and try again.
		 */
		vc4_bo_cache_purge(dev);
		cma_obj = drm_gem_cma_create(dev, size);
	}

	if (IS_ERR(cma_obj)) {
		/*
		 * Still not enough CMA memory, purge the userspace BO
		 * cache and retry.
		 * This is sub-optimal since we purge the whole userspace
		 * BO cache, which forces users that want to re-use the BO
		 * to restore its initial content.
		 * Ideally, we should purge entries one by one and retry
		 * after each to see if CMA allocation succeeds. Or even
		 * better, try to find an entry with at least the same
		 * size.
		 */
		vc4_bo_userspace_cache_purge(dev);
		cma_obj = drm_gem_cma_create(dev, size);
	}

	if (IS_ERR(cma_obj)) {
		struct drm_printer p = drm_info_printer(vc4->dev->dev);

		DRM_ERROR("Failed to allocate from CMA:\n");
		vc4_bo_stats_print(&p, vc4);
		return ERR_PTR(-ENOMEM);
	}
	bo = to_vc4_bo(&cma_obj->base);

	/* By default, BOs do not support the MADV ioctl. This will be enabled
	 * only on BOs that are exposed to userspace (V3D, V3D_SHADER and DUMB
	 * BOs).
	 */
	bo->madv = __VC4_MADV_NOTSUPP;

	mutex_lock(&vc4->bo_lock);
	vc4_bo_set_label(&cma_obj->base, type);
	mutex_unlock(&vc4->bo_lock);

	return bo;
}
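/* A minimal usage sketch (hypothetical caller, not a call site in this
 * file): allocate a page-sized kernel BO, use it, then drop the
 * reference so it can return to the BO cache via vc4_free_object().
 *
 *	struct vc4_bo *bo = vc4_bo_create(dev, PAGE_SIZE, true,
 *					  VC4_BO_TYPE_KERNEL);
 *
 *	if (IS_ERR(bo))
 *		return PTR_ERR(bo);
 *	// ... use bo->base.vaddr / bo->base.paddr ...
 *	drm_gem_object_put_unlocked(&bo->base.base);
 */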

int vc4_dumb_create(struct drm_file *file_priv,
		    struct drm_device *dev,
		    struct drm_mode_create_dumb *args)
{
	int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
	struct vc4_bo *bo = NULL;
	int ret;

	if (args->pitch < min_pitch)
		args->pitch = min_pitch;

	if (args->size < args->pitch * args->height)
		args->size = args->pitch * args->height;

	bo = vc4_bo_create(dev, args->size, false, VC4_BO_TYPE_DUMB);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	bo->madv = VC4_MADV_WILLNEED;

	ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle);
	drm_gem_object_put_unlocked(&bo->base.base);

	return ret;
}
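/* Worked example for the computation above: a 1920x1080 dumb buffer at
 * 32 bpp gets min_pitch = DIV_ROUND_UP(1920 * 32, 8) = 7680 bytes and a
 * minimum size of 7680 * 1080 = 8294400 bytes, which vc4_bo_create()
 * then rounds up to a whole number of pages.
 */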

static void vc4_bo_cache_free_old(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	unsigned long expire_time = jiffies - msecs_to_jiffies(1000);

	lockdep_assert_held(&vc4->bo_lock);

	while (!list_empty(&vc4->bo_cache.time_list)) {
		struct vc4_bo *bo = list_last_entry(&vc4->bo_cache.time_list,
						    struct vc4_bo, unref_head);
		if (time_before(expire_time, bo->free_time)) {
			mod_timer(&vc4->bo_cache.time_timer,
				  round_jiffies_up(jiffies +
						   msecs_to_jiffies(1000)));
			return;
		}

		vc4_bo_remove_from_cache(bo);
		vc4_bo_destroy(bo);
	}
}

/* Called on the last userspace/kernel unreference of the BO. Returns
 * it to the BO cache if possible, otherwise frees it.
 */
void vc4_free_object(struct drm_gem_object *gem_bo)
{
	struct drm_device *dev = gem_bo->dev;
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_bo *bo = to_vc4_bo(gem_bo);
	struct list_head *cache_list;

	/* Remove the BO from the purgeable list. */
	mutex_lock(&bo->madv_lock);
	if (bo->madv == VC4_MADV_DONTNEED && !refcount_read(&bo->usecnt))
		vc4_bo_remove_from_purgeable_pool(bo);
	mutex_unlock(&bo->madv_lock);

	mutex_lock(&vc4->bo_lock);

	/* If the object references someone else's memory, we can't cache
	 * it.
	 */
	if (gem_bo->import_attach) {
		vc4_bo_destroy(bo);
		goto out;
	}

	/* Don't cache if it was publicly named. */
	if (gem_bo->name) {
		vc4_bo_destroy(bo);
		goto out;
	}

	/* If this object was partially constructed but CMA allocation
	 * had failed, just free it. Can also happen when the BO has been
	 * purged.
	 */
	if (!bo->base.vaddr) {
		vc4_bo_destroy(bo);
		goto out;
	}

	cache_list = vc4_get_cache_list_for_size(dev, gem_bo->size);
	if (!cache_list) {
		vc4_bo_destroy(bo);
		goto out;
	}

	if (bo->validated_shader) {
		kfree(bo->validated_shader->uniform_addr_offsets);
		kfree(bo->validated_shader->texture_samples);
		kfree(bo->validated_shader);
		bo->validated_shader = NULL;
	}

	/* Reset madv and usecnt before adding the BO to the cache. */
	bo->madv = __VC4_MADV_NOTSUPP;
	refcount_set(&bo->usecnt, 0);

	bo->t_format = false;
	bo->free_time = jiffies;
	list_add(&bo->size_head, cache_list);
	list_add(&bo->unref_head, &vc4->bo_cache.time_list);

	vc4_bo_set_label(&bo->base.base, VC4_BO_TYPE_KERNEL_CACHE);

	vc4_bo_cache_free_old(dev);

out:
	mutex_unlock(&vc4->bo_lock);
}

static void vc4_bo_cache_time_work(struct work_struct *work)
{
	struct vc4_dev *vc4 =
		container_of(work, struct vc4_dev, bo_cache.time_work);
	struct drm_device *dev = vc4->dev;

	mutex_lock(&vc4->bo_lock);
	vc4_bo_cache_free_old(dev);
	mutex_unlock(&vc4->bo_lock);
}

int vc4_bo_inc_usecnt(struct vc4_bo *bo)
{
	int ret;

	/* Fast path: if the BO is already retained by someone, no need to
	 * check the madv status.
	 */
	if (refcount_inc_not_zero(&bo->usecnt))
		return 0;

	mutex_lock(&bo->madv_lock);
	switch (bo->madv) {
	case VC4_MADV_WILLNEED:
		if (!refcount_inc_not_zero(&bo->usecnt))
			refcount_set(&bo->usecnt, 1);
		ret = 0;
		break;
	case VC4_MADV_DONTNEED:
		/* We shouldn't use a BO marked as purgeable if at least
		 * someone else retained its content by incrementing usecnt.
		 * Luckily the BO hasn't been purged yet, but something wrong
		 * is happening here. Just throw an error instead of
		 * authorizing this use case.
		 */
	case __VC4_MADV_PURGED:
		/* We can't use a purged BO. */
	default:
		/* Invalid madv value. */
		ret = -EINVAL;
		break;
	}
	mutex_unlock(&bo->madv_lock);

	return ret;
}

void vc4_bo_dec_usecnt(struct vc4_bo *bo)
{
	/* Fast path: if the BO is still retained by someone, no need to test
	 * the madv value.
	 */
	if (refcount_dec_not_one(&bo->usecnt))
		return;

	mutex_lock(&bo->madv_lock);
	if (refcount_dec_and_test(&bo->usecnt) &&
	    bo->madv == VC4_MADV_DONTNEED)
		vc4_bo_add_to_purgeable_pool(bo);
	mutex_unlock(&bo->madv_lock);
}
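/* Typical pairing, as a sketch (hypothetical call site): code that needs
 * the BO contents to stay resident brackets the access with the two
 * helpers above.
 *
 *	ret = vc4_bo_inc_usecnt(bo);
 *	if (ret)
 *		return ret;
 *	// ... access bo->base.vaddr; the BO cannot be purged here ...
 *	vc4_bo_dec_usecnt(bo);
 */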

static void vc4_bo_cache_time_timer(struct timer_list *t)
{
	struct vc4_dev *vc4 = from_timer(vc4, t, bo_cache.time_timer);

	schedule_work(&vc4->bo_cache.time_work);
}

struct dma_buf *
vc4_prime_export(struct drm_device *dev, struct drm_gem_object *obj, int flags)
{
	struct vc4_bo *bo = to_vc4_bo(obj);
	struct dma_buf *dmabuf;
	int ret;

	if (bo->validated_shader) {
		DRM_DEBUG("Attempting to export shader BO\n");
		return ERR_PTR(-EINVAL);
	}

	/* Note: as soon as a BO is exported it becomes unpurgeable, because
	 * no one ever decrements the usecnt even if the reference held by the
	 * exported BO is released. This shouldn't be a problem since we don't
	 * expect exported BOs to be marked as purgeable.
	 */
	ret = vc4_bo_inc_usecnt(bo);
	if (ret) {
		DRM_ERROR("Failed to increment BO usecnt\n");
		return ERR_PTR(ret);
	}

	dmabuf = drm_gem_prime_export(dev, obj, flags);
	if (IS_ERR(dmabuf))
		vc4_bo_dec_usecnt(bo);

	return dmabuf;
}

vm_fault_t vc4_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct vc4_bo *bo = to_vc4_bo(obj);

	/* The only reason we would end up here is when user-space accesses
	 * the BO's memory after it's been purged.
	 */
	mutex_lock(&bo->madv_lock);
	WARN_ON(bo->madv != __VC4_MADV_PURGED);
	mutex_unlock(&bo->madv_lock);

	return VM_FAULT_SIGBUS;
}

int vc4_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_gem_object *gem_obj;
	unsigned long vm_pgoff;
	struct vc4_bo *bo;
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret)
		return ret;

	gem_obj = vma->vm_private_data;
	bo = to_vc4_bo(gem_obj);

	if (bo->validated_shader && (vma->vm_flags & VM_WRITE)) {
		DRM_DEBUG("mmapping of shader BOs for writing not allowed.\n");
		return -EINVAL;
	}

	if (bo->madv != VC4_MADV_WILLNEED) {
		DRM_DEBUG("mmapping of %s BO not allowed\n",
			  bo->madv == VC4_MADV_DONTNEED ?
			  "purgeable" : "purged");
		return -EINVAL;
	}

	/*
	 * Clear the VM_PFNMAP flag that was set by drm_gem_mmap(): the
	 * mapping is backed by pages handled by dma_mmap_wc() below.
	 */
	vma->vm_flags &= ~VM_PFNMAP;

	/* This ->vm_pgoff dance is needed to make all parties happy:
	 * - dma_mmap_wc() uses ->vm_pgoff as an offset within the allocated
	 *   mem-region, hence the need to set it to zero (the value set by
	 *   the DRM core is a virtual offset encoding the GEM object-id)
	 * - the mmap() core logic needs ->vm_pgoff to be restored to its
	 *   initial value before returning from this function because it
	 *   encodes the offset of this GEM in the dev->anon_inode
	 *   pseudo-file, and this information is used when we invalidate
	 *   userspace mappings with drm_vma_node_unmap() (called from
	 *   vc4_bo_purge()).
	 */
	vm_pgoff = vma->vm_pgoff;
	vma->vm_pgoff = 0;
	ret = dma_mmap_wc(bo->base.base.dev->dev, vma, bo->base.vaddr,
			  bo->base.paddr, vma->vm_end - vma->vm_start);
	vma->vm_pgoff = vm_pgoff;

	if (ret)
		drm_gem_vm_close(vma);

	return ret;
}

int vc4_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
	struct vc4_bo *bo = to_vc4_bo(obj);

	if (bo->validated_shader && (vma->vm_flags & VM_WRITE)) {
		DRM_DEBUG("mmapping of shader BOs for writing not allowed.\n");
		return -EINVAL;
	}

	return drm_gem_cma_prime_mmap(obj, vma);
}

void *vc4_prime_vmap(struct drm_gem_object *obj)
{
	struct vc4_bo *bo = to_vc4_bo(obj);

	if (bo->validated_shader) {
		DRM_DEBUG("mmapping of shader BOs not allowed.\n");
		return ERR_PTR(-EINVAL);
	}

	return drm_gem_cma_prime_vmap(obj);
}

struct drm_gem_object *
vc4_prime_import_sg_table(struct drm_device *dev,
			  struct dma_buf_attachment *attach,
			  struct sg_table *sgt)
{
	struct drm_gem_object *obj;

	obj = drm_gem_cma_prime_import_sg_table(dev, attach, sgt);
	if (IS_ERR(obj))
		return obj;

	obj->resv = attach->dmabuf->resv;

	return obj;
}

static int vc4_grab_bin_bo(struct vc4_dev *vc4, struct vc4_file *vc4file)
{
	int ret;

	if (!vc4->v3d)
		return -ENODEV;

	if (vc4file->bin_bo_used)
		return 0;

	ret = vc4_v3d_bin_bo_get(vc4, &vc4file->bin_bo_used);
	if (ret)
		return ret;

	return 0;
}

int vc4_create_bo_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	struct drm_vc4_create_bo *args = data;
	struct vc4_file *vc4file = file_priv->driver_priv;
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_bo *bo = NULL;
	int ret;

	ret = vc4_grab_bin_bo(vc4, vc4file);
	if (ret)
		return ret;

	/* Ask for a zeroed BO: cached BOs may hold another user's stale
	 * data, and vc4_bo_create() clears them when allow_unzeroed is
	 * false.
	 */
	bo = vc4_bo_create(dev, args->size, false, VC4_BO_TYPE_V3D);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	bo->madv = VC4_MADV_WILLNEED;

	ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle);
	drm_gem_object_put_unlocked(&bo->base.base);

	return ret;
}

int vc4_mmap_bo_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct drm_vc4_mmap_bo *args = data;
	struct drm_gem_object *gem_obj;

	gem_obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!gem_obj) {
		DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
		return -EINVAL;
	}

	/* The mmap offset was set up at BO allocation time. */
	args->offset = drm_vma_node_offset_addr(&gem_obj->vma_node);

	drm_gem_object_put_unlocked(gem_obj);
	return 0;
}
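/* From userspace the mapping is a two-step flow, sketched here with
 * libdrm (error handling omitted; "fd" is an open DRM fd and "handle"
 * a GEM handle returned by DRM_IOCTL_VC4_CREATE_BO):
 *
 *	struct drm_vc4_mmap_bo map = { .handle = handle };
 *
 *	drmIoctl(fd, DRM_IOCTL_VC4_MMAP_BO, &map);
 *	void *ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *			 fd, map.offset);
 */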

int
vc4_create_shader_bo_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct drm_vc4_create_shader_bo *args = data;
	struct vc4_file *vc4file = file_priv->driver_priv;
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_bo *bo = NULL;
	int ret;

	if (args->size == 0)
		return -EINVAL;

	if (args->size % sizeof(u64) != 0)
		return -EINVAL;

	if (args->flags != 0) {
		DRM_INFO("Unknown flags set: 0x%08x\n", args->flags);
		return -EINVAL;
	}

	if (args->pad != 0) {
		DRM_INFO("Pad set: 0x%08x\n", args->pad);
		return -EINVAL;
	}

	ret = vc4_grab_bin_bo(vc4, vc4file);
	if (ret)
		return ret;

	bo = vc4_bo_create(dev, args->size, true, VC4_BO_TYPE_V3D_SHADER);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	bo->madv = VC4_MADV_WILLNEED;

	if (copy_from_user(bo->base.vaddr,
			   (void __user *)(uintptr_t)args->data,
			   args->size)) {
		ret = -EFAULT;
		goto fail;
	}

	/* Clear the rest of the memory, since the BO may come from the
	 * BO cache with stale contents.
	 */
	memset(bo->base.vaddr + args->size, 0,
	       bo->base.base.size - args->size);

	bo->validated_shader = vc4_validate_shader(&bo->base);
	if (!bo->validated_shader) {
		ret = -EINVAL;
		goto fail;
	}

	/* We have to create the handle after validation, to avoid races
	 * where userspace could mmap the shader BO before it is validated.
	 */
	ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle);

fail:
	drm_gem_object_put_unlocked(&bo->base.base);

	return ret;
}

/**
 * vc4_set_tiling_ioctl() - Sets the tiling modifier for a BO.
 * @dev: DRM device
 * @data: ioctl argument
 * @file_priv: DRM file for this fd
 *
 * The tiling state of the BO decides the default modifier of an fb if
 * no specific modifier was set by userspace, and the return value of
 * vc4_get_tiling_ioctl() (so that userspace can treat a BO it
 * received from dmabuf as the same tiling format as the producer
 * used).
 */
int vc4_set_tiling_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	struct drm_vc4_set_tiling *args = data;
	struct drm_gem_object *gem_obj;
	struct vc4_bo *bo;
	bool t_format;

	if (args->flags != 0)
		return -EINVAL;

	switch (args->modifier) {
	case DRM_FORMAT_MOD_NONE:
		t_format = false;
		break;
	case DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED:
		t_format = true;
		break;
	default:
		return -EINVAL;
	}

	gem_obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!gem_obj) {
		DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
		return -ENOENT;
	}
	bo = to_vc4_bo(gem_obj);
	bo->t_format = t_format;

	drm_gem_object_put_unlocked(gem_obj);

	return 0;
}

/**
 * vc4_get_tiling_ioctl() - Gets the tiling modifier for a BO.
 * @dev: DRM device
 * @data: ioctl argument
 * @file_priv: DRM file for this fd
 *
 * Returns the tiling modifier for a BO as set by vc4_set_tiling_ioctl().
 */
int vc4_get_tiling_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	struct drm_vc4_get_tiling *args = data;
	struct drm_gem_object *gem_obj;
	struct vc4_bo *bo;

	if (args->flags != 0 || args->modifier != 0)
		return -EINVAL;

	gem_obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!gem_obj) {
		DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
		return -ENOENT;
	}
	bo = to_vc4_bo(gem_obj);

	if (bo->t_format)
		args->modifier = DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED;
	else
		args->modifier = DRM_FORMAT_MOD_NONE;

	drm_gem_object_put_unlocked(gem_obj);

	return 0;
}

int vc4_bo_cache_init(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	int i;

	/* Create the initial set of BO labels that the kernel will
	 * use.  This lets us avoid a bunch of string reallocation in
	 * the kernel's draw and BO allocation paths.
	 */
	vc4->bo_labels = kcalloc(VC4_BO_TYPE_COUNT, sizeof(*vc4->bo_labels),
				 GFP_KERNEL);
	if (!vc4->bo_labels)
		return -ENOMEM;
	vc4->num_labels = VC4_BO_TYPE_COUNT;

	BUILD_BUG_ON(ARRAY_SIZE(bo_type_names) != VC4_BO_TYPE_COUNT);
	for (i = 0; i < VC4_BO_TYPE_COUNT; i++)
		vc4->bo_labels[i].name = bo_type_names[i];

	mutex_init(&vc4->bo_lock);

	vc4_debugfs_add_file(dev, "bo_stats", vc4_bo_stats_debugfs, NULL);

	INIT_LIST_HEAD(&vc4->bo_cache.time_list);

	INIT_WORK(&vc4->bo_cache.time_work, vc4_bo_cache_time_work);
	timer_setup(&vc4->bo_cache.time_timer, vc4_bo_cache_time_timer, 0);

	return 0;
}

void vc4_bo_cache_destroy(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	int i;

	del_timer(&vc4->bo_cache.time_timer);
	cancel_work_sync(&vc4->bo_cache.time_work);

	vc4_bo_cache_purge(dev);

	for (i = 0; i < vc4->num_labels; i++) {
		if (vc4->bo_labels[i].num_allocated) {
			DRM_ERROR("Destroying BO cache with %d %s BOs still allocated\n",
				  vc4->bo_labels[i].num_allocated,
				  vc4->bo_labels[i].name);
		}

		if (is_user_label(i))
			kfree(vc4->bo_labels[i].name);
	}
	kfree(vc4->bo_labels);
}

int vc4_label_bo_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct drm_vc4_label_bo *args = data;
	char *name;
	struct drm_gem_object *gem_obj;
	int ret = 0, label;

	if (!args->len)
		return -EINVAL;

	name = strndup_user(u64_to_user_ptr(args->name), args->len + 1);
	if (IS_ERR(name))
		return PTR_ERR(name);

	gem_obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!gem_obj) {
		DRM_ERROR("Failed to look up GEM BO %d\n", args->handle);
		kfree(name);
		return -ENOENT;
	}

	mutex_lock(&vc4->bo_lock);
	label = vc4_get_user_label(vc4, name);
	if (label != -1)
		vc4_bo_set_label(gem_obj, label);
	else
		ret = -ENOMEM;
	mutex_unlock(&vc4->bo_lock);

	drm_gem_object_put_unlocked(gem_obj);

	return ret;
}
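/* Userspace counterpart, as a sketch (error handling omitted; "fd" is an
 * open DRM fd and "handle" a valid GEM handle):
 *
 *	const char *name = "scanout buffer";
 *	struct drm_vc4_label_bo label = {
 *		.handle = handle,
 *		.len = strlen(name),
 *		.name = (uintptr_t)name,
 *	};
 *
 *	drmIoctl(fd, DRM_IOCTL_VC4_LABEL_BO, &label);
 */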